author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 07:46:09 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 07:46:09 +0000
commit    043aa641ad4373e96fd748deb1e7fab3cb579a07 (patch)
tree      f8fde8a97ab5db152043f6c01043672114c0a4df
parent    Releasing progress-linux version 2.1.6-5~progress7.99u1. (diff)
download  pacemaker-043aa641ad4373e96fd748deb1e7fab3cb579a07.tar.xz
          pacemaker-043aa641ad4373e96fd748deb1e7fab3cb579a07.zip
Merging upstream version 2.1.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
-rw-r--r--ChangeLog433
-rw-r--r--GNUmakefile8
-rw-r--r--INSTALL.md8
-rw-r--r--Makefile.am71
-rw-r--r--agents/Makefile.am4
-rw-r--r--agents/alerts/Makefile.am4
-rwxr-xr-xagents/ocf/HealthCPU.in23
-rwxr-xr-xagents/ocf/HealthSMART.in16
-rw-r--r--agents/ocf/Makefile.am11
-rwxr-xr-xagents/ocf/ifspeed.in20
-rwxr-xr-xagents/ocf/ping.in13
-rw-r--r--agents/stonith/Makefile.am3
-rw-r--r--configure.ac793
-rw-r--r--cts/Makefile.am61
-rw-r--r--cts/README.md110
-rw-r--r--cts/benchmark/Makefile.am5
-rw-r--r--cts/benchmark/clubench.in2
-rw-r--r--cts/cli/crm_verify_invalid_bz.xml72
-rw-r--r--cts/cli/crm_verify_invalid_no_stonith.xml12
-rw-r--r--cts/cli/regression.daemons.exp10
-rw-r--r--cts/cli/regression.error_codes.exp12
-rw-r--r--cts/cli/regression.rules.exp12
-rw-r--r--cts/cli/regression.tools.exp348
-rwxr-xr-xcts/cluster_test.in (renamed from cts/lab/cluster_test.in)2
-rw-r--r--cts/cts-attrd.in17
-rwxr-xr-xcts/cts-cli.in56
-rw-r--r--cts/cts-lab.in136
-rw-r--r--cts/cts-log-watcher.in (renamed from cts/lab/cts-log-watcher.in)0
-rw-r--r--cts/cts-scheduler.in88
-rwxr-xr-xcts/cts.in404
-rw-r--r--cts/lab/CIB.py518
-rw-r--r--cts/lab/CM_corosync.py60
-rwxr-xr-xcts/lab/CTSaudits.py879
-rw-r--r--cts/lab/CTSlab.py.in135
-rw-r--r--cts/lab/CTSscenarios.py563
-rw-r--r--cts/lab/CTStests.py3178
-rw-r--r--cts/lab/ClusterManager.py940
-rw-r--r--cts/lab/Makefile.am31
-rw-r--r--cts/lab/OCFIPraTest.py.in173
-rw-r--r--cts/lab/__init__.py15
-rw-r--r--cts/lab/cib_xml.py319
-rwxr-xr-xcts/lab/cts.in262
-rw-r--r--cts/lxc_autogen.sh.in545
-rw-r--r--cts/scheduler/Makefile.am8
-rw-r--r--cts/scheduler/dot/bug-lf-2422.dot3
-rw-r--r--cts/scheduler/dot/bundle-interleave-start.dot46
-rw-r--r--cts/scheduler/dot/bundle-nested-colocation.dot1
-rw-r--r--cts/scheduler/dot/bundle-order-startup-clone-2.dot2
-rw-r--r--cts/scheduler/dot/bundle-probe-remotes.dot10
-rw-r--r--cts/scheduler/dot/bundle-promoted-anticolocation-1.dot7
-rw-r--r--cts/scheduler/dot/bundle-promoted-anticolocation-2.dot7
-rw-r--r--cts/scheduler/dot/bundle-promoted-anticolocation-3.dot32
-rw-r--r--cts/scheduler/dot/bundle-promoted-anticolocation-4.dot32
-rw-r--r--cts/scheduler/dot/bundle-promoted-anticolocation-5.dot32
-rw-r--r--cts/scheduler/dot/bundle-promoted-anticolocation-6.dot32
-rw-r--r--cts/scheduler/dot/bundle-promoted-colocation-1.dot7
-rw-r--r--cts/scheduler/dot/bundle-promoted-colocation-2.dot7
-rw-r--r--cts/scheduler/dot/bundle-promoted-colocation-3.dot32
-rw-r--r--cts/scheduler/dot/bundle-promoted-colocation-4.dot32
-rw-r--r--cts/scheduler/dot/bundle-promoted-colocation-5.dot32
-rw-r--r--cts/scheduler/dot/bundle-promoted-colocation-6.dot32
-rw-r--r--cts/scheduler/dot/bundle-promoted-location-1.dot2
-rw-r--r--cts/scheduler/dot/bundle-promoted-location-2.dot75
-rw-r--r--cts/scheduler/dot/bundle-promoted-location-3.dot2
-rw-r--r--cts/scheduler/dot/bundle-promoted-location-4.dot2
-rw-r--r--cts/scheduler/dot/bundle-promoted-location-5.dot2
-rw-r--r--cts/scheduler/dot/bundle-promoted-location-6.dot37
-rw-r--r--cts/scheduler/dot/bundle-replicas-change.dot1
-rw-r--r--cts/scheduler/dot/cancel-behind-moving-remote.dot121
-rw-r--r--cts/scheduler/dot/clone-order-16instances.dot93
-rw-r--r--cts/scheduler/dot/clone-recover-no-shuffle-1.dot10
-rw-r--r--cts/scheduler/dot/clone-recover-no-shuffle-10.dot10
-rw-r--r--cts/scheduler/dot/clone-recover-no-shuffle-11.dot21
-rw-r--r--cts/scheduler/dot/clone-recover-no-shuffle-12.dot35
-rw-r--r--cts/scheduler/dot/clone-recover-no-shuffle-2.dot21
-rw-r--r--cts/scheduler/dot/clone-recover-no-shuffle-3.dot32
-rw-r--r--cts/scheduler/dot/clone-recover-no-shuffle-4.dot10
-rw-r--r--cts/scheduler/dot/clone-recover-no-shuffle-5.dot21
-rw-r--r--cts/scheduler/dot/clone-recover-no-shuffle-6.dot32
-rw-r--r--cts/scheduler/dot/clone-recover-no-shuffle-7.dot30
-rw-r--r--cts/scheduler/dot/clone-recover-no-shuffle-8.dot63
-rw-r--r--cts/scheduler/dot/clone-recover-no-shuffle-9.dot69
-rw-r--r--cts/scheduler/dot/coloc-with-inner-group-member.dot40
-rw-r--r--cts/scheduler/dot/group-anticolocation-2.dot29
-rw-r--r--cts/scheduler/dot/group-anticolocation-3.dot8
-rw-r--r--cts/scheduler/dot/group-anticolocation-4.dot29
-rw-r--r--cts/scheduler/dot/group-anticolocation-5.dot29
-rw-r--r--cts/scheduler/dot/group-anticolocation.dot27
-rw-r--r--cts/scheduler/dot/guest-host-not-fenceable.dot4
-rw-r--r--cts/scheduler/dot/inc4.dot2
-rw-r--r--cts/scheduler/dot/node-pending-timeout.dot7
-rw-r--r--cts/scheduler/dot/order-clone.dot3
-rw-r--r--cts/scheduler/dot/pending-node-no-uname.dot7
-rw-r--r--cts/scheduler/dot/promoted-ordering.dot28
-rw-r--r--cts/scheduler/dot/promoted-probed-score.dot292
-rw-r--r--cts/scheduler/dot/timeout-by-node.dot40
-rw-r--r--cts/scheduler/dot/unfence-definition.dot4
-rw-r--r--cts/scheduler/dot/unfence-parameters.dot4
-rw-r--r--cts/scheduler/dot/utilization-complex.dot1
-rw-r--r--cts/scheduler/exp/bug-1822.exp4
-rw-r--r--cts/scheduler/exp/bug-lf-2422.exp9
-rw-r--r--cts/scheduler/exp/bundle-interleave-start.exp608
-rw-r--r--cts/scheduler/exp/bundle-nested-colocation.exp3
-rw-r--r--cts/scheduler/exp/bundle-order-fencing.exp242
-rw-r--r--cts/scheduler/exp/bundle-order-startup-clone-2.exp6
-rw-r--r--cts/scheduler/exp/bundle-order-stop-on-remote.exp134
-rw-r--r--cts/scheduler/exp/bundle-probe-remotes.exp30
-rw-r--r--cts/scheduler/exp/bundle-promoted-anticolocation-1.exp37
-rw-r--r--cts/scheduler/exp/bundle-promoted-anticolocation-2.exp37
-rw-r--r--cts/scheduler/exp/bundle-promoted-anticolocation-3.exp179
-rw-r--r--cts/scheduler/exp/bundle-promoted-anticolocation-4.exp179
-rw-r--r--cts/scheduler/exp/bundle-promoted-anticolocation-5.exp179
-rw-r--r--cts/scheduler/exp/bundle-promoted-anticolocation-6.exp179
-rw-r--r--cts/scheduler/exp/bundle-promoted-colocation-1.exp37
-rw-r--r--cts/scheduler/exp/bundle-promoted-colocation-2.exp37
-rw-r--r--cts/scheduler/exp/bundle-promoted-colocation-3.exp179
-rw-r--r--cts/scheduler/exp/bundle-promoted-colocation-4.exp179
-rw-r--r--cts/scheduler/exp/bundle-promoted-colocation-5.exp179
-rw-r--r--cts/scheduler/exp/bundle-promoted-colocation-6.exp179
-rw-r--r--cts/scheduler/exp/bundle-promoted-location-1.exp1
-rw-r--r--cts/scheduler/exp/bundle-promoted-location-2.exp328
-rw-r--r--cts/scheduler/exp/bundle-promoted-location-3.exp1
-rw-r--r--cts/scheduler/exp/bundle-promoted-location-4.exp1
-rw-r--r--cts/scheduler/exp/bundle-promoted-location-5.exp1
-rw-r--r--cts/scheduler/exp/bundle-promoted-location-6.exp136
-rw-r--r--cts/scheduler/exp/bundle-replicas-change.exp3
-rw-r--r--cts/scheduler/exp/cancel-behind-moving-remote.exp760
-rw-r--r--cts/scheduler/exp/clone-anon-failcount.exp2
-rw-r--r--cts/scheduler/exp/clone-order-16instances.exp234
-rw-r--r--cts/scheduler/exp/clone-recover-no-shuffle-1.exp51
-rw-r--r--cts/scheduler/exp/clone-recover-no-shuffle-10.exp51
-rw-r--r--cts/scheduler/exp/clone-recover-no-shuffle-11.exp110
-rw-r--r--cts/scheduler/exp/clone-recover-no-shuffle-12.exp187
-rw-r--r--cts/scheduler/exp/clone-recover-no-shuffle-2.exp110
-rw-r--r--cts/scheduler/exp/clone-recover-no-shuffle-3.exp171
-rw-r--r--cts/scheduler/exp/clone-recover-no-shuffle-4.exp51
-rw-r--r--cts/scheduler/exp/clone-recover-no-shuffle-5.exp110
-rw-r--r--cts/scheduler/exp/clone-recover-no-shuffle-6.exp171
-rw-r--r--cts/scheduler/exp/clone-recover-no-shuffle-7.exp162
-rw-r--r--cts/scheduler/exp/clone-recover-no-shuffle-8.exp338
-rw-r--r--cts/scheduler/exp/clone-recover-no-shuffle-9.exp364
-rw-r--r--cts/scheduler/exp/coloc-with-inner-group-member.exp202
-rw-r--r--cts/scheduler/exp/group-anticolocation-2.exp148
-rw-r--r--cts/scheduler/exp/group-anticolocation-3.exp38
-rw-r--r--cts/scheduler/exp/group-anticolocation-4.exp148
-rw-r--r--cts/scheduler/exp/group-anticolocation-5.exp148
-rw-r--r--cts/scheduler/exp/group-anticolocation.exp204
-rw-r--r--cts/scheduler/exp/inc4.exp6
-rw-r--r--cts/scheduler/exp/no-promote-on-unrunnable-guest.exp110
-rw-r--r--cts/scheduler/exp/node-pending-timeout.exp38
-rw-r--r--cts/scheduler/exp/pending-node-no-uname.exp11
-rw-r--r--cts/scheduler/exp/promoted-failed-demote-2.exp2
-rw-r--r--cts/scheduler/exp/promoted-failed-demote.exp2
-rw-r--r--cts/scheduler/exp/promoted-ordering.exp40
-rw-r--r--cts/scheduler/exp/promoted-probed-score.exp336
-rw-r--r--cts/scheduler/exp/shutdown-lock-expiration.exp2
-rw-r--r--cts/scheduler/exp/timeout-by-node.exp228
-rw-r--r--cts/scheduler/exp/unfence-definition.exp12
-rw-r--r--cts/scheduler/exp/unfence-parameters.exp12
-rw-r--r--cts/scheduler/scores/594.scores3
-rw-r--r--cts/scheduler/scores/a-promote-then-b-migrate.scores2
-rw-r--r--cts/scheduler/scores/asymmetric.scores1
-rw-r--r--cts/scheduler/scores/bug-1822.scores2
-rw-r--r--cts/scheduler/scores/bug-5014-CLONE-A-stop-B-started.scores1
-rw-r--r--cts/scheduler/scores/bug-5143-ms-shuffle.scores12
-rw-r--r--cts/scheduler/scores/bug-5186-partial-migrate.scores6
-rw-r--r--cts/scheduler/scores/bug-cl-5168.scores2
-rw-r--r--cts/scheduler/scores/bug-lf-2106.scores36
-rw-r--r--cts/scheduler/scores/bug-lf-2153.scores6
-rw-r--r--cts/scheduler/scores/bug-lf-2171.scores4
-rw-r--r--cts/scheduler/scores/bug-lf-2422.scores16
-rw-r--r--cts/scheduler/scores/bug-lf-2453.scores4
-rw-r--r--cts/scheduler/scores/bug-lf-2551.scores42
-rw-r--r--cts/scheduler/scores/bug-lf-2574.scores7
-rw-r--r--cts/scheduler/scores/bug-lf-2581.scores4
-rw-r--r--cts/scheduler/scores/bug-lf-2619.scores2
-rw-r--r--cts/scheduler/scores/bug-n-387749.scores9
-rw-r--r--cts/scheduler/scores/bug-suse-707150.scores52
-rw-r--r--cts/scheduler/scores/bundle-connection-with-container.scores144
-rw-r--r--cts/scheduler/scores/bundle-interleave-promote.scores186
-rw-r--r--cts/scheduler/scores/bundle-interleave-start.scores196
-rw-r--r--cts/scheduler/scores/bundle-nested-colocation.scores228
-rw-r--r--cts/scheduler/scores/bundle-order-fencing.scores246
-rw-r--r--cts/scheduler/scores/bundle-order-partial-start-2.scores52
-rw-r--r--cts/scheduler/scores/bundle-order-partial-start.scores52
-rw-r--r--cts/scheduler/scores/bundle-order-partial-stop.scores50
-rw-r--r--cts/scheduler/scores/bundle-order-startup-clone-2.scores184
-rw-r--r--cts/scheduler/scores/bundle-order-startup-clone.scores88
-rw-r--r--cts/scheduler/scores/bundle-order-startup.scores50
-rw-r--r--cts/scheduler/scores/bundle-order-stop-clone.scores188
-rw-r--r--cts/scheduler/scores/bundle-order-stop-on-remote.scores612
-rw-r--r--cts/scheduler/scores/bundle-order-stop.scores50
-rw-r--r--cts/scheduler/scores/bundle-probe-order-1.scores60
-rw-r--r--cts/scheduler/scores/bundle-probe-order-2.scores60
-rw-r--r--cts/scheduler/scores/bundle-probe-order-3.scores60
-rw-r--r--cts/scheduler/scores/bundle-probe-remotes.scores192
-rw-r--r--cts/scheduler/scores/bundle-promoted-anticolocation-1.scores70
-rw-r--r--cts/scheduler/scores/bundle-promoted-anticolocation-2.scores70
-rw-r--r--cts/scheduler/scores/bundle-promoted-anticolocation-3.scores70
-rw-r--r--cts/scheduler/scores/bundle-promoted-anticolocation-4.scores70
-rw-r--r--cts/scheduler/scores/bundle-promoted-anticolocation-5.scores160
-rw-r--r--cts/scheduler/scores/bundle-promoted-anticolocation-6.scores160
-rw-r--r--cts/scheduler/scores/bundle-promoted-colocation-1.scores70
-rw-r--r--cts/scheduler/scores/bundle-promoted-colocation-2.scores70
-rw-r--r--cts/scheduler/scores/bundle-promoted-colocation-3.scores70
-rw-r--r--cts/scheduler/scores/bundle-promoted-colocation-4.scores70
-rw-r--r--cts/scheduler/scores/bundle-promoted-colocation-5.scores160
-rw-r--r--cts/scheduler/scores/bundle-promoted-colocation-6.scores160
-rw-r--r--cts/scheduler/scores/bundle-promoted-location-1.scores70
-rw-r--r--cts/scheduler/scores/bundle-promoted-location-2.scores67
-rw-r--r--cts/scheduler/scores/bundle-promoted-location-3.scores67
-rw-r--r--cts/scheduler/scores/bundle-promoted-location-4.scores67
-rw-r--r--cts/scheduler/scores/bundle-promoted-location-5.scores67
-rw-r--r--cts/scheduler/scores/bundle-promoted-location-6.scores67
-rw-r--r--cts/scheduler/scores/bundle-replicas-change.scores34
-rw-r--r--cts/scheduler/scores/cancel-behind-moving-remote.scores1005
-rw-r--r--cts/scheduler/scores/clone-anon-failcount.scores96
-rw-r--r--cts/scheduler/scores/clone-fail-block-colocation.scores2
-rw-r--r--cts/scheduler/scores/clone-max-zero.scores8
-rw-r--r--cts/scheduler/scores/clone-recover-no-shuffle-1.scores25
-rw-r--r--cts/scheduler/scores/clone-recover-no-shuffle-10.scores31
-rw-r--r--cts/scheduler/scores/clone-recover-no-shuffle-11.scores82
-rw-r--r--cts/scheduler/scores/clone-recover-no-shuffle-12.scores67
-rw-r--r--cts/scheduler/scores/clone-recover-no-shuffle-2.scores79
-rw-r--r--cts/scheduler/scores/clone-recover-no-shuffle-3.scores64
-rw-r--r--cts/scheduler/scores/clone-recover-no-shuffle-4.scores31
-rw-r--r--cts/scheduler/scores/clone-recover-no-shuffle-5.scores109
-rw-r--r--cts/scheduler/scores/clone-recover-no-shuffle-6.scores70
-rw-r--r--cts/scheduler/scores/clone-recover-no-shuffle-7.scores34
-rw-r--r--cts/scheduler/scores/clone-recover-no-shuffle-8.scores82
-rw-r--r--cts/scheduler/scores/clone-recover-no-shuffle-9.scores67
-rw-r--r--cts/scheduler/scores/cloned-group-stop.scores4
-rw-r--r--cts/scheduler/scores/coloc-clone-stays-active.scores22
-rw-r--r--cts/scheduler/scores/coloc-with-inner-group-member.scores46
-rw-r--r--cts/scheduler/scores/colocate-primitive-with-clone.scores48
-rw-r--r--cts/scheduler/scores/colocation-influence.scores264
-rw-r--r--cts/scheduler/scores/complex_enforce_colo.scores9
-rw-r--r--cts/scheduler/scores/enforce-colo1.scores9
-rw-r--r--cts/scheduler/scores/group-anticolocation-2.scores23
-rw-r--r--cts/scheduler/scores/group-anticolocation-3.scores23
-rw-r--r--cts/scheduler/scores/group-anticolocation-4.scores23
-rw-r--r--cts/scheduler/scores/group-anticolocation-5.scores34
-rw-r--r--cts/scheduler/scores/group-anticolocation.scores6
-rw-r--r--cts/scheduler/scores/group-dependents.scores10
-rw-r--r--cts/scheduler/scores/guest-host-not-fenceable.scores122
-rw-r--r--cts/scheduler/scores/load-stopped-loop-2.scores4
-rw-r--r--cts/scheduler/scores/load-stopped-loop.scores30
-rw-r--r--cts/scheduler/scores/migrate-begin.scores2
-rw-r--r--cts/scheduler/scores/migrate-fail-2.scores2
-rw-r--r--cts/scheduler/scores/migrate-fail-3.scores4
-rw-r--r--cts/scheduler/scores/migrate-fail-4.scores2
-rw-r--r--cts/scheduler/scores/migrate-fail-5.scores2
-rw-r--r--cts/scheduler/scores/migrate-fail-6.scores2
-rw-r--r--cts/scheduler/scores/migrate-fail-7.scores4
-rw-r--r--cts/scheduler/scores/migrate-fail-8.scores2
-rw-r--r--cts/scheduler/scores/migrate-fail-9.scores2
-rw-r--r--cts/scheduler/scores/migrate-partial-1.scores4
-rw-r--r--cts/scheduler/scores/migrate-partial-2.scores2
-rw-r--r--cts/scheduler/scores/migrate-partial-3.scores3
-rw-r--r--cts/scheduler/scores/migrate-start-complex.scores18
-rw-r--r--cts/scheduler/scores/migrate-start.scores12
-rw-r--r--cts/scheduler/scores/migrate-stop-start-complex.scores4
-rw-r--r--cts/scheduler/scores/migrate-success.scores4
-rw-r--r--cts/scheduler/scores/nested-remote-recovery.scores648
-rw-r--r--cts/scheduler/scores/no-promote-on-unrunnable-guest.scores254
-rw-r--r--cts/scheduler/scores/node-pending-timeout.scores3
-rw-r--r--cts/scheduler/scores/notifs-for-unrunnable.scores230
-rw-r--r--cts/scheduler/scores/notify-behind-stopping-remote.scores60
-rw-r--r--cts/scheduler/scores/novell-239087.scores4
-rw-r--r--cts/scheduler/scores/on_fail_demote1.scores166
-rw-r--r--cts/scheduler/scores/on_fail_demote4.scores166
-rw-r--r--cts/scheduler/scores/order-expired-failure.scores376
-rw-r--r--cts/scheduler/scores/params-6.scores23
-rw-r--r--cts/scheduler/scores/pending-node-no-uname.scores3
-rw-r--r--cts/scheduler/scores/probe-2.scores2
-rw-r--r--cts/scheduler/scores/promoted-13.scores2
-rw-r--r--cts/scheduler/scores/promoted-asymmetrical-order.scores4
-rw-r--r--cts/scheduler/scores/promoted-demote.scores12
-rw-r--r--cts/scheduler/scores/promoted-failed-demote-2.scores6
-rw-r--r--cts/scheduler/scores/promoted-failed-demote.scores6
-rw-r--r--cts/scheduler/scores/promoted-move.scores2
-rw-r--r--cts/scheduler/scores/promoted-ordering.scores10
-rw-r--r--cts/scheduler/scores/promoted-partially-demoted-group.scores10
-rw-r--r--cts/scheduler/scores/promoted-probed-score.scores92
-rw-r--r--cts/scheduler/scores/remote-connection-shutdown.scores974
-rw-r--r--cts/scheduler/scores/remote-fence-unclean-3.scores348
-rw-r--r--cts/scheduler/scores/route-remote-notify.scores122
-rw-r--r--cts/scheduler/scores/rsc-sets-clone-1.scores18
-rw-r--r--cts/scheduler/scores/start-then-stop-with-unfence.scores6
-rw-r--r--cts/scheduler/scores/stop-all-resources.scores56
-rw-r--r--cts/scheduler/scores/timeout-by-node.scores61
-rw-r--r--cts/scheduler/scores/unrunnable-2.scores2
-rw-r--r--cts/scheduler/scores/utilization-complex.scores176
-rw-r--r--cts/scheduler/scores/utilization-order2.scores2
-rw-r--r--cts/scheduler/scores/utilization-order4.scores6
-rw-r--r--cts/scheduler/scores/utilization-shuffle.scores48
-rw-r--r--cts/scheduler/scores/year-2038.scores376
-rw-r--r--cts/scheduler/summary/11-a-then-bm-b-move-a-clone-starting.summary2
-rw-r--r--cts/scheduler/summary/5-am-then-bm-a-not-migratable.summary2
-rw-r--r--cts/scheduler/summary/7-migrate-group-one-unmigratable.summary2
-rw-r--r--cts/scheduler/summary/bundle-interleave-start.summary70
-rw-r--r--cts/scheduler/summary/bundle-order-fencing.summary4
-rw-r--r--cts/scheduler/summary/bundle-order-stop-on-remote.summary6
-rw-r--r--cts/scheduler/summary/bundle-promoted-anticolocation-1.summary33
-rw-r--r--cts/scheduler/summary/bundle-promoted-anticolocation-2.summary33
-rw-r--r--cts/scheduler/summary/bundle-promoted-anticolocation-3.summary45
-rw-r--r--cts/scheduler/summary/bundle-promoted-anticolocation-4.summary45
-rw-r--r--cts/scheduler/summary/bundle-promoted-anticolocation-5.summary51
-rw-r--r--cts/scheduler/summary/bundle-promoted-anticolocation-6.summary51
-rw-r--r--cts/scheduler/summary/bundle-promoted-colocation-1.summary33
-rw-r--r--cts/scheduler/summary/bundle-promoted-colocation-2.summary33
-rw-r--r--cts/scheduler/summary/bundle-promoted-colocation-3.summary45
-rw-r--r--cts/scheduler/summary/bundle-promoted-colocation-4.summary45
-rw-r--r--cts/scheduler/summary/bundle-promoted-colocation-5.summary51
-rw-r--r--cts/scheduler/summary/bundle-promoted-colocation-6.summary51
-rw-r--r--cts/scheduler/summary/bundle-promoted-location-1.summary27
-rw-r--r--cts/scheduler/summary/bundle-promoted-location-2.summary54
-rw-r--r--cts/scheduler/summary/bundle-promoted-location-3.summary27
-rw-r--r--cts/scheduler/summary/bundle-promoted-location-4.summary27
-rw-r--r--cts/scheduler/summary/bundle-promoted-location-5.summary27
-rw-r--r--cts/scheduler/summary/bundle-promoted-location-6.summary40
-rw-r--r--cts/scheduler/summary/cancel-behind-moving-remote.summary78
-rw-r--r--cts/scheduler/summary/clone-recover-no-shuffle-1.summary29
-rw-r--r--cts/scheduler/summary/clone-recover-no-shuffle-10.summary29
-rw-r--r--cts/scheduler/summary/clone-recover-no-shuffle-11.summary34
-rw-r--r--cts/scheduler/summary/clone-recover-no-shuffle-12.summary43
-rw-r--r--cts/scheduler/summary/clone-recover-no-shuffle-2.summary32
-rw-r--r--cts/scheduler/summary/clone-recover-no-shuffle-3.summary42
-rw-r--r--cts/scheduler/summary/clone-recover-no-shuffle-4.summary29
-rw-r--r--cts/scheduler/summary/clone-recover-no-shuffle-5.summary32
-rw-r--r--cts/scheduler/summary/clone-recover-no-shuffle-6.summary42
-rw-r--r--cts/scheduler/summary/clone-recover-no-shuffle-7.summary38
-rw-r--r--cts/scheduler/summary/clone-recover-no-shuffle-8.summary52
-rw-r--r--cts/scheduler/summary/clone-recover-no-shuffle-9.summary56
-rw-r--r--cts/scheduler/summary/coloc-with-inner-group-member.summary45
-rw-r--r--cts/scheduler/summary/group-anticolocation-2.summary41
-rw-r--r--cts/scheduler/summary/group-anticolocation-3.summary33
-rw-r--r--cts/scheduler/summary/group-anticolocation-4.summary41
-rw-r--r--cts/scheduler/summary/group-anticolocation-5.summary41
-rw-r--r--cts/scheduler/summary/group-anticolocation.summary16
-rw-r--r--cts/scheduler/summary/migrate-fencing.summary2
-rw-r--r--cts/scheduler/summary/no-promote-on-unrunnable-guest.summary2
-rw-r--r--cts/scheduler/summary/node-pending-timeout.summary26
-rw-r--r--cts/scheduler/summary/pending-node-no-uname.summary23
-rw-r--r--cts/scheduler/summary/promoted-ordering.summary24
-rw-r--r--cts/scheduler/summary/promoted-probed-score.summary124
-rw-r--r--cts/scheduler/summary/timeout-by-node.summary43
-rw-r--r--cts/scheduler/summary/unfence-definition.summary2
-rw-r--r--cts/scheduler/summary/unfence-parameters.summary2
-rw-r--r--cts/scheduler/xml/anon-instance-pending.xml2
-rw-r--r--cts/scheduler/xml/bundle-interleave-start.xml3
-rw-r--r--cts/scheduler/xml/bundle-promoted-anticolocation-1.xml238
-rw-r--r--cts/scheduler/xml/bundle-promoted-anticolocation-2.xml238
-rw-r--r--cts/scheduler/xml/bundle-promoted-anticolocation-3.xml238
-rw-r--r--cts/scheduler/xml/bundle-promoted-anticolocation-4.xml238
-rw-r--r--cts/scheduler/xml/bundle-promoted-anticolocation-5.xml368
-rw-r--r--cts/scheduler/xml/bundle-promoted-anticolocation-6.xml368
-rw-r--r--cts/scheduler/xml/bundle-promoted-colocation-1.xml237
-rw-r--r--cts/scheduler/xml/bundle-promoted-colocation-2.xml237
-rw-r--r--cts/scheduler/xml/bundle-promoted-colocation-3.xml237
-rw-r--r--cts/scheduler/xml/bundle-promoted-colocation-4.xml237
-rw-r--r--cts/scheduler/xml/bundle-promoted-colocation-5.xml367
-rw-r--r--cts/scheduler/xml/bundle-promoted-colocation-6.xml367
-rw-r--r--cts/scheduler/xml/bundle-promoted-location-1.xml221
-rw-r--r--cts/scheduler/xml/bundle-promoted-location-2.xml218
-rw-r--r--cts/scheduler/xml/bundle-promoted-location-3.xml225
-rw-r--r--cts/scheduler/xml/bundle-promoted-location-4.xml225
-rw-r--r--cts/scheduler/xml/bundle-promoted-location-5.xml231
-rw-r--r--cts/scheduler/xml/bundle-promoted-location-6.xml224
-rw-r--r--cts/scheduler/xml/cancel-behind-moving-remote.xml14
-rw-r--r--cts/scheduler/xml/clone-recover-no-shuffle-1.xml113
-rw-r--r--cts/scheduler/xml/clone-recover-no-shuffle-10.xml120
-rw-r--r--cts/scheduler/xml/clone-recover-no-shuffle-11.xml153
-rw-r--r--cts/scheduler/xml/clone-recover-no-shuffle-12.xml186
-rw-r--r--cts/scheduler/xml/clone-recover-no-shuffle-2.xml141
-rw-r--r--cts/scheduler/xml/clone-recover-no-shuffle-3.xml180
-rw-r--r--cts/scheduler/xml/clone-recover-no-shuffle-4.xml115
-rw-r--r--cts/scheduler/xml/clone-recover-no-shuffle-5.xml143
-rw-r--r--cts/scheduler/xml/clone-recover-no-shuffle-6.xml182
-rw-r--r--cts/scheduler/xml/clone-recover-no-shuffle-7.xml120
-rw-r--r--cts/scheduler/xml/clone-recover-no-shuffle-8.xml153
-rw-r--r--cts/scheduler/xml/clone-recover-no-shuffle-9.xml186
-rw-r--r--cts/scheduler/xml/coloc-with-inner-group-member.xml258
-rw-r--r--cts/scheduler/xml/group-anticolocation-2.xml166
-rw-r--r--cts/scheduler/xml/group-anticolocation-3.xml165
-rw-r--r--cts/scheduler/xml/group-anticolocation-4.xml167
-rw-r--r--cts/scheduler/xml/group-anticolocation-5.xml188
-rw-r--r--cts/scheduler/xml/group-anticolocation.xml14
-rw-r--r--cts/scheduler/xml/node-pending-timeout.xml27
-rw-r--r--cts/scheduler/xml/pending-node-no-uname.xml26
-rw-r--r--cts/scheduler/xml/promoted-ordering.xml26
-rw-r--r--cts/scheduler/xml/promoted-probed-score.xml4
-rw-r--r--cts/scheduler/xml/timeout-by-node.xml139
-rw-r--r--cts/support/Makefile.am6
-rw-r--r--daemons/Makefile.am10
-rw-r--r--daemons/attrd/Makefile.am43
-rw-r--r--daemons/attrd/attrd_alerts.c21
-rw-r--r--daemons/attrd/attrd_attributes.c46
-rw-r--r--daemons/attrd/attrd_cib.c464
-rw-r--r--daemons/attrd/attrd_corosync.c21
-rw-r--r--daemons/attrd/attrd_elections.c14
-rw-r--r--daemons/attrd/attrd_ipc.c25
-rw-r--r--daemons/attrd/attrd_messages.c34
-rw-r--r--daemons/attrd/attrd_sync.c4
-rw-r--r--daemons/attrd/attrd_utils.c59
-rw-r--r--daemons/attrd/pacemaker-attrd.c140
-rw-r--r--daemons/attrd/pacemaker-attrd.h17
-rw-r--r--daemons/based/Makefile.am42
-rw-r--r--daemons/based/based_callbacks.c854
-rw-r--r--daemons/based/based_common.c352
-rw-r--r--daemons/based/based_io.c22
-rw-r--r--daemons/based/based_messages.c125
-rw-r--r--daemons/based/based_notify.c99
-rw-r--r--daemons/based/based_operation.c59
-rw-r--r--daemons/based/based_remote.c29
-rw-r--r--daemons/based/based_transaction.c167
-rw-r--r--daemons/based/based_transaction.h24
-rw-r--r--daemons/based/pacemaker-based.c17
-rw-r--r--daemons/based/pacemaker-based.h45
-rw-r--r--daemons/controld/Makefile.am36
-rw-r--r--daemons/controld/controld_callbacks.c20
-rw-r--r--daemons/controld/controld_cib.c298
-rw-r--r--daemons/controld/controld_cib.h12
-rw-r--r--daemons/controld/controld_control.c37
-rw-r--r--daemons/controld/controld_corosync.c8
-rw-r--r--daemons/controld/controld_election.c7
-rw-r--r--daemons/controld/controld_execd.c92
-rw-r--r--daemons/controld/controld_execd_state.c15
-rw-r--r--daemons/controld/controld_fencing.c87
-rw-r--r--daemons/controld/controld_fencing.h2
-rw-r--r--daemons/controld/controld_fsa.c7
-rw-r--r--daemons/controld/controld_globals.h6
-rw-r--r--daemons/controld/controld_join_client.c36
-rw-r--r--daemons/controld/controld_join_dc.c133
-rw-r--r--daemons/controld/controld_lrm.h5
-rw-r--r--daemons/controld/controld_membership.c40
-rw-r--r--daemons/controld/controld_messages.c197
-rw-r--r--daemons/controld/controld_metadata.c6
-rw-r--r--daemons/controld/controld_remote_ra.c99
-rw-r--r--daemons/controld/controld_schedulerd.c23
-rw-r--r--daemons/controld/controld_te_actions.c25
-rw-r--r--daemons/controld/controld_te_callbacks.c6
-rw-r--r--daemons/controld/controld_te_events.c12
-rw-r--r--daemons/controld/controld_te_utils.c175
-rw-r--r--daemons/controld/controld_throttle.c6
-rw-r--r--daemons/controld/controld_transition.c13
-rw-r--r--daemons/controld/controld_transition.h2
-rw-r--r--daemons/controld/controld_utils.c2
-rw-r--r--daemons/controld/pacemaker-controld.c2
-rw-r--r--daemons/controld/pacemaker-controld.h3
-rw-r--r--daemons/execd/Makefile.am43
-rw-r--r--daemons/execd/cts-exec-helper.c34
-rw-r--r--daemons/execd/execd_commands.c53
-rw-r--r--daemons/execd/pacemaker-execd.c10
-rw-r--r--daemons/execd/remoted_pidone.c18
-rw-r--r--daemons/execd/remoted_tls.c42
-rw-r--r--daemons/fenced/Makefile.am33
-rw-r--r--daemons/fenced/cts-fence-helper.c43
-rw-r--r--daemons/fenced/fenced_cib.c734
-rw-r--r--daemons/fenced/fenced_commands.c128
-rw-r--r--daemons/fenced/fenced_remote.c61
-rw-r--r--daemons/fenced/fenced_scheduler.c225
-rw-r--r--daemons/fenced/pacemaker-fenced.c929
-rw-r--r--daemons/fenced/pacemaker-fenced.h31
-rw-r--r--daemons/pacemakerd/Makefile.am8
-rw-r--r--daemons/pacemakerd/pacemakerd.c26
-rw-r--r--daemons/pacemakerd/pacemakerd.h6
-rw-r--r--daemons/pacemakerd/pcmkd_corosync.c16
-rw-r--r--daemons/pacemakerd/pcmkd_messages.c2
-rw-r--r--daemons/pacemakerd/pcmkd_subdaemons.c27
-rw-r--r--daemons/schedulerd/Makefile.am22
-rw-r--r--daemons/schedulerd/pacemaker-schedulerd.h4
-rw-r--r--daemons/schedulerd/schedulerd_messages.c34
-rw-r--r--devel/Makefile.am56
-rw-r--r--doc/Makefile.am25
-rwxr-xr-xdoc/abi-check.in10
-rw-r--r--doc/sphinx/Clusters_from_Scratch/apache.rst2
-rw-r--r--doc/sphinx/Clusters_from_Scratch/cluster-setup.rst8
-rw-r--r--doc/sphinx/Makefile.am44
-rw-r--r--doc/sphinx/Pacemaker_Administration/administrative.rst150
-rw-r--r--doc/sphinx/Pacemaker_Administration/alerts.rst6
-rw-r--r--doc/sphinx/Pacemaker_Administration/configuring.rst109
-rw-r--r--doc/sphinx/Pacemaker_Administration/index.rst2
-rw-r--r--doc/sphinx/Pacemaker_Administration/moving.rst (renamed from doc/sphinx/Pacemaker_Explained/advanced-options.rst)309
-rw-r--r--doc/sphinx/Pacemaker_Administration/pcs-crmsh.rst14
-rw-r--r--doc/sphinx/Pacemaker_Development/c.rst14
-rw-r--r--doc/sphinx/Pacemaker_Development/components.rst52
-rw-r--r--doc/sphinx/Pacemaker_Development/helpers.rst5
-rw-r--r--doc/sphinx/Pacemaker_Explained/acls.rst18
-rw-r--r--doc/sphinx/Pacemaker_Explained/cluster-options.rst921
-rw-r--r--doc/sphinx/Pacemaker_Explained/collective.rst (renamed from doc/sphinx/Pacemaker_Explained/advanced-resources.rst)22
-rw-r--r--doc/sphinx/Pacemaker_Explained/constraints.rst65
-rw-r--r--doc/sphinx/Pacemaker_Explained/index.rst7
-rw-r--r--doc/sphinx/Pacemaker_Explained/local-options.rst515
-rw-r--r--doc/sphinx/Pacemaker_Explained/nodes.rst48
-rw-r--r--doc/sphinx/Pacemaker_Explained/operations.rst623
-rw-r--r--doc/sphinx/Pacemaker_Explained/options.rst622
-rw-r--r--doc/sphinx/Pacemaker_Explained/resources.rst424
-rw-r--r--doc/sphinx/Pacemaker_Explained/reusing-configuration.rst5
-rw-r--r--doc/sphinx/Pacemaker_Explained/status.rst72
-rw-r--r--doc/sphinx/Pacemaker_Explained/utilization.rst38
-rw-r--r--doc/sphinx/Pacemaker_Remote/alternatives.rst9
-rw-r--r--doc/sphinx/Pacemaker_Remote/baremetal-tutorial.rst2
-rw-r--r--doc/sphinx/Pacemaker_Remote/kvm-tutorial.rst2
-rw-r--r--doc/sphinx/conf.py.in10
-rw-r--r--etc/Makefile.am7
-rw-r--r--etc/sysconfig/pacemaker.in68
-rw-r--r--include/Makefile.am22
-rw-r--r--include/crm/Makefile.am23
-rw-r--r--include/crm/cib/cib_types.h139
-rw-r--r--include/crm/cib/internal.h124
-rw-r--r--include/crm/cluster.h8
-rw-r--r--include/crm/cluster/Makefile.am5
-rw-r--r--include/crm/cluster/compat.h8
-rw-r--r--include/crm/cluster/internal.h8
-rw-r--r--include/crm/common/Makefile.am73
-rw-r--r--include/crm/common/action_relation_internal.h132
-rw-r--r--include/crm/common/actions.h467
-rw-r--r--include/crm/common/actions_internal.h57
-rw-r--r--include/crm/common/alerts_internal.h3
-rw-r--r--include/crm/common/cib_internal.h23
-rw-r--r--include/crm/common/clone_internal.h33
-rw-r--r--include/crm/common/digests_internal.h33
-rw-r--r--include/crm/common/failcounts_internal.h41
-rw-r--r--include/crm/common/group_internal.h27
-rw-r--r--include/crm/common/health_internal.h2
-rw-r--r--include/crm/common/internal.h23
-rw-r--r--include/crm/common/ipc.h6
-rw-r--r--include/crm/common/ipc_internal.h11
-rw-r--r--include/crm/common/logging.h8
-rw-r--r--include/crm/common/logging_compat.h4
-rw-r--r--include/crm/common/logging_internal.h102
-rw-r--r--include/crm/common/nodes.h144
-rw-r--r--include/crm/common/nvpair.h1
-rw-r--r--include/crm/common/options_internal.h42
-rw-r--r--include/crm/common/output_internal.h5
-rw-r--r--include/crm/common/remote_internal.h4
-rw-r--r--include/crm/common/resources.h502
-rw-r--r--include/crm/common/results.h4
-rw-r--r--include/crm/common/results_compat.h5
-rw-r--r--include/crm/common/results_internal.h3
-rw-r--r--include/crm/common/roles.h62
-rw-r--r--include/crm/common/roles_internal.h30
-rw-r--r--include/crm/common/scheduler.h238
-rw-r--r--include/crm/common/scheduler_internal.h67
-rw-r--r--include/crm/common/scheduler_types.h39
-rw-r--r--include/crm/common/tags.h35
-rw-r--r--include/crm/common/tickets.h39
-rw-r--r--include/crm/common/unittest_internal.h40
-rw-r--r--include/crm/common/util.h24
-rw-r--r--include/crm/common/util_compat.h9
-rw-r--r--include/crm/common/xml.h36
-rw-r--r--include/crm/common/xml_compat.h15
-rw-r--r--include/crm/common/xml_internal.h42
-rw-r--r--include/crm/compatibility.h24
-rw-r--r--include/crm/crm.h78
-rw-r--r--include/crm/crm_compat.h129
-rw-r--r--include/crm/lrmd.h69
-rw-r--r--include/crm/lrmd_events.h108
-rw-r--r--include/crm/lrmd_internal.h1
-rw-r--r--include/crm/msg_xml.h33
-rw-r--r--include/crm/msg_xml_compat.h53
-rw-r--r--include/crm/pengine/Makefile.am11
-rw-r--r--include/crm/pengine/common.h123
-rw-r--r--include/crm/pengine/common_compat.h35
-rw-r--r--include/crm/pengine/complex.h22
-rw-r--r--include/crm/pengine/internal.h615
-rw-r--r--include/crm/pengine/pe_types.h530
-rw-r--r--include/crm/pengine/pe_types_compat.h221
-rw-r--r--include/crm/pengine/remote_internal.h23
-rw-r--r--include/crm/pengine/status.h47
-rw-r--r--include/crm/services_compat.h7
-rw-r--r--include/crm_internal.h5
-rw-r--r--include/pacemaker-internal.h5
-rw-r--r--include/pacemaker.h53
-rw-r--r--include/pcmki/Makefile.am16
-rw-r--r--include/pcmki/pcmki_agents.h19
-rw-r--r--include/pcmki/pcmki_cluster_queries.h7
-rw-r--r--include/pcmki/pcmki_resource.h8
-rw-r--r--include/pcmki/pcmki_sched_allocate.h50
-rw-r--r--include/pcmki/pcmki_sched_utils.h33
-rw-r--r--include/pcmki/pcmki_scheduler.h31
-rw-r--r--include/pcmki/pcmki_simulate.h26
-rw-r--r--include/pcmki/pcmki_status.h8
-rw-r--r--include/pcmki/pcmki_transition.h2
-rw-r--r--include/portability.h41
-rw-r--r--lib/Makefile.am18
-rw-r--r--lib/cib/Makefile.am22
-rw-r--r--lib/cib/cib_attrs.c19
-rw-r--r--lib/cib/cib_client.c112
-rw-r--r--lib/cib/cib_file.c477
-rw-r--r--lib/cib/cib_native.c56
-rw-r--r--lib/cib/cib_ops.c228
-rw-r--r--lib/cib/cib_remote.c38
-rw-r--r--lib/cib/cib_utils.c511
-rw-r--r--lib/cluster/Makefile.am19
-rw-r--r--lib/cluster/cluster.c33
-rw-r--r--lib/cluster/cpg.c18
-rw-r--r--lib/cluster/crmcluster_private.h6
-rw-r--r--lib/cluster/membership.c85
-rw-r--r--lib/common/Makefile.am33
-rw-r--r--lib/common/acl.c41
-rw-r--r--lib/common/actions.c (renamed from lib/common/operations.c)28
-rw-r--r--lib/common/alerts.c87
-rw-r--r--lib/common/cib.c23
-rw-r--r--lib/common/crmcommon_private.h63
-rw-r--r--lib/common/digest.c4
-rw-r--r--lib/common/io.c8
-rw-r--r--lib/common/ipc_attrd.c37
-rw-r--r--lib/common/ipc_client.c461
-rw-r--r--lib/common/ipc_common.c2
-rw-r--r--lib/common/ipc_controld.c61
-rw-r--r--lib/common/ipc_pacemakerd.c4
-rw-r--r--lib/common/ipc_schedulerd.c4
-rw-r--r--lib/common/ipc_server.c48
-rw-r--r--lib/common/iso8601.c3
-rw-r--r--lib/common/logging.c151
-rw-r--r--lib/common/mainloop.c42
-rw-r--r--lib/common/mock.c26
-rw-r--r--lib/common/mock_private.h6
-rw-r--r--lib/common/nvpair.c92
-rw-r--r--lib/common/options.c19
-rw-r--r--lib/common/output_html.c4
-rw-r--r--lib/common/output_log.c130
-rw-r--r--lib/common/output_xml.c20
-rw-r--r--lib/common/patchset.c121
-rw-r--r--lib/common/patchset_display.c26
-rw-r--r--lib/common/remote.c39
-rw-r--r--lib/common/results.c133
-rw-r--r--lib/common/scheduler.c14
-rw-r--r--lib/common/schemas.c149
-rw-r--r--lib/common/strings.c16
-rw-r--r--lib/common/tests/Makefile.am4
-rw-r--r--lib/common/tests/acl/Makefile.am11
-rw-r--r--lib/common/tests/actions/Makefile.am (renamed from lib/common/tests/operations/Makefile.am)16
-rw-r--r--lib/common/tests/actions/copy_in_properties_test.c (renamed from lib/common/tests/operations/copy_in_properties_test.c)0
-rw-r--r--lib/common/tests/actions/expand_plus_plus_test.c (renamed from lib/common/tests/operations/expand_plus_plus_test.c)0
-rw-r--r--lib/common/tests/actions/fix_plus_plus_recursive_test.c (renamed from lib/common/tests/operations/fix_plus_plus_recursive_test.c)0
-rw-r--r--lib/common/tests/actions/parse_op_key_test.c (renamed from lib/common/tests/operations/parse_op_key_test.c)0
-rw-r--r--lib/common/tests/actions/pcmk_is_probe_test.c (renamed from lib/common/tests/operations/pcmk_is_probe_test.c)0
-rw-r--r--lib/common/tests/actions/pcmk_xe_is_probe_test.c (renamed from lib/common/tests/operations/pcmk_xe_is_probe_test.c)0
-rw-r--r--lib/common/tests/actions/pcmk_xe_mask_probe_failure_test.c (renamed from lib/common/tests/operations/pcmk_xe_mask_probe_failure_test.c)0
-rw-r--r--lib/common/tests/agents/Makefile.am12
-rw-r--r--lib/common/tests/agents/crm_parse_agent_spec_test.c18
-rw-r--r--lib/common/tests/cmdline/Makefile.am5
-rw-r--r--lib/common/tests/cmdline/pcmk__cmdline_preproc_test.c13
-rw-r--r--lib/common/tests/cmdline/pcmk__new_common_args_test.c62
-rw-r--r--lib/common/tests/flags/Makefile.am11
-rw-r--r--lib/common/tests/io/Makefile.am7
-rw-r--r--lib/common/tests/lists/Makefile.am9
-rw-r--r--lib/common/tests/nvpair/Makefile.am8
-rw-r--r--lib/common/tests/options/Makefile.am9
-rw-r--r--lib/common/tests/options/pcmk__set_env_option_test.c57
-rw-r--r--lib/common/tests/output/Makefile.am20
-rw-r--r--lib/common/tests/output/pcmk__output_new_test.c8
-rw-r--r--lib/common/tests/results/Makefile.am4
-rw-r--r--lib/common/tests/results/pcmk__results_test.c8
-rw-r--r--lib/common/tests/scores/Makefile.am9
-rw-r--r--lib/common/tests/scores/pcmk__add_scores_test.c4
-rw-r--r--lib/common/tests/strings/Makefile.am54
-rw-r--r--lib/common/tests/strings/pcmk__compress_test.c2
-rw-r--r--lib/common/tests/strings/pcmk__guint_from_hash_test.c4
-rw-r--r--lib/common/tests/strings/pcmk__scan_ll_test.c64
-rw-r--r--lib/common/tests/utils/Makefile.am22
-rw-r--r--lib/common/tests/utils/pcmk__fail_attr_name_test.c36
-rw-r--r--lib/common/tests/utils/pcmk__failcount_name_test.c35
-rw-r--r--lib/common/tests/utils/pcmk__lastfailure_name_test.c35
-rw-r--r--lib/common/tests/xml/Makefile.am6
-rw-r--r--lib/common/tests/xml/pcmk__xe_foreach_child_test.c13
-rw-r--r--lib/common/tests/xpath/Makefile.am4
-rw-r--r--lib/common/watchdog.c13
-rw-r--r--lib/common/xml.c527
-rw-r--r--lib/common/xml_attr.c84
-rw-r--r--lib/common/xml_display.c18
-rw-r--r--lib/common/xpath.c13
-rw-r--r--lib/fencing/Makefile.am12
-rw-r--r--lib/fencing/st_client.c50
-rw-r--r--lib/fencing/st_lha.c13
-rw-r--r--lib/fencing/st_rhcs.c15
-rw-r--r--lib/lrmd/Makefile.am17
-rw-r--r--lib/lrmd/lrmd_alerts.c4
-rw-r--r--lib/lrmd/lrmd_client.c67
-rw-r--r--lib/pacemaker/Makefile.am20
-rw-r--r--lib/pacemaker/libpacemaker_private.h712
-rw-r--r--lib/pacemaker/pcmk_acl.c142
-rw-r--r--lib/pacemaker/pcmk_agents.c243
-rw-r--r--lib/pacemaker/pcmk_cluster_queries.c23
-rw-r--r--lib/pacemaker/pcmk_fence.c59
-rw-r--r--lib/pacemaker/pcmk_graph_consumer.c52
-rw-r--r--lib/pacemaker/pcmk_graph_logging.c15
-rw-r--r--lib/pacemaker/pcmk_graph_producer.c420
-rw-r--r--lib/pacemaker/pcmk_injections.c60
-rw-r--r--lib/pacemaker/pcmk_output.c512
-rw-r--r--lib/pacemaker/pcmk_resource.c30
-rw-r--r--lib/pacemaker/pcmk_rule.c67
-rw-r--r--lib/pacemaker/pcmk_sched_actions.c860
-rw-r--r--lib/pacemaker/pcmk_sched_bundle.c1422
-rw-r--r--lib/pacemaker/pcmk_sched_clone.c684
-rw-r--r--lib/pacemaker/pcmk_sched_colocation.c1266
-rw-r--r--lib/pacemaker/pcmk_sched_constraints.c199
-rw-r--r--lib/pacemaker/pcmk_sched_fencing.c181
-rw-r--r--lib/pacemaker/pcmk_sched_group.c581
-rw-r--r--lib/pacemaker/pcmk_sched_instances.c738
-rw-r--r--lib/pacemaker/pcmk_sched_location.c216
-rw-r--r--lib/pacemaker/pcmk_sched_migration.c220
-rw-r--r--lib/pacemaker/pcmk_sched_nodes.c221
-rw-r--r--lib/pacemaker/pcmk_sched_ordering.c459
-rw-r--r--lib/pacemaker/pcmk_sched_primitive.c641
-rw-r--r--lib/pacemaker/pcmk_sched_probes.c350
-rw-r--r--lib/pacemaker/pcmk_sched_promotable.c443
-rw-r--r--lib/pacemaker/pcmk_sched_recurring.c240
-rw-r--r--lib/pacemaker/pcmk_sched_remote.c252
-rw-r--r--lib/pacemaker/pcmk_sched_resource.c517
-rw-r--r--lib/pacemaker/pcmk_sched_tickets.c142
-rw-r--r--lib/pacemaker/pcmk_sched_utilization.c102
-rw-r--r--lib/pacemaker/pcmk_scheduler.c421
-rw-r--r--lib/pacemaker/pcmk_simulate.c269
-rw-r--r--lib/pacemaker/pcmk_status.c73
-rw-r--r--lib/pengine/Makefile.am52
-rw-r--r--lib/pengine/bundle.c415
-rw-r--r--lib/pengine/clone.c428
-rw-r--r--lib/pengine/common.c339
-rw-r--r--lib/pengine/complex.c338
-rw-r--r--lib/pengine/failcounts.c247
-rw-r--r--lib/pengine/group.c102
-rw-r--r--lib/pengine/native.c335
-rw-r--r--lib/pengine/pe_actions.c1303
-rw-r--r--lib/pengine/pe_digest.c162
-rw-r--r--lib/pengine/pe_health.c16
-rw-r--r--lib/pengine/pe_notif.c226
-rw-r--r--lib/pengine/pe_output.c552
-rw-r--r--lib/pengine/pe_status_private.h83
-rw-r--r--lib/pengine/remote.c100
-rw-r--r--lib/pengine/rules.c47
-rw-r--r--lib/pengine/rules_alerts.c13
-rw-r--r--lib/pengine/status.c268
-rw-r--r--lib/pengine/tags.c37
-rw-r--r--lib/pengine/tests/Makefile.am15
-rw-r--r--lib/pengine/tests/native/Makefile.am4
-rw-r--r--lib/pengine/tests/native/native_find_rsc_test.c724
-rw-r--r--lib/pengine/tests/native/pe_base_name_eq_test.c31
-rw-r--r--lib/pengine/tests/status/Makefile.am12
-rw-r--r--lib/pengine/tests/status/pe_find_node_any_test.c6
-rw-r--r--lib/pengine/tests/status/pe_find_node_id_test.c6
-rw-r--r--lib/pengine/tests/status/pe_find_node_test.c6
-rw-r--r--lib/pengine/tests/status/pe_new_working_set_test.c10
-rw-r--r--lib/pengine/tests/status/set_working_set_defaults_test.c27
-rw-r--r--lib/pengine/tests/utils/Makefile.am5
-rw-r--r--lib/pengine/tests/utils/pe__cmp_node_name_test.c6
-rw-r--r--lib/pengine/tests/utils/pe__cmp_rsc_priority_test.c4
-rw-r--r--lib/pengine/unpack.c1794
-rw-r--r--lib/pengine/utils.c331
-rw-r--r--lib/pengine/variant.h91
-rw-r--r--lib/services/Makefile.am13
-rw-r--r--lib/services/dbus.c2
-rw-r--r--lib/services/services.c8
-rw-r--r--lib/services/services_linux.c22
-rw-r--r--lib/services/services_lsb.c5
-rw-r--r--lib/services/services_nagios.c4
-rw-r--r--lib/services/systemd.c26
-rw-r--r--lib/services/upstart.c21
-rw-r--r--m4/REQUIRE_PROG.m418
-rw-r--r--m4/version.m42
-rw-r--r--maint/Makefile.am44
-rw-r--r--maint/bumplibs.in57
-rw-r--r--mk/common.mk8
-rw-r--r--mk/release.mk21
-rw-r--r--mk/tap.mk13
-rw-r--r--po/zh_CN.po397
-rw-r--r--python/Makefile.am10
-rw-r--r--python/pacemaker/Makefile.am4
-rw-r--r--python/pacemaker/_cts/CTS.py31
-rw-r--r--python/pacemaker/_cts/Makefile.am14
-rw-r--r--python/pacemaker/_cts/audits.py1029
-rw-r--r--python/pacemaker/_cts/cib.py425
-rw-r--r--python/pacemaker/_cts/cibxml.py734
-rw-r--r--python/pacemaker/_cts/clustermanager.py916
-rw-r--r--python/pacemaker/_cts/cmcorosync.py80
-rw-r--r--python/pacemaker/_cts/environment.py35
-rw-r--r--python/pacemaker/_cts/input.py18
-rw-r--r--python/pacemaker/_cts/logging.py2
-rw-r--r--python/pacemaker/_cts/network.py59
-rw-r--r--python/pacemaker/_cts/patterns.py14
-rw-r--r--python/pacemaker/_cts/process.py2
-rw-r--r--python/pacemaker/_cts/remote.py8
-rw-r--r--python/pacemaker/_cts/scenarios.py422
-rw-r--r--python/pacemaker/_cts/test.py35
-rw-r--r--python/pacemaker/_cts/tests/Makefile.am14
-rw-r--r--python/pacemaker/_cts/tests/__init__.py87
-rw-r--r--python/pacemaker/_cts/tests/componentfail.py167
-rw-r--r--python/pacemaker/_cts/tests/ctstest.py252
-rw-r--r--python/pacemaker/_cts/tests/fliptest.py61
-rw-r--r--python/pacemaker/_cts/tests/maintenancemode.py238
-rw-r--r--python/pacemaker/_cts/tests/nearquorumpointtest.py125
-rw-r--r--python/pacemaker/_cts/tests/partialstart.py75
-rw-r--r--python/pacemaker/_cts/tests/reattach.py221
-rw-r--r--python/pacemaker/_cts/tests/remotebasic.py39
-rw-r--r--python/pacemaker/_cts/tests/remotedriver.py556
-rw-r--r--python/pacemaker/_cts/tests/remotemigrate.py63
-rw-r--r--python/pacemaker/_cts/tests/remoterscfailure.py73
-rw-r--r--python/pacemaker/_cts/tests/remotestonithd.py62
-rw-r--r--python/pacemaker/_cts/tests/resourcerecover.py175
-rw-r--r--python/pacemaker/_cts/tests/restartonebyone.py58
-rw-r--r--python/pacemaker/_cts/tests/restarttest.py49
-rw-r--r--python/pacemaker/_cts/tests/resynccib.py75
-rw-r--r--python/pacemaker/_cts/tests/simulstart.py42
-rw-r--r--python/pacemaker/_cts/tests/simulstartlite.py133
-rw-r--r--python/pacemaker/_cts/tests/simulstop.py42
-rw-r--r--python/pacemaker/_cts/tests/simulstoplite.py91
-rw-r--r--python/pacemaker/_cts/tests/splitbraintest.py215
-rw-r--r--python/pacemaker/_cts/tests/standbytest.py110
-rw-r--r--python/pacemaker/_cts/tests/startonebyone.py55
-rw-r--r--python/pacemaker/_cts/tests/starttest.py54
-rw-r--r--python/pacemaker/_cts/tests/stonithdtest.py145
-rw-r--r--python/pacemaker/_cts/tests/stoponebyone.py56
-rw-r--r--python/pacemaker/_cts/tests/stoptest.py99
-rw-r--r--python/pacemaker/_cts/timer.py63
-rw-r--r--python/pacemaker/_cts/watcher.py10
-rw-r--r--python/pacemaker/buildoptions.py.in3
-rw-r--r--python/pylintrc3
-rw-r--r--python/setup.py.in2
-rw-r--r--python/tests/Makefile.am3
-rw-r--r--python/tests/__init__.py0
-rw-r--r--python/tests/test_cts_network.py37
-rw-r--r--replace/Makefile.am28
-rw-r--r--replace/NoSuchFunctionName.c31
-rw-r--r--replace/alphasort.c55
-rw-r--r--replace/scandir.c233
-rw-r--r--replace/strchrnul.c15
-rw-r--r--replace/strerror.c37
-rw-r--r--replace/strndup.c38
-rw-r--r--replace/strnlen.c31
-rw-r--r--rpm/Makefile.am62
-rw-r--r--rpm/pacemaker.spec.in19
-rw-r--r--tests/Makefile.am9
-rw-r--r--tools/Makefile.am171
-rw-r--r--tools/attrd_updater.c16
-rw-r--r--tools/cibadmin.c12
-rwxr-xr-xtools/cluster-helper.in2
-rwxr-xr-xtools/cluster-init.in537
-rw-r--r--tools/crm_attribute.c4
-rw-r--r--tools/crm_diff.c43
-rw-r--r--tools/crm_mon.c6
-rw-r--r--tools/crm_mon.h2
-rw-r--r--tools/crm_mon_curses.c8
-rw-r--r--tools/crm_node.c751
-rw-r--r--tools/crm_resource.c339
-rw-r--r--tools/crm_resource.h61
-rw-r--r--tools/crm_resource_ban.c75
-rw-r--r--tools/crm_resource_print.c96
-rw-r--r--tools/crm_resource_runtime.c540
-rw-r--r--tools/crm_shadow.c49
-rw-r--r--tools/crm_simulate.c31
-rw-r--r--tools/crm_ticket.c74
-rw-r--r--tools/crm_verify.c53
-rw-r--r--tools/stonith_admin.c18
-rw-r--r--xml/Makefile.am85
-rw-r--r--xml/README.md1
-rw-r--r--xml/api/crm_node-2.32.rng53
-rw-r--r--xml/version-diff.sh.in60
860 files changed, 60392 insertions, 33942 deletions
diff --git a/ChangeLog b/ChangeLog
index a0a5419..e5ecf98 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,436 @@
+* Tue Dec 19 2023 Ken Gaillot <kgaillot@redhat.com> Pacemaker-2.1.7
+- 1388 commits with 358 files changed, 23771 insertions(+), 17219 deletions(-)
+
+- Features added since Pacemaker-2.1.6
+ + build: allow building with libxml2 2.12.0 and greater
+ + CIB: deprecate "ordering" attribute of "resource_set"
+ + CIB: new cluster option "node-pending-timeout" (defaulting to 0, meaning
+ no timeout, to preserve existing behavior) allows fencing of nodes that do
+ not join Pacemaker's controller group within this much time after joining
+ the cluster
+ + controller: PCMK_node_start_state now works with Pacemaker Remote nodes
+ + tools: crm_verify now supports --quiet option (currently same as default
+ behavior, but in the future, verbose behavior might become the default,
+ so script writers are recommended to explicitly add --quiet if they do not
+ want output)
+ + tools: crm_node supports standard --output-as/--output-to arguments
+ + tests: CTSlab.py was renamed to cts-lab
+
+- Fixes since Pacemaker-2.1.6
+ + logging: restore ability to enable XML trace logs by file and function
+ (regression introduced in 2.1.6)
+ + scheduler: avoid double free with disabled recurring actions
+ (regression introduced in 2.1.5)
+ + tools: consider dampening argument when setting values with attrd_updater
+ (regression introduced in 2.1.5)
+ + tools: wait for reply from crm_node -R (regression introduced in 2.0.5)
+ + agents: handle dampening parameter consistently and correctly
+ + CIB: be more strict about ignoring colocation elements without an ID
+ + controller: do not check whether watchdog fencing is enabled
+ if "stonith-watchdog-timeout" is not configured
+ + controller: don't try to execute agent action at shutdown
+ + controller: avoid race condition when updating node state during join
+ + controller: correctly determine state of a fenced node without a name
+ + controller: wait a second between fencer connection attempts
+ + libpacemaker: avoid shuffling clone instances unnecessarily
+ + libpacemaker: get bundle container's promotion score from correct node
+ + libpacemaker: role-based colocations now work with bundles
+ + libpacemaker: clone-node-max now works with cloned groups
+ + scheduler: compare anti-colocation dependent negative preferences against
+ stickiness
+ + scheduler: consider explicit colocations with group members
+ + scheduler: avoid fencing a pending node without a name
+ + scheduler: properly evaluate rules in action meta-attributes
+ + scheduler: properly sort rule-based blocks when overwriting values
+ + tools: crm_resource --wait will now wait if any actions are pending
+ (previously it would wait only if new actions were planned)
+ + tools: crm_verify --output-as=xml now includes detailed messages
+ + tools: avoid showing pending nodes as having "<3.15.1" feature set in
+ crm_mon
+ + tools: fix display of clone descriptions
+ + tools: crm_resource now reports an error rather than timing out when
+ trying to restart an unmanaged resource
+ + tools: crm_resource now properly detects which promoted role name to use
+ in ban and move constraints
+
+- Public API changes since Pacemaker-2.1.6 (all API/ABI backward-compatible)
+ + libcib: cib_t now supports transactions via new cib_api_operations_t
+ methods, new cib_transaction value in enum cib_call_options, and new
+ cib_t transaction and user members
+ + libcib: cib_t now supports setting the ACL user for methods via new
+ cib_api_operations_t set_user() method
+ + libcib: deprecate cib_api_operations_t methods inputfd(), noop(), quit(),
+ set_op_callback(), and signon_raw()
+ + libcib: deprecate cib_call_options values cib_mixed_update,
+ cib_scope_local, and cib_zero_copy
+ + libcib: deprecate cib_t op_callback member
+ + libcrmcluster: deprecate set_uuid()
+ + libcrmcluster: send_cluster_message()'s data argument is const
+ + libcrmcommon: add enum pcmk_rc_e values pcmk_rc_compression,
+ pcmk_rc_ns_resolution, and pcmk_rc_no_transaction
+ + libcrmcommon,libpe_rules,libpe_status: many APIs have been moved from
+ libpe_rules and libpe_status to libcrmcommon, sometimes with new names
+ (deprecating the old ones), as described below
+ + libcrmcommon: add (and deprecate) PCMK_DEFAULT_METADATA_TIMEOUT_MS defined
+ constant
+ + libcrmcommon: add enum pcmk_rsc_flags
+ + libcrmcommon: add enum pcmk_scheduler_flags
+ + libcrmcommon: add pcmk_action_added_to_graph
+ + libcrmcommon: add pcmk_action_always_in_graph
+ + libcrmcommon: add pcmk_action_attrs_evaluated
+ + libcrmcommon: add PCMK_ACTION_CANCEL string constant
+ + libcrmcommon: add PCMK_ACTION_CLEAR_FAILCOUNT string constant
+ + libcrmcommon: add PCMK_ACTION_CLONE_ONE_OR_MORE string constant
+ + libcrmcommon: add PCMK_ACTION_DELETE string constant
+ + libcrmcommon: add PCMK_ACTION_DEMOTE string constant
+ + libcrmcommon: add pcmk_action_demote to enum action_tasks
+ + libcrmcommon: add PCMK_ACTION_DEMOTED string constant
+ + libcrmcommon: add pcmk_action_demoted to enum action_tasks
+ + libcrmcommon: add pcmk_action_detect_loop
+ + libcrmcommon: add PCMK_ACTION_DO_SHUTDOWN string constant
+ + libcrmcommon: add pcmk_action_fence to enum action_tasks
+ + libcrmcommon: add pcmk_action_inputs_deduplicated
+ + libcrmcommon: add PCMK_ACTION_LIST string constant
+ + libcrmcommon: add PCMK_ACTION_LOAD_STOPPED string constant
+ + libcrmcommon: add PCMK_ACTION_LRM_DELETE string constant
+ + libcrmcommon: add PCMK_ACTION_MAINTENANCE_NODES string constant
+ + libcrmcommon: add PCMK_ACTION_META_DATA string constant
+ + libcrmcommon: add pcmk_action_migratable
+ + libcrmcommon: add PCMK_ACTION_MIGRATE_FROM string constant
+ + libcrmcommon: add PCMK_ACTION_MIGRATE_TO string constant
+ + libcrmcommon: add pcmk_action_migration_abort
+ + libcrmcommon: add pcmk_action_min_runnable
+ + libcrmcommon: add PCMK_ACTION_MONITOR string constant
+ + libcrmcommon: add pcmk_action_monitor to enum action_tasks
+ + libcrmcommon: add PCMK_ACTION_NOTIFIED string constant
+ + libcrmcommon: add pcmk_action_notified to enum action_tasks
+ + libcrmcommon: add PCMK_ACTION_NOTIFY string constant
+ + libcrmcommon: add pcmk_action_notify to enum action_tasks
+ + libcrmcommon: add PCMK_ACTION_OFF string constant
+ + libcrmcommon: add PCMK_ACTION_ON string constant
+ + libcrmcommon: add PCMK_ACTION_ONE_OR_MORE string constant
+ + libcrmcommon: add pcmk_action_on_dc
+ + libcrmcommon: add pcmk_action_optional
+ + libcrmcommon: add PCMK_ACTION_PROMOTE string constant
+ + libcrmcommon: add pcmk_action_promote to enum action_tasks
+ + libcrmcommon: add PCMK_ACTION_PROMOTED string constant
+ + libcrmcommon: add pcmk_action_promoted to enum action_tasks
+ + libcrmcommon: add pcmk_action_pseudo
+ + libcrmcommon: add PCMK_ACTION_REBOOT string constant
+ + libcrmcommon: add PCMK_ACTION_RELOAD string constant
+ + libcrmcommon: add PCMK_ACTION_RELOAD_AGENT string constant
+ + libcrmcommon: add pcmk_action_reschedule
+ + libcrmcommon: add pcmk_action_runnable
+ + libcrmcommon: add PCMK_ACTION_RUNNING string constant
+ + libcrmcommon: add pcmk_action_shutdown to enum action_tasks
+ + libcrmcommon: add PCMK_ACTION_START string constant
+ + libcrmcommon: add pcmk_action_start to enum action_tasks
+ + libcrmcommon: add pcmk_action_started to enum action_tasks
+ + libcrmcommon: add PCMK_ACTION_STATUS string constant
+ + libcrmcommon: add PCMK_ACTION_STONITH string constant
+ + libcrmcommon: add PCMK_ACTION_STOP string constant
+ + libcrmcommon: add pcmk_action_stop to enum action_tasks
+ + libcrmcommon: add PCMK_ACTION_STOPPED string constant
+ + libcrmcommon: add pcmk_action_stopped to enum action_tasks
+ + libcrmcommon: add pcmk_action_t type
+ + libcrmcommon: add pcmk_action_unspecified to enum action_tasks
+ + libcrmcommon: add PCMK_ACTION_VALIDATE_ALL string constant
+ + libcrmcommon: add pcmk_assignment_methods_t type
+ + libcrmcommon: add PCMK_DEFAULT_ACTION_TIMEOUT_MS defined constant
+ + libcrmcommon: add pcmk_log_xml_as()
+ + libcrmcommon: add PCMK_META_CLONE_MAX string constant
+ + libcrmcommon: add PCMK_META_CLONE_MIN string constant
+ + libcrmcommon: add PCMK_META_CLONE_NODE_MAX string constant
+ + libcrmcommon: add PCMK_META_FAILURE_TIMEOUT string constant
+ + libcrmcommon: add PCMK_META_MIGRATION_THRESHOLD string constant
+ + libcrmcommon: add PCMK_META_PROMOTED_MAX string constant
+ + libcrmcommon: add PCMK_META_PROMOTED_NODE_MAX string constant
+ + libcrmcommon: add pcmk_multiply_active_block to enum rsc_recovery_type
+ + libcrmcommon: add pcmk_multiply_active_restart to enum rsc_recovery_type
+ + libcrmcommon: add pcmk_multiply_active_stop to enum rsc_recovery_type
+ + libcrmcommon: add pcmk_multiply_active_unexpected to enum rsc_recovery_type
+ + libcrmcommon: add PCMK_NODE_ATTR_TERMINATE string constant
+ + libcrmcommon: add pcmk_node_t type
+ + libcrmcommon: add pcmk_node_variant_cluster
+ + libcrmcommon: add pcmk_node_variant_remote
+ + libcrmcommon: add pcmk_no_action_flags
+ + libcrmcommon: add pcmk_no_quorum_demote
+ + libcrmcommon: add pcmk_no_quorum_fence
+ + libcrmcommon: add pcmk_no_quorum_freeze
+ + libcrmcommon: add pcmk_no_quorum_ignore
+ + libcrmcommon: add pcmk_no_quorum_stop
+ + libcrmcommon: add pcmk_on_fail_ban to enum action_fail_response
+ + libcrmcommon: add pcmk_on_fail_block to enum action_fail_response
+ + libcrmcommon: add pcmk_on_fail_demote to enum action_fail_response
+ + libcrmcommon: add pcmk_on_fail_fence_node to enum action_fail_response
+ + libcrmcommon: add pcmk_on_fail_ignore to enum action_fail_response
+ + libcrmcommon: add pcmk_on_fail_reset_remote to enum action_fail_response
+ + libcrmcommon: add pcmk_on_fail_restart to enum action_fail_response
+ + libcrmcommon: add pcmk_on_fail_restart_container to enum action_fail_response
+ + libcrmcommon: add pcmk_on_fail_standby_node to action_fail_response
+ + libcrmcommon: add pcmk_on_fail_stop to enum action_fail_response
+ + libcrmcommon: add pcmk_probe_always to enum pe_discover_e
+ + libcrmcommon: add pcmk_probe_exclusive to enum pe_discover_e
+ + libcrmcommon: add pcmk_probe_never to enum pe_discover_e
+ + libcrmcommon: add pcmk_requires_fencing to enum rsc_start_requirement
+ + libcrmcommon: add pcmk_requires_nothing to enum rsc_start_requirement
+ + libcrmcommon: add pcmk_requires_quorum to enum rsc_start_requirement
+ + libcrmcommon: add pcmk_resource_t type
+ + libcrmcommon: add pcmk_role_promoted to enum rsc_role_e
+ + libcrmcommon: add pcmk_role_started to enum rsc_role_e
+ + libcrmcommon: add pcmk_role_stopped to enum rsc_role_e
+ + libcrmcommon: add pcmk_role_unknown to enum rsc_role_e
+ + libcrmcommon: add pcmk_role_unpromoted to enum rsc_role_e
+ + libcrmcommon: add pcmk_rsc_match_anon_basename
+ + libcrmcommon: add pcmk_rsc_match_basename
+ + libcrmcommon: add pcmk_rsc_match_clone_only
+ + libcrmcommon: add pcmk_rsc_match_current_node
+ + libcrmcommon: add pcmk_rsc_match_history
+ + libcrmcommon: add pcmk_rsc_methods_t type
+ + libcrmcommon: add pcmk_rsc_variant_bundle
+ + libcrmcommon: add pcmk_rsc_variant_clone
+ + libcrmcommon: add pcmk_rsc_variant_group
+ + libcrmcommon: add pcmk_rsc_variant_primitive
+ + libcrmcommon: add pcmk_rsc_variant_unknown
+ + libcrmcommon: add pcmk_scheduler_t type
+ + libcrmcommon: add pcmk_tag_t type
+ + libcrmcommon: add pcmk_ticket_t type
+ + libcrmcommon: add PCMK_XA_FORMAT string constant
+ + libcrmcommon: crm_ipc_send()'s message argument is now const
+ + libcrmcommon: deprecate action_demote in enum action_tasks
+ + libcrmcommon: deprecate action_demoted in enum action_tasks
+ + libcrmcommon: deprecate action_fail_block in enum action_fail_response
+ + libcrmcommon: deprecate action_fail_demote in enum action_fail_response
+ + libcrmcommon: deprecate action_fail_fence in enum action_fail_response
+ + libcrmcommon: deprecate action_fail_ignore in enum action_fail_response
+ + libcrmcommon: deprecate action_fail_migrate in enum action_fail_response
+ + libcrmcommon: deprecate action_fail_recover in enum action_fail_response
+ + libcrmcommon: deprecate action_fail_reset_remote in enum action_fail_response
+ + libcrmcommon: deprecate action_fail_standby in enum action_fail_response
+ + libcrmcommon: deprecate action_fail_stop in enum action_fail_response
+ + libcrmcommon: deprecate action_notified in enum action_tasks
+ + libcrmcommon: deprecate action_notify in enum action_tasks
+ + libcrmcommon: deprecate action_promote in enum action_tasks
+ + libcrmcommon: deprecate action_promoted in enum action_tasks
+ + libcrmcommon: deprecate action_restart_container in enum action_fail_response
+ + libcrmcommon: deprecate CRMD_ACTION_CANCEL string constant
+ + libcrmcommon: deprecate CRMD_ACTION_DELETE string constant
+ + libcrmcommon: deprecate CRMD_ACTION_DEMOTE string constant
+ + libcrmcommon: deprecate CRMD_ACTION_DEMOTED string constant
+ + libcrmcommon: deprecate CRMD_ACTION_METADATA string constant
+ + libcrmcommon: deprecate CRMD_ACTION_MIGRATE string constant
+ + libcrmcommon: deprecate CRMD_ACTION_MIGRATED string constant
+ + libcrmcommon: deprecate CRMD_ACTION_NOTIFIED string constant
+ + libcrmcommon: deprecate CRMD_ACTION_NOTIFY string constant
+ + libcrmcommon: deprecate CRMD_ACTION_PROMOTE string constant
+ + libcrmcommon: deprecate CRMD_ACTION_PROMOTED string constant
+ + libcrmcommon: deprecate CRMD_ACTION_RELOAD string constant
+ + libcrmcommon: deprecate CRMD_ACTION_RELOAD_AGENT string constant
+ + libcrmcommon: deprecate CRMD_ACTION_START string constant
+ + libcrmcommon: deprecate CRMD_ACTION_STARTED string constant
+ + libcrmcommon: deprecate CRMD_ACTION_STATUS string constant
+ + libcrmcommon: deprecate CRMD_ACTION_STOP string constant
+ + libcrmcommon: deprecate CRMD_ACTION_STOPPED string constant
+ + libcrmcommon: deprecate CRMD_METADATA_CALL_TIMEOUT defined constant
+ + libcrmcommon: deprecate crm_action_str()
+ + libcrmcommon: deprecate CRM_DEFAULT_OP_TIMEOUT_S string constant
+ + libcrmcommon: deprecate crm_element_name()
+ + libcrmcommon: deprecate CRM_OP_FENCE string constant
+ + libcrmcommon: deprecate CRM_OP_RELAXED_CLONE string constant
+ + libcrmcommon: deprecate CRM_OP_RELAXED_SET string constant
+ + libcrmcommon: deprecate crm_xml_replace()
+ + libcrmcommon: deprecate enum pe_link_state
+ + libcrmcommon: deprecate getDocPtr()
+ + libcrmcommon: deprecate monitor_rsc in enum action_tasks
+ + libcrmcommon: deprecate node_member
+ + libcrmcommon: deprecate node_remote
+ + libcrmcommon: deprecate no_action in enum action_tasks
+ + libcrmcommon: deprecate no_quorum_demote
+ + libcrmcommon: deprecate no_quorum_freeze
+ + libcrmcommon: deprecate no_quorum_ignore
+ + libcrmcommon: deprecate no_quorum_stop
+ + libcrmcommon: deprecate no_quorum_suicide
+ + libcrmcommon: deprecate pcmk_log_xml_impl()
+ + libcrmcommon: deprecate pcmk_scheduler_t localhost member
+ + libcrmcommon: deprecate pe_action_dangle
+ + libcrmcommon: deprecate pe_action_dc
+ + libcrmcommon: deprecate pe_action_dedup
+ + libcrmcommon: deprecate pe_action_dumped
+ + libcrmcommon: deprecate pe_action_have_node_attrs
+ + libcrmcommon: deprecate pe_action_implied_by_stonith
+ + libcrmcommon: deprecate pe_action_migrate_runnable
+ + libcrmcommon: deprecate pe_action_optional
+ + libcrmcommon: deprecate pe_action_print_always
+ + libcrmcommon: deprecate pe_action_processed
+ + libcrmcommon: deprecate pe_action_pseudo
+ + libcrmcommon: deprecate pe_action_requires_any
+ + libcrmcommon: deprecate pe_action_reschedule
+ + libcrmcommon: deprecate pe_action_runnable
+ + libcrmcommon: deprecate pe_action_tracking
+ + libcrmcommon: deprecate pe_clone
+ + libcrmcommon: deprecate pe_container
+ + libcrmcommon: deprecate pe_discover_always in enum pe_discover_e
+ + libcrmcommon: deprecate pe_discover_exclusive in enum pe_discover_e
+ + libcrmcommon: deprecate pe_discover_never in enum pe_discover_e
+ + libcrmcommon: deprecate pe_find_anon
+ + libcrmcommon: deprecate pe_find_any
+ + libcrmcommon: deprecate pe_find_clone
+ + libcrmcommon: deprecate pe_find_current
+ + libcrmcommon: deprecate pe_find_inactive
+ + libcrmcommon: deprecate pe_find_renamed
+ + libcrmcommon: deprecate pe_group
+ + libcrmcommon: deprecate pe_native
+ + libcrmcommon: deprecate pe_unknown
+ + libcrmcommon: deprecate recovery_block in enum rsc_recovery_type
+ + libcrmcommon: deprecate recovery_stop_only in enum rsc_recovery_type
+ + libcrmcommon: deprecate recovery_stop_start in enum rsc_recovery_type
+ + libcrmcommon: deprecate recovery_stop_unexpected in enum rsc_recovery_type
+ + libcrmcommon: deprecate RSC_CANCEL string constant
+ + libcrmcommon: deprecate RSC_DELETE string constant
+ + libcrmcommon: deprecate RSC_DEMOTE string constant
+ + libcrmcommon: deprecate RSC_DEMOTED string constant
+ + libcrmcommon: deprecate RSC_METADATA string constant
+ + libcrmcommon: deprecate RSC_MIGRATE string constant
+ + libcrmcommon: deprecate RSC_MIGRATED string constant
+ + libcrmcommon: deprecate RSC_NOTIFIED string constant
+ + libcrmcommon: deprecate RSC_NOTIFY string constant
+ + libcrmcommon: deprecate RSC_PROMOTE string constant
+ + libcrmcommon: deprecate RSC_PROMOTED string constant
+ + libcrmcommon: deprecate rsc_req_nothing in enum rsc_start_requirement
+ + libcrmcommon: deprecate rsc_req_quorum in enum rsc_start_requirement
+ + libcrmcommon: deprecate rsc_req_stonith in enum rsc_start_requirement
+ + libcrmcommon: deprecate RSC_ROLE_PROMOTED in enum rsc_role_e
+ + libcrmcommon: deprecate RSC_ROLE_STARTED in enum rsc_role_e
+ + libcrmcommon: deprecate RSC_ROLE_STOPPED in enum rsc_role_e
+ + libcrmcommon: deprecate RSC_ROLE_UNKNOWN in enum rsc_role_e
+ + libcrmcommon: deprecate RSC_ROLE_UNPROMOTED in enum rsc_role_e
+ + libcrmcommon: deprecate RSC_START string constant
+ + libcrmcommon: deprecate RSC_STARTED string constant
+ + libcrmcommon: deprecate RSC_STATUS string constant
+ + libcrmcommon: deprecate RSC_STOP string constant
+ + libcrmcommon: deprecate RSC_STOPPED string constant
+ + libcrmcommon: deprecate shutdown_crm in enum action_tasks
+ + libcrmcommon: deprecate started_rsc in enum action_tasks
+ + libcrmcommon: deprecate start_rsc in enum action_tasks
+ + libcrmcommon: deprecate stonith_node in enum action_tasks
+ + libcrmcommon: deprecate stopped_rsc in enum action_tasks
+ + libcrmcommon: deprecate stop_rsc in enum action_tasks
+ + libcrmcommon: deprecate TYPE() macro
+ + libcrmcommon: deprecate XML_ATTR_VERBOSE string constant
+ + libcrmcommon: deprecate XML_CIB_ATTR_SOURCE string constant
+ + libcrmcommon: deprecate XML_CIB_TAG_DOMAINS string constant
+ + libcrmcommon: deprecate xml_has_children()
+ + libcrmcommon: deprecate XML_NODE_EXPECTED string constant
+ + libcrmcommon: deprecate XML_NODE_IN_CLUSTER string constant
+ + libcrmcommon: deprecate XML_NODE_IS_PEER string constant
+ + libcrmcommon: deprecate XML_NODE_JOIN_STATE string constant
+ + libcrmcommon: deprecate XML_RSC_ATTR_FAIL_STICKINESS string constant
+ + libcrmcommon: deprecate XML_RSC_ATTR_FAIL_TIMEOUT string constant
+ + libcrmcommon: deprecate XML_RSC_ATTR_INCARNATION_MAX string constant
+ + libcrmcommon: deprecate XML_RSC_ATTR_INCARNATION_MIN string constant
+ + libcrmcommon: deprecate XML_RSC_ATTR_INCARNATION_NODEMAX string constant
+ + libcrmcommon: deprecate XML_RSC_ATTR_PROMOTED_MAX string constant
+ + libcrmcommon: deprecate XML_RSC_ATTR_PROMOTED_NODEMAX string constant
+ + libcrmcommon: deprecate XML_TAG_DIFF_ADDED string constant
+ + libcrmcommon: deprecate XML_TAG_DIFF_REMOVED string constant
+ + libcrmcommon: deprecate XML_TAG_FRAGMENT string constant
+ + libcrmcommon: dump_xml_formatted()'s argument is now const
+ + libcrmcommon: dump_xml_formatted_with_text()'s argument is const
+ + libcrmcommon: dump_xml_unformatted()'s argument is now const
+ + libcrmcommon: save_xml_to_file()'s xml argument is now const
+ + libcrmcommon: validate_xml_verbose()'s xml_blob argument is const
+ + libcrmcommon: write_xml_fd()'s xml argument is now const
+ + libcrmcommon: write_xml_file()'s xml argument is now const
+ + libcrmcommon: xml_top argument of xpath_search() is now const
+ + libcrmcommon,libpe_rules,libpe_status: move enum pe_ordering, struct
+ pe_action_wrapper_s, struct pe_tag_s, struct pe_ticket_s, struct
+ resource_object_functions_s, enum node_type, enum pe_action_flags, enum
+ pe_discover_e, enum pe_find, enum pe_link_state, enum pe_obj_types, enum
+ pe_quorum_policy, enum pe_restart, struct pe_action_s, struct pe_node_s,
+ struct pe_node_shared_s, struct pe_resource_s, struct pe_working_set_s,
+ enum action_fail_response, enum action_tasks, enum pe_print_options, enum
+ rsc_recovery_type, enum rsc_role_e, and enum rsc_start_requirement to
+ libcrmcommon
+ + libpacemaker,libpe_rules,libpe_status: use pcmk_action_t instead of
+ pe_action_t, pcmk_node_t instead of pe_node_t, pcmk_resource_t instead of
+ pe_resource_t, and pcmk_scheduler_t instead of pe_working_set_t in all API
+ structs and functions
+ + libpacemaker: add pcmk_list_alternatives(), pcmk_list_providers(),
+ pcmk_list_standards(), and pcmk_list_agents() for functionality equivalent
+ to crm_resource --list-ocf-alternatives, --list-ocf-providers,
+ --list-standards, and --list-agents
+ + libpe_rules,libpe_status: deprecate pe_action_t type
+ + libpe_rules,libpe_status: deprecate pe_action_wrapper_t
+ + libpe_rules,libpe_status: deprecate pe_node_t type
+ + libpe_rules,libpe_status: deprecate pe_resource_t type
+ + libpe_rules,libpe_status: deprecate pe_tag_t
+ + libpe_rules,libpe_status: deprecate pe_ticket_t
+ + libpe_rules,libpe_status: deprecate pe_working_set_t type
+ + libpe_rules,libpe_status: deprecate resource_alloc_functions_t type
+ + libpe_rules,libpe_status: deprecate resource_object_functions_t
+ + libpe_status,libpe_rules: deprecate enum pe_ordering and all its values
+ + libpe_status,libpe_rules: deprecate RSC_ROLE_MAX
+ + libpe_status,libpe_rules: deprecate RSC_ROLE_PROMOTED_LEGACY_S string constant
+ + libpe_status,libpe_rules: deprecate RSC_ROLE_PROMOTED_S string constant
+ + libpe_status,libpe_rules: deprecate RSC_ROLE_STARTED_S string constant
+ + libpe_status,libpe_rules: deprecate RSC_ROLE_STOPPED_S string constant
+ + libpe_status,libpe_rules: deprecate RSC_ROLE_UNKNOWN_S string constant
+ + libpe_status,libpe_rules: deprecate RSC_ROLE_UNPROMOTED_LEGACY_S string constant
+ + libpe_status,libpe_rules: deprecate RSC_ROLE_UNPROMOTED_S string constant
+ + libpe_status: deprecate enum pe_check_parameters
+ + libpe_status: deprecate pe_flag_check_config
+ + libpe_status: deprecate pe_flag_concurrent_fencing
+ + libpe_status: deprecate pe_flag_enable_unfencing
+ + libpe_status: deprecate pe_flag_have_quorum
+ + libpe_status: deprecate pe_flag_have_remote_nodes
+ + libpe_status: deprecate pe_flag_have_status
+ + libpe_status: deprecate pe_flag_have_stonith_resource
+ + libpe_status: deprecate pe_flag_maintenance_mode
+ + libpe_status: deprecate pe_flag_no_compat
+ + libpe_status: deprecate pe_flag_no_counts
+ + libpe_status: deprecate pe_flag_quick_location
+ + libpe_status: deprecate pe_flag_sanitized
+ + libpe_status: deprecate pe_flag_show_scores
+ + libpe_status: deprecate pe_flag_show_utilization
+ + libpe_status: deprecate pe_flag_shutdown_lock
+ + libpe_status: deprecate pe_flag_startup_fencing
+ + libpe_status: deprecate pe_flag_startup_probes
+ + libpe_status: deprecate pe_flag_start_failure_fatal
+ + libpe_status: deprecate pe_flag_stonith_enabled
+ + libpe_status: deprecate pe_flag_stop_action_orphans
+ + libpe_status: deprecate pe_flag_stop_everything
+ + libpe_status: deprecate pe_flag_stop_rsc_orphans
+ + libpe_status: deprecate pe_flag_symmetric_cluster
+ + libpe_status: deprecate pe_rsc_allow_migrate
+ + libpe_status: deprecate pe_rsc_allow_remote_remotes
+ + libpe_status: deprecate pe_rsc_assigning
+ + libpe_status: deprecate pe_rsc_block
+ + libpe_status: deprecate pe_rsc_critical
+ + libpe_status: deprecate pe_rsc_detect_loop
+ + libpe_status: deprecate pe_rsc_failed
+ + libpe_status: deprecate pe_rsc_failure_ignored
+ + libpe_status: deprecate pe_rsc_fence_device
+ + libpe_status: deprecate pe_rsc_is_container
+ + libpe_status: deprecate pe_rsc_maintenance
+ + libpe_status: deprecate pe_rsc_managed
+ + libpe_status: deprecate pe_rsc_merging
+ + libpe_status: deprecate pe_rsc_needs_fencing
+ + libpe_status: deprecate pe_rsc_needs_quorum
+ + libpe_status: deprecate pe_rsc_needs_unfencing
+ + libpe_status: deprecate pe_rsc_notify
+ + libpe_status: deprecate pe_rsc_orphan
+ + libpe_status: deprecate pe_rsc_orphan_container_filler
+ + libpe_status: deprecate pe_rsc_promotable
+ + libpe_status: deprecate pe_rsc_provisional
+ + libpe_status: deprecate pe_rsc_reload
+ + libpe_status: deprecate pe_rsc_replica_container
+ + libpe_status: deprecate pe_rsc_restarting
+ + libpe_status: deprecate pe_rsc_runnable
+ + libpe_status: deprecate pe_rsc_start_pending
+ + libpe_status: deprecate pe_rsc_stop
+ + libpe_status: deprecate pe_rsc_stop_unexpected
+ + libpe_status: deprecate pe_rsc_unique
+
* Wed May 24 2023 Ken Gaillot <kgaillot@redhat.com> Pacemaker-2.1.6
- 1124 commits with 402 files changed, 25220 insertions(+), 14751 deletions(-)
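Among the changelog entries above, the libpacemaker addition of pcmk_list_alternatives(), pcmk_list_providers(), pcmk_list_standards(), and pcmk_list_agents() mirrors existing crm_resource queries. For orientation, the equivalent command-line calls are sketched below; the ocf:heartbeat and IPaddr2 arguments are illustrative only, not part of this patch:

    crm_resource --list-standards                 # resource standards (ocf, systemd, ...)
    crm_resource --list-ocf-providers             # available OCF providers
    crm_resource --list-agents ocf:heartbeat      # agents for one standard:provider
    crm_resource --list-ocf-alternatives IPaddr2  # providers offering a given OCF agent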
diff --git a/GNUmakefile b/GNUmakefile
index 8cb5d3e..8cac498 100644
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -18,8 +18,6 @@ default: build
# directory if a relevant variable hasn't been defined.
abs_srcdir ?= $(shell pwd)
-GLIB_CFLAGS ?= $(pkg-config --cflags glib-2.0)
-
PACKAGE ?= pacemaker
.PHONY: init
@@ -40,8 +38,7 @@ USE_FILE = $(shell test -e rpm/Makefile || echo "-f Makefile.am")
$(PACKAGE).spec chroot dirty export mock rc release rpm rpmlint srpm:
$(MAKE) $(AM_MAKEFLAGS) -C rpm $(USE_FILE) "$@"
-.PHONY: mock-% rpm-% spec-% srpm-%
-mock-% rpm-% spec-% srpm-%:
+mock-% rpm-% spec-% srpm-%: FORCE
$(MAKE) $(AM_MAKEFLAGS) -C rpm $(USE_FILE) "$@"
## Development-related targets
@@ -59,3 +56,6 @@ clang $(COVERAGE_TARGETS) $(COVERITY_TARGETS) cppcheck indent:
COVLEVEL=$(COVLEVEL) \
CPPCHECK_ARGS=$(CPPCHECK_ARGS) \
-C devel "$@"
+
+.PHONY: FORCE
+FORCE:
diff --git a/INSTALL.md b/INSTALL.md
index 5bd2975..e03c594 100644
--- a/INSTALL.md
+++ b/INSTALL.md
@@ -11,7 +11,7 @@
| | libuuid-devel | libuuid-devel | uuid-dev |
| 0.27 or later | pkgconfig | pkgconfig | pkg-config |
| 2.42.0 or later | glib2-devel | glib2-devel | libglib2.0-dev |
-| | libxml2-devel | libxml2-devel | libxml2-dev |
+| 2.6.0 or later | libxml2-devel | libxml2-devel | libxml2-dev |
| | libxslt-devel | libxslt-devel | libxslt-dev |
| | bzip2-devel | libbz2-devel | libbz2-dev |
| 0.17.0 or later | libqb-devel | libqb-devel | libqb-dev |
@@ -55,16 +55,12 @@ Also:
| documentation (PDF) | | latexmk texlive texlive-capt-of texlive-collection-xetex texlive-fncychap texlive-framed texlive-multirow texlive-needspace texlive-tabulary texlive-titlesec texlive-threeparttable texlive-upquote texlive-wrapfig texlive-xetex | texlive texlive-latex | texlive texlive-latex-extra |
| annotated source code as HTML via "make global" | | global | global | global |
| RPM packages via "make rpm" | 4.11 or later | rpm | rpm | (n/a) |
-| unit tests | | libcmocka-devel | libcmocka-devel | libcmocka-dev |
+| unit tests | 1.1.0 or later | libcmocka-devel | libcmocka-devel | libcmocka-dev |
## Optional testing dependencies
* procps and psmisc (if running cts-exec, cts-fencing, or CTS)
* valgrind (if running CTS valgrind tests)
* python3-systemd (if using CTS on cluster nodes running systemd)
-* rsync (if running CTS container tests)
-* libvirt-daemon-driver-lxc (if running CTS container tests)
-* libvirt-daemon-lxc (if running CTS container tests)
-* libvirt-login-shell (if running CTS container tests)
* nmap (if not specifying an IP address base)
* oprofile (if running CTS profiling tests)
* dlm (to log DLM debugging info after CTS tests)
diff --git a/Makefile.am b/Makefile.am
index fd9db82..c3e39b9 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -10,31 +10,31 @@
# This directory must be same as in configure.ac's AC_CONFIG_MACRO_DIR
ACLOCAL_AMFLAGS = -I m4
-EXTRA_DIST = CONTRIBUTING.md \
- GNUmakefile \
- INSTALL.md \
- README.markdown \
- autogen.sh \
+EXTRA_DIST = CONTRIBUTING.md \
+ GNUmakefile \
+ INSTALL.md \
+ README.markdown \
+ autogen.sh \
m4/CC_CHECK_LDFLAGS.m4 \
m4/CHECK_ENUM_VALUE.m4 \
- m4/gnulib-cache.m4 \
- m4/gnulib-tool.m4 \
- m4/PKG_CHECK_VAR.m4 \
- m4/REQUIRE_HEADER.m4 \
+ m4/gnulib-cache.m4 \
+ m4/gnulib-tool.m4 \
+ m4/PKG_CHECK_VAR.m4 \
+ m4/REQUIRE_HEADER.m4 \
m4/version.m4
DISTCLEANFILES = config.status
-MAINTAINERCLEANFILES = Makefile.in \
- aclocal.m4 \
- config.guess \
- config.sub \
- configure \
- depcomp \
- install-sh \
- ltmain.sh \
- missing \
- py-compile \
+MAINTAINERCLEANFILES = Makefile.in \
+ aclocal.m4 \
+ config.guess \
+ config.sub \
+ configure \
+ depcomp \
+ install-sh \
+ ltmain.sh \
+ missing \
+ py-compile \
test-driver
# Don't try to install files outside build directory for "make distcheck".
@@ -45,13 +45,28 @@ AM_DISTCHECK_CONFIGURE_FLAGS = --prefix="$$dc_install_base/usr" \
--with-systemdsystemunitdir="$$dc_install_base$(systemdsystemunitdir)"
# Only these will get built with a plain "make"
-CORE = replace include lib daemons tools xml po python cts rpm
-
-SUBDIRS = $(CORE) agents devel doc etc maint tests
+CORE = include \
+ lib \
+ daemons \
+ tools \
+ xml \
+ po \
+ python \
+ cts \
+ rpm
+
+SUBDIRS = $(CORE) \
+ agents \
+ devel \
+ doc \
+ etc \
+ maint \
+ tests
AM_CPPFLAGS = -I$(top_srcdir)/include
-doc_DATA = README.markdown COPYING
+doc_DATA = README.markdown \
+ COPYING
licensedir = $(docdir)/licenses/
dist_license_DATA = $(wildcard licenses/*)
@@ -67,6 +82,7 @@ DAEMON_R_DIRS = $(CRM_CONFIG_DIR) \
DAEMON_RW_DIRS = $(CRM_BUNDLE_DIR) \
$(CRM_LOG_DIR)
+.PHONY: core
core:
@echo "Building only core components and tests: $(CORE)"
@for subdir in $(CORE); do \
@@ -74,6 +90,7 @@ core:
$(MAKE) $(AM_MAKEFLAGS) -C $$subdir all || exit 1; \
done
+.PHONY: core-clean
core-clean:
@echo "Cleaning only core components and tests: $(CORE)"
@for subdir in $(CORE); do \
@@ -81,6 +98,7 @@ core-clean:
$(MAKE) $(AM_MAKEFLAGS) -C $$subdir clean || exit 1; \
done
+.PHONY: install-exec-local
install-exec-local:
for DIR in $(ROOT_DIRS) $(DAEMON_R_DIRS); do \
$(INSTALL) -d -m 750 "$(DESTDIR)/$$DIR"; \
@@ -96,18 +114,25 @@ install-exec-local:
done
# Remove created directories only if they're empty
+.PHONY: uninstall-hook
uninstall-hook:
-for DIR in $(ROOT_DIRS) $(DAEMON_R_DIRS) $(DAEMON_RW_DIRS); do \
rmdir "$(DESTDIR)/$$DIR"; \
done
+.PHONY: clean-generic
clean-generic:
-rm -f *.tar.bz2 *.sed
PACKAGE ?= pacemaker
+.PHONY: clean-local
clean-local:
-rm -f $(builddir)/$(PACKAGE)-*.tar.gz
+ -if [ "x$(top_srcdir)" != "x$(top_builddir)" ]; then \
+ rm -rf $(top_builddir)/python; \
+ fi
+.PHONY: distclean-local
distclean-local:
-rm -rf libltdl autom4te.cache
diff --git a/agents/Makefile.am b/agents/Makefile.am
index 3cbd7c6..af0d970 100644
--- a/agents/Makefile.am
+++ b/agents/Makefile.am
@@ -9,4 +9,6 @@
include $(top_srcdir)/mk/common.mk
-SUBDIRS = alerts ocf stonith
+SUBDIRS = alerts \
+ ocf \
+ stonith
diff --git a/agents/alerts/Makefile.am b/agents/alerts/Makefile.am
index fdb294f..a3fe891 100644
--- a/agents/alerts/Makefile.am
+++ b/agents/alerts/Makefile.am
@@ -10,6 +10,4 @@
include $(top_srcdir)/mk/common.mk
samplesdir = $(datadir)/$(PACKAGE)/alerts/
-dist_samples_DATA = alert_file.sh.sample \
- alert_smtp.sh.sample \
- alert_snmp.sh.sample
+dist_samples_DATA = $(wildcard *.sample)
diff --git a/agents/ocf/HealthCPU.in b/agents/ocf/HealthCPU.in
index 4a8e7c3..14e4b07 100755
--- a/agents/ocf/HealthCPU.in
+++ b/agents/ocf/HealthCPU.in
@@ -67,6 +67,15 @@ the #health-cpu will go red if the %idle of the CPU falls below 10%.
<content type="string" default="10"/>
</parameter>
+<parameter name="dampening" reloadable="1">
+<longdesc lang="en">
+The time to wait (dampening) in seconds for further changes before writing
+</longdesc>
+<shortdesc lang="en">The time to wait (dampening) in seconds for further changes
+before writing</shortdesc>
+<content type="string" default="30s"/>
+</parameter>
+
</parameters>
<actions>
@@ -117,16 +126,16 @@ healthcpu_monitor() {
if [ $IDLE -lt ${OCF_RESKEY_red_limit} ] ; then
# echo "System state RED!"
- attrd_updater -n "#health-cpu" -U "red" -d "30s"
+ attrd_updater -n "#health-cpu" -B "red" -d "${OCF_RESKEY_dampening}"
return $OCF_SUCCESS
fi
if [ $IDLE -lt ${OCF_RESKEY_yellow_limit} ] ; then
# echo "System state yellow."
- attrd_updater -n "#health-cpu" -U "yellow" -d "30s"
+ attrd_updater -n "#health-cpu" -B "yellow" -d "${OCF_RESKEY_dampening}"
else
# echo "System state green."
- attrd_updater -n "#health-cpu" -U "green" -d "30s"
+ attrd_updater -n "#health-cpu" -B "green" -d "${OCF_RESKEY_dampening}"
fi
return $OCF_SUCCESS
@@ -136,8 +145,7 @@ healthcpu_monitor() {
}
healthcpu_reload_agent() {
- # No action required
- :;
+ return $OCF_SUCCESS
}
healthcpu_validate() {
@@ -188,6 +196,9 @@ fi
if [ -z "${OCF_RESKEY_yellow_limit}" ] ; then
OCF_RESKEY_yellow_limit=50
fi
+if [ -z "${OCF_RESKEY_dampening}" ]; then
+ OCF_RESKEY_dampening="30s"
+fi
case "$__OCF_ACTION" in
meta-data) meta_data
@@ -195,7 +206,7 @@ meta-data) meta_data
;;
start) healthcpu_start;;
stop) healthcpu_stop;;
-monitor) healthcpu_monitor;;
+monitor) healthcpu_validate && healthcpu_monitor;;
reload-agent) healthcpu_reload_agent;;
validate-all) healthcpu_validate;;
usage|help) healthcpu_usage
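The HealthCPU hunks above replace attrd_updater -U (update the attribute value only) with -B (update the value and its dampening together) and take the interval from a new reloadable dampening parameter instead of a hard-coded 30s. A minimal before/after sketch, using the #health-cpu attribute the agent maintains:

    # before: value update with a fixed 30-second dampening argument
    attrd_updater -n "#health-cpu" -U green -d 30s
    # after: value and dampening refreshed together, interval configurable
    attrd_updater -n "#health-cpu" -B green -d "${OCF_RESKEY_dampening}"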
diff --git a/agents/ocf/HealthSMART.in b/agents/ocf/HealthSMART.in
index b6edac2..b2f37de 100755
--- a/agents/ocf/HealthSMART.in
+++ b/agents/ocf/HealthSMART.in
@@ -139,25 +139,25 @@ check_temperature() {
if [ $1 -lt ${lower_red_limit} ] ; then
ocf_log info "Drive ${DRIVE} ${DEVICE} too cold: ${1} C"
- attrd_updater -n "#health-smart" -U "red" -d "${OCF_RESKEY_dampen}"
+ attrd_updater -n "#health-smart" -B "red" -d "${OCF_RESKEY_dampen}"
return 1
fi
if [ $1 -gt ${upper_red_limit} ] ; then
ocf_log info "Drive ${DRIVE} ${DEVICE} too hot: ${1} C"
- attrd_updater -n "#health-smart" -U "red" -d "${OCF_RESKEY_dampen}"
+ attrd_updater -n "#health-smart" -B "red" -d "${OCF_RESKEY_dampen}"
return 1
fi
if [ $1 -lt ${lower_yellow_limit} ] ; then
ocf_log info "Drive ${DRIVE} ${DEVICE} quite cold: ${1} C"
- attrd_updater -n "#health-smart" -U "yellow" -d "${OCF_RESKEY_dampen}"
+ attrd_updater -n "#health-smart" -B "yellow" -d "${OCF_RESKEY_dampen}"
return 1
fi
if [ $1 -gt ${upper_yellow_limit} ] ; then
ocf_log info "Drive ${DRIVE} ${DEVICE} quite hot: ${1} C"
- attrd_updater -n "#health-smart" -U "yellow" -d "${OCF_RESKEY_dampen}"
+ attrd_updater -n "#health-smart" -B "yellow" -d "${OCF_RESKEY_dampen}"
return 1
fi
}
@@ -244,7 +244,7 @@ HealthSMART_start() {
}
HealthSMART_stop() {
- attrd_updater -D -n "#health-smart" -d "${OCF_RESKEY_dampen}"
+ attrd_updater -D -n "#health-smart"
rm "${OCF_RESKEY_state}"
@@ -278,7 +278,7 @@ HealthSMART_monitor() {
# Check overall S.M.A.R.T. status
"${OCF_RESKEY_smartctl}" -d "${DEVICE}" -H ${DRIVE} | grep -q "SMART overall-health self-assessment test result: PASSED"
if [ $? -ne 0 ]; then
- attrd_updater -n "#health-smart" -U "red" -d "${OCF_RESKEY_dampen}"
+ attrd_updater -n "#health-smart" -B "red" -d "${OCF_RESKEY_dampen}"
return $OCF_SUCCESS
fi
@@ -290,7 +290,7 @@ HealthSMART_monitor() {
else
"${OCF_RESKEY_smartctl}" -H "${DRIVE}" | grep -q "SMART overall-health self-assessment test result: PASSED"
if [ $? -ne 0 ]; then
- attrd_updater -n "#health-smart" -U "red" -d "${OCF_RESKEY_dampen}"
+ attrd_updater -n "#health-smart" -B "red" -d "${OCF_RESKEY_dampen}"
return $OCF_SUCCESS
fi
@@ -301,7 +301,7 @@ HealthSMART_monitor() {
fi
done
- attrd_updater -n "#health-smart" -U "green" -d "${OCF_RESKEY_dampen}"
+ attrd_updater -n "#health-smart" -B "green" -d "${OCF_RESKEY_dampen}"
return $OCF_SUCCESS
fi
diff --git a/agents/ocf/Makefile.am b/agents/ocf/Makefile.am
index 823e67e..0b18bb1 100644
--- a/agents/ocf/Makefile.am
+++ b/agents/ocf/Makefile.am
@@ -27,8 +27,11 @@ ocf_SCRIPTS = ClusterMon \
if BUILD_XML_HELP
-man7_MANS = $(ocf_SCRIPTS:%=ocf_pacemaker_%.7) $(dist_ocf_SCRIPTS:%=ocf_pacemaker_%.7)
-DBOOK_OPTS = --stringparam command.prefix ocf_pacemaker_ --stringparam variable.prefix OCF_RESKEY_ --param man.vol 7
+man7_MANS = $(ocf_SCRIPTS:%=ocf_pacemaker_%.7) \
+ $(dist_ocf_SCRIPTS:%=ocf_pacemaker_%.7)
+DBOOK_OPTS = --stringparam command.prefix ocf_pacemaker_ \
+ --stringparam variable.prefix OCF_RESKEY_ \
+ --param man.vol 7
ocf_pacemaker_%.xml: %
$(AM_V_GEN)OCF_FUNCTIONS=/dev/null OCF_ROOT=$(OCF_ROOT_DIR) $(abs_builddir)/$< meta-data > $@
@@ -50,4 +53,6 @@ validate: all
| xmllint --noout --relaxng $(RNG) - || break; \
done
-CLEANFILES = $(man7_MANS) $(ocf_SCRIPTS:%=%.xml) $(dist_ocf_SCRIPTS:%=%.xml)
+CLEANFILES = $(man7_MANS) \
+ $(ocf_SCRIPTS:%=%.xml) \
+ $(dist_ocf_SCRIPTS:%=%.xml)
diff --git a/agents/ocf/ifspeed.in b/agents/ocf/ifspeed.in
index 5fbaf89..8c07c3d 100755
--- a/agents/ocf/ifspeed.in
+++ b/agents/ocf/ifspeed.in
@@ -123,7 +123,7 @@ Can be used to tune how big attribute value will be.
<content type="integer" default="${OCF_RESKEY_weight_base_default}"/>
</parameter>
-<parameter name="dampen">
+<parameter name="dampen" reloadable="1">
<longdesc lang="en">
The time to wait (dampening) for further changes to occur.
</longdesc>
@@ -147,6 +147,7 @@ Log more verbosely.
<action name="monitor" depth="0" timeout="30s" interval="10s"/>
<action name="meta-data" timeout="5s" />
<action name="validate-all" timeout="30s" depth="0" />
+<action name="reload-agent" timeout="20s" />
</actions>
</resource-agent>
END
@@ -154,7 +155,7 @@ END
usage() {
cat <<END
-Usage: $0 {start|stop|monitor|validate-all|meta-data}
+Usage: $0 {start|stop|monitor|validate-all|meta-data|reload-agent}
Expects to have a fully populated OCF RA-compliant environment set.
END
@@ -172,7 +173,7 @@ start() {
stop() {
ha_pseudo_resource "${ha_pseudo_resource_name}" stop
- attrd_updater -D -n "${OCF_RESKEY_name}" -d "${OCF_RESKEY_dampen}" ${attrd_options}
+ attrd_updater -D -n "${OCF_RESKEY_name}" ${attrd_options}
return $OCF_SUCCESS
}
@@ -491,11 +492,7 @@ update() {
speed=$( iface_get_speed "${nic}" )
: $(( score = speed * ${OCF_RESKEY_weight_base} / 1000 ))
- if [ "$__OCF_ACTION" = "start" ] ; then
- attrd_updater -n "${OCF_RESKEY_name}" -B "${score}" -d "${OCF_RESKEY_dampen}" ${attrd_options}
- else
- attrd_updater -n "${OCF_RESKEY_name}" -v "${score}" -d "${OCF_RESKEY_dampen}" ${attrd_options}
- fi
+ attrd_updater -n "${OCF_RESKEY_name}" -B "${score}" -d "${OCF_RESKEY_dampen}" ${attrd_options}
rc=$?
case ${rc} in
0)
@@ -508,6 +505,10 @@ update() {
return ${rc}
}
+reload_agent() {
+ return $OCF_SUCCESS
+}
+
case $__OCF_ACTION in
meta-data)
meta_data
@@ -542,6 +543,9 @@ case "$__OCF_ACTION" in
validate-all)
validate "$OCF_CHECK_LEVEL"
;;
+ reload-agent)
+ reload_agent
+ ;;
*)
usage
exit $OCF_ERR_UNIMPLEMENTED
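ifspeed now marks dampen as reloadable, advertises a reload-agent action, and returns $OCF_SUCCESS from the new reload_agent() handler, so changing the dampening no longer forces a full stop/start of the resource. One way to confirm an installed agent advertises the change is to request its metadata the same way the man-page rule in agents/ocf/Makefile.am does; the /usr/lib/ocf paths below are an assumed default installation, not something this patch dictates:

    OCF_FUNCTIONS=/dev/null OCF_ROOT=/usr/lib/ocf \
        /usr/lib/ocf/resource.d/pacemaker/ifspeed meta-data \
        | grep -E 'reload-agent|reloadable'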
diff --git a/agents/ocf/ping.in b/agents/ocf/ping.in
index 4855e5b..73a3677 100755
--- a/agents/ocf/ping.in
+++ b/agents/ocf/ping.in
@@ -178,7 +178,7 @@ ping_stop() {
rm -f "${OCF_RESKEY_pidfile}"
- attrd_updater -D -n "$OCF_RESKEY_name" -d "$OCF_RESKEY_dampen"
+ attrd_updater -D -n "$OCF_RESKEY_name"
return $OCF_SUCCESS
}
@@ -237,8 +237,7 @@ ping_validate() {
}
ping_reload_agent() {
- # No action required
- :;
+ return $OCF_SUCCESS
}
fping_check() {
@@ -319,11 +318,7 @@ ping_update() {
fi
score=$(expr $active \* $OCF_RESKEY_multiplier)
- if [ "$__OCF_ACTION" = "start" ] ; then
- attrd_updater -n "$OCF_RESKEY_name" -B "$score" -d "$OCF_RESKEY_dampen"
- else
- attrd_updater -n "$OCF_RESKEY_name" -v "$score" -d "$OCF_RESKEY_dampen"
- fi
+ attrd_updater -n "$OCF_RESKEY_name" -B "$score" -d "$OCF_RESKEY_dampen"
rc=$?
case $rc in
0) ping_conditional_log debug "Updated $OCF_RESKEY_name = $score" ;;
@@ -423,7 +418,7 @@ start) ping_start;;
stop) ping_stop;;
monitor) ping_monitor;;
validate-all) ping_validate;;
-reload-agent) ping_reload_agent;;
+reload-agent) ping_reload_agent;;
usage|help) ping_usage
exit $OCF_SUCCESS
;;
diff --git a/agents/stonith/Makefile.am b/agents/stonith/Makefile.am
index e231775..3e074f1 100644
--- a/agents/stonith/Makefile.am
+++ b/agents/stonith/Makefile.am
@@ -16,4 +16,5 @@ if BUILD_LHA_SUPPORT
sbin_SCRIPTS += fence_legacy
endif
-CLEANFILES = $(man7_MANS) $(man8_MANS)
+CLEANFILES = $(man7_MANS) \
+ $(man8_MANS)
diff --git a/configure.ac b/configure.ac
index db80de2..6bff02e 100644
--- a/configure.ac
+++ b/configure.ac
@@ -8,9 +8,12 @@ dnl
dnl This source code is licensed under the GNU General Public License version 2
dnl or later (GPLv2+) WITHOUT ANY WARRANTY.
-dnl ===============================================
-dnl Bootstrap
-dnl ===============================================
+
+dnl ==============================================
+dnl Bootstrap autotools
+dnl ==============================================
+
+# Require a minimum version of autoconf itself
AC_PREREQ(2.64)
dnl AC_CONFIG_MACRO_DIR is deprecated as of autoconf 2.70 (2020-12-08).
@@ -20,41 +23,26 @@ m4_ifdef([AC_CONFIG_MACRO_DIRS],
[AC_CONFIG_MACRO_DIRS([m4])],
[AC_CONFIG_MACRO_DIR([m4])])
-AC_DEFUN([AC_DATAROOTDIR_CHECKED])
-
-dnl Suggested structure:
-dnl information on the package
-dnl checks for programs
-dnl checks for libraries
-dnl checks for header files
-dnl checks for types
-dnl checks for structures
-dnl checks for compiler characteristics
-dnl checks for library functions
-dnl checks for system services
-
m4_include([m4/version.m4])
AC_INIT([pacemaker], VERSION_NUMBER, [users@clusterlabs.org], [pacemaker],
PCMK_URL)
-PCMK_FEATURES=""
-
LT_CONFIG_LTDL_DIR([libltdl])
AC_CONFIG_AUX_DIR([libltdl/config])
-AC_CANONICAL_HOST
dnl Where #defines that autoconf makes (e.g. HAVE_whatever) go
dnl
-dnl Internal header: include/config.h
-dnl - Contains ALL defines
+dnl include/config.h
+dnl - Internal API
+dnl - Contains all defines
dnl - include/config.h.in is generated automatically by autoheader
-dnl - NOT to be included in any header files except crm_internal.h
+dnl - Not to be included in any header files except crm_internal.h
dnl (which is also not to be included in any other header files)
dnl
-dnl External header: include/crm_config.h
-dnl - Contains a subset of defines checked here
-dnl - Manually edit include/crm_config.h.in to have configure include
-dnl new defines
+dnl include/crm_config.h
+dnl - External API
+dnl - Contains a subset of defines
+dnl - include/crm_config.h.in is manually edited to select the subset
dnl - Should not include HAVE_* defines
dnl - Safe to include anywhere
AC_CONFIG_HEADERS([include/config.h include/crm_config.h])
@@ -72,27 +60,57 @@ AS_IF([test x"${PKG_CONFIG}" != x""], [],
PKG_INSTALLDIR
PKG_NOARCH_INSTALLDIR
-dnl Example 2.4. Silent Custom Rule to Generate a File
-dnl %-bar.pc: %.pc
-dnl $(AM_V_GEN)$(LN_S) $(notdir $^) $@
-
-CC_IN_CONFIGURE=yes
-export CC_IN_CONFIGURE
-LDD=ldd
-
-dnl ========================================================================
-dnl Compiler characteristics
-dnl ========================================================================
+dnl ==============================================
+dnl Compiler checks and helpers
+dnl ==============================================
dnl A particular compiler can be forced by setting the CC environment variable
AC_PROG_CC
+dnl C++ is needed only to run maintainer utilities, not to build
+AC_PROG_CXX
+
dnl Use at least C99 if possible (automatic for autoconf >= 2.70)
m4_version_prereq([2.70], [:], [AC_PROG_CC_STDC])
-dnl C++ is not needed for build, just maintainer utilities
-AC_PROG_CXX
+# cc_supports_flag <compiler-flag>
+# Return success if the C compiler supports the given flag
+cc_supports_flag() {
+ local CFLAGS="-Werror $@"
+ AC_MSG_CHECKING([whether $CC supports $@])
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])],
+ [RC=0; AC_MSG_RESULT([yes])],
+ [RC=1; AC_MSG_RESULT([no])])
+ return $RC
+}
+
+# cc_temp_flags <compiler-flags>
+# Use the given flags for subsequent C compilation. These can be reverted to
+# what was used previously with cc_restore_flags. This allows certain tests to
+# use specific flags without affecting anything else.
+cc_temp_flags() {
+ ac_save_CFLAGS="$CFLAGS"
+ CFLAGS="$*"
+}
+
+# cc_restore_flags
+# Restore C compiler flags to what they were before the last cc_temp_flags
+# call.
+cc_restore_flags() {
+ CFLAGS=$ac_save_CFLAGS
+}
+
+# Check for fatal warning support
+AS_IF([test $enable_fatal_warnings -ne $DISABLED dnl
+ && test x"$GCC" = x"yes" && cc_supports_flag -Werror],
+ [WERROR="-Werror"],
+ [
+ WERROR=""
+ AS_CASE([$enable_fatal_warnings],
+ [$REQUIRED], [AC_MSG_ERROR([Compiler does not support fatal warnings])],
+ [$OPTIONAL], [enable_fatal_warnings=$DISABLED])
+ ])
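The relocated helpers above let an individual configure test compile with its own flags and then restore CFLAGS, instead of leaking test flags into the rest of the build. A hedged usage sketch follows; the -fstack-protector-strong flag is only an example, not a check this patch adds:

    if cc_supports_flag -fstack-protector-strong; then
        cc_temp_flags "$CFLAGS -fstack-protector-strong"
        # ...compile- or link-time probe that needs the extra flag...
        cc_restore_flags
    fi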
dnl We use md5.c from gnulib, which has its own m4 macros. Per its docs:
dnl "The macro gl_EARLY must be called as soon as possible after verifying that
@@ -103,8 +121,15 @@ gl_EARLY
gl_SET_CRYPTO_CHECK_DEFAULT([no])
gl_INIT
-# --enable-new-dtags: Use RUNPATH instead of RPATH.
-# It is necessary to have this done before libtool does linker detection.
+AC_CHECK_SIZEOF(long)
+
+
+dnl ==============================================
+dnl Linker checks
+dnl ==============================================
+
+# Check whether linker supports --enable-new-dtags to use RUNPATH instead of
+# RPATH. It is necessary to do this before libtool does linker detection.
# See also: https://github.com/kronosnet/kronosnet/issues/107
AX_CHECK_LINK_FLAG([-Wl,--enable-new-dtags],
[AM_LDFLAGS=-Wl,--enable-new-dtags],
@@ -117,65 +142,14 @@ LT_INIT([dlopen])
LDFLAGS="$saved_LDFLAGS"
LTDL_INIT([convenience])
-AC_TYPE_SIZE_T
-AC_CHECK_SIZEOF(char)
-AC_CHECK_SIZEOF(short)
-AC_CHECK_SIZEOF(int)
-AC_CHECK_SIZEOF(long)
-AC_CHECK_SIZEOF(long long)
-dnl ===============================================
-dnl Helpers
-dnl ===============================================
-cc_supports_flag() {
- local CFLAGS="-Werror $@"
- AC_MSG_CHECKING([whether $CC supports $@])
- AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])],
- [RC=0; AC_MSG_RESULT([yes])],
- [RC=1; AC_MSG_RESULT([no])])
- return $RC
-}
-
-# Some tests need to use their own CFLAGS
-
-cc_temp_flags() {
- ac_save_CFLAGS="$CFLAGS"
- CFLAGS="$*"
-}
-
-cc_restore_flags() {
- CFLAGS=$ac_save_CFLAGS
-}
-
-# expand_path_option $path_variable_name $default
-expand_path_option() {
- # The first argument is the variable *name* (not value)
- ac_path_varname="$1"
-
- # Get the original value of the variable
- ac_path_value=$(eval echo "\${${ac_path_varname}}")
-
- # Expand any literal variable expressions in the value so that we don't
- # end up with something like '${prefix}' in #defines etc.
- #
- # Autoconf deliberately leaves values unexpanded to allow overriding
- # the configure script choices in make commands (for example,
- # "make exec_prefix=/foo install"). No longer being able to do this seems
- # like no great loss.
- eval ac_path_value=$(eval echo "${ac_path_value}")
+dnl ==============================================
+dnl Define configure options
+dnl ==============================================
- # Use (expanded) default if necessary
- AS_IF([test x"${ac_path_value}" = x""],
- [eval ac_path_value=$(eval echo "$2")])
-
- # Require a full path
- AS_CASE(["$ac_path_value"],
- [/*], [eval ${ac_path_varname}="$ac_path_value"],
- [*], [AC_MSG_ERROR([$ac_path_varname value "$ac_path_value" is not a full path])]
- )
-}
-
-# yes_no_try $user_response $default
+# yes_no_try <user-response> <default>
+# Map a yes/no/try user selection to $REQUIRED for yes, $DISABLED for no, and
+# $OPTIONAL for try.
DISABLED=0
REQUIRED=1
OPTIONAL=2
@@ -190,17 +164,9 @@ yes_no_try() {
AC_MSG_ERROR([Invalid option value "$value"])
}
-check_systemdsystemunitdir() {
- AC_MSG_CHECKING([which system unit file directory to use])
- PKG_CHECK_VAR([systemdsystemunitdir], [systemd], [systemdsystemunitdir])
- AC_MSG_RESULT([${systemdsystemunitdir}])
- test x"$systemdsystemunitdir" != x""
- return $?
-}
-
#
-# Fix the defaults of certain built-in variables so they can be used in our
-# custom argument defaults
+# Fix the defaults of certain built-in variables so they can be used in the
+# defaults for our custom arguments
#
AC_MSG_NOTICE([Sanitizing prefix: ${prefix}])
@@ -234,12 +200,12 @@ AS_CASE([$libdir],
AC_MSG_RESULT([$libdir])
])
-dnl ===============================================
-dnl Configure Options
-dnl ===============================================
+# Start a list of optional features this build supports
+PCMK_FEATURES=""
-dnl Actual library checks come later, but pkg-config can be used here to grab
-dnl external values to use as defaults for configure options
+dnl This section should include only the definition of configure script
+dnl options and determining their values. Processing should be done later when
+dnl possible, other than what's needed to determine values and defaults.
dnl Per the autoconf docs, --enable-*/--disable-* options should control
dnl features inherent to Pacemaker, while --with-*/--without-* options should
@@ -299,13 +265,6 @@ AC_ARG_ENABLE([compat-2.0],
)
yes_no_try "$enable_compat_2_0" "no"
enable_compat_2_0=$?
-AS_IF([test $enable_compat_2_0 -ne $DISABLED],
- [
- AC_DEFINE_UNQUOTED([PCMK__COMPAT_2_0], [1],
- [Keep certain output compatible with 2.0 release series])
- PCMK_FEATURES="$PCMK_FEATURES compat-2.0"
- ]
-)
# Add an option to create symlinks at the pre-2.0.0 daemon name locations, so
# that users and tools can continue to invoke those names directly (e.g., for
@@ -316,7 +275,6 @@ AC_ARG_ENABLE([legacy-links],
)
yes_no_try "$enable_legacy_links" "no"
enable_legacy_links=$?
-AM_CONDITIONAL([BUILD_LEGACY_LINKS], [test $enable_legacy_links -ne $DISABLED])
# AM_GNU_GETTEXT calls AM_NLS which defines the nls option, but it defaults
# to enabled. We override the definition of AM_NLS to flip the default and mark
@@ -330,12 +288,9 @@ AC_DEFUN([AM_NLS],
AC_MSG_RESULT([$USE_NLS])
AC_SUBST([USE_NLS])]
)
-
AM_GNU_GETTEXT([external])
AM_GNU_GETTEXT_VERSION([0.18])
-AS_IF([test x"$enable_nls" = x"yes"], [PCMK_FEATURES="$PCMK_FEATURES nls"])
-
dnl --with-* options: external software support, and custom locations
dnl This argument is defined via an M4 macro so default can be a variable
@@ -348,31 +303,14 @@ AC_DEFUN([VERSION_ARG],
)
VERSION_ARG(VERSION_NUMBER)
-# Redefine PACKAGE_VERSION and VERSION according to PACEMAKER_VERSION in case
-# the user used --with-version. Unfortunately, this can only affect the
-# substitution variables and later uses in this file, not the config.h
-# constants, so we have to be careful to use only PACEMAKER_VERSION in C code.
-PACKAGE_VERSION=$PACEMAKER_VERSION
-VERSION=$PACEMAKER_VERSION
-
-# Detect highest API schema version (use git if available to list managed RNGs,
-# in case there are leftover schema files from an earlier build of a different
-# version, otherwise check all RNGs)
-API_VERSION=$({ git ls-files xml/api/*.rng 2>/dev/null || ls -1 xml/api/*.rng ; } dnl
- | sed -n -e 's/^.*-\([[0-9]][[0-9.]]*\).rng$/\1/p' | sort -V | tail -1)
-AC_DEFINE_UNQUOTED([PCMK__API_VERSION], ["$API_VERSION"],
- [Highest API schema version])
-
-# Re-run configure at next make if any RNG changes, to re-detect highest
-AC_SUBST([CONFIG_STATUS_DEPENDENCIES],
- [$(echo '$(wildcard $(top_srcdir)/xml/api/*.rng)')])
-
CRM_DAEMON_USER=""
AC_ARG_WITH([daemon-user],
[AS_HELP_STRING([--with-daemon-user=USER],
[user to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@hacluster@:>@])],
[ CRM_DAEMON_USER="$withval" ]
)
+AS_IF([test x"${CRM_DAEMON_USER}" = x""],
+ [CRM_DAEMON_USER="hacluster"])
CRM_DAEMON_GROUP=""
AC_ARG_WITH([daemon-group],
@@ -380,6 +318,8 @@ AC_ARG_WITH([daemon-group],
[group to run unprivileged Pacemaker daemons as (advanced option: changing this may break other cluster components unless similarly configured) @<:@haclient@:>@])],
[ CRM_DAEMON_GROUP="$withval" ]
)
+AS_IF([test x"${CRM_DAEMON_GROUP}" = x""],
+ [CRM_DAEMON_GROUP="haclient"])
BUG_URL=""
AC_ARG_WITH([bug-url],
@@ -388,6 +328,8 @@ AC_ARG_WITH([bug-url],
@<:@https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker@:>@]))],
[ BUG_URL="$withval" ]
)
+AS_IF([test x"${BUG_URL}" = x""],
+ [BUG_URL="https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker"])
dnl --with-* options: features
@@ -422,9 +364,6 @@ AS_CASE([$with_concurrent_fencing_default],
[true], [PCMK_FEATURES="$PCMK_FEATURES default-concurrent-fencing"],
[AC_MSG_ERROR([Invalid value "$with_concurrent_fencing_default" for --with-concurrent-fencing-default])]
)
-AC_DEFINE_UNQUOTED([PCMK__CONCURRENT_FENCING_DEFAULT],
- ["$with_concurrent_fencing_default"],
- [Default value for concurrent-fencing cluster option])
AC_ARG_WITH([sbd-sync-default],
[AS_HELP_STRING([--with-sbd-sync-default], m4_normalize([
@@ -437,9 +376,6 @@ AS_CASE([$with_sbd_sync_default],
[true], [PCMK_FEATURES="$PCMK_FEATURES default-sbd-sync"],
[AC_MSG_ERROR([Invalid value "$with_sbd_sync_default" for --with-sbd-sync-default])]
)
-AC_DEFINE_UNQUOTED([PCMK__SBD_SYNC_DEFAULT],
- [$with_sbd_sync_default],
- [Default value for SBD_SYNC_RESOURCE_STARTUP environment variable])
AC_ARG_WITH([resource-stickiness-default],
[AS_HELP_STRING([--with-resource-stickiness-default],
@@ -451,9 +387,6 @@ AS_CASE([$with_resource_stickiness_default],
[*[[!0-9]]*], [AC_MSG_ERROR([$errmsg])],
[PCMK_FEATURES="$PCMK_FEATURES default-resource-stickiness"]
)
-AC_DEFINE_UNQUOTED([PCMK__RESOURCE_STICKINESS_DEFAULT],
- [$with_resource_stickiness_default],
- [Default value for resource-stickiness resource meta-attribute])
AC_ARG_WITH([corosync],
[AS_HELP_STRING([--with-corosync],
@@ -462,7 +395,7 @@ AC_ARG_WITH([corosync],
yes_no_try "$with_corosync" "try"
with_corosync=$?
-dnl Get default from corosync if possible.
+dnl Get default from Corosync if possible
PKG_CHECK_VAR([PCMK__COROSYNC_CONF], [corosync], [corosysconfdir],
[PCMK__COROSYNC_CONF="$PCMK__COROSYNC_CONF/corosync.conf"],
[PCMK__COROSYNC_CONF="${sysconfdir}/corosync/corosync.conf"])
@@ -553,10 +486,8 @@ AC_ARG_WITH([ocfdir],
/usr/lib/ocf@:>@]))],
[ OCF_ROOT_DIR="$withval" ]
)
-AC_SUBST(OCF_ROOT_DIR)
-AC_DEFINE_UNQUOTED([OCF_ROOT_DIR], ["$OCF_ROOT_DIR"],
- [OCF root directory for resource agents and libraries])
+dnl Get default from resource-agents if possible
PKG_CHECK_VAR([OCF_RA_PATH], [resource-agents], [ocfrapath], [],
[OCF_RA_PATH="$OCF_ROOT_DIR/resource.d"])
AC_ARG_WITH([ocfrapath],
@@ -566,7 +497,6 @@ AC_ARG_WITH([ocfrapath],
OCFDIR/resource.d@:>@]))],
[ OCF_RA_PATH="$withval" ]
)
-AC_SUBST(OCF_RA_PATH)
OCF_RA_INSTALL_DIR="$OCF_ROOT_DIR/resource.d"
AC_ARG_WITH([ocfrainstalldir],
@@ -575,7 +505,6 @@ AC_ARG_WITH([ocfrainstalldir],
@<:@OCFDIR/resource.d@:>@]))],
[ OCF_RA_INSTALL_DIR="$withval" ]
)
-AC_SUBST(OCF_RA_INSTALL_DIR)
dnl Get default from fence-agents if available
PKG_CHECK_VAR([FA_PREFIX], [fence-agents], [prefix],
@@ -587,7 +516,6 @@ AC_ARG_WITH([fence-bindir],
package if available otherwise SBINDIR@:>@]))],
[ PCMK__FENCE_BINDIR="$withval" ]
)
-AC_SUBST(PCMK__FENCE_BINDIR)
dnl --with-* options: non-production testing
@@ -620,31 +548,198 @@ AC_ARG_VAR([CFLAGS_HARDENED_EXE], [extra C compiler flags for hardened executabl
AC_ARG_VAR([LDFLAGS_HARDENED_EXE], [extra linker flags for hardened executables])
-dnl ===============================================
-dnl General Processing
-dnl ===============================================
+dnl ==============================================
+dnl Locate essential tools
+dnl ==============================================
-AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$VERSION",
- [Version number of this Pacemaker build])
+PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin"
+export PATH
-PACKAGE_SERIES=`echo $VERSION | awk -F. '{ print $1"."$2 }'`
-AC_SUBST(PACKAGE_SERIES)
+dnl Pacemaker's executable python scripts will invoke the python specified by
+dnl configure's PYTHON variable. If not specified, AM_PATH_PYTHON will check a
+dnl built-in list with (unversioned) "python" having precedence. To configure
+dnl Pacemaker to use a specific python interpreter version, define PYTHON
+dnl when calling configure, for example: ./configure PYTHON=/usr/bin/python3.6
+
+dnl If PYTHON was specified, ensure it is an absolute path
+AS_IF([test x"${PYTHON}" != x""], [AC_PATH_PROG([PYTHON], [$PYTHON])])
+
+dnl Require a minimum Python version
+AM_PATH_PYTHON([3.4])
AC_PROG_LN_S
AC_PROG_MKDIR_P
-# Check for fatal warning support
-AS_IF([test $enable_fatal_warnings -ne $DISABLED && test x"$GCC" = x"yes" && cc_supports_flag -Werror],
- [WERROR="-Werror"],
+AC_PATH_PROG([GIT], [git], [false])
+
+dnl Bash is needed for building man pages and running regression tests.
+dnl We set "BASH_PATH" because "BASH" is already an environment variable.
+REQUIRE_PROG([BASH_PATH], [bash])
+
+AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind)
+AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command)
+
+
+dnl ==============================================
+dnl Package and schema versioning
+dnl ==============================================
+
+# Redefine PACKAGE_VERSION and VERSION according to PACEMAKER_VERSION in case
+# the user used --with-version. Unfortunately, this can only affect the
+# substitution variables and later uses in this file, not the config.h
+# constants, so we have to be careful to use only PACEMAKER_VERSION in C code.
+PACKAGE_VERSION=$PACEMAKER_VERSION
+VERSION=$PACEMAKER_VERSION
+
+AC_DEFINE_UNQUOTED(PACEMAKER_VERSION, "$VERSION",
+ [Version number of this Pacemaker build])
+
+AC_MSG_CHECKING([build version])
+AS_IF([test "$GIT" != "false" && test -d .git],
[
- WERROR=""
- AS_CASE([$enable_fatal_warnings],
- [$REQUIRED], [AC_MSG_ERROR([Compiler does not support fatal warnings])],
- [$OPTIONAL], [
- AC_MSG_NOTICE([Compiler does not support fatal warnings])
- enable_fatal_warnings=$DISABLED
- ])
+ BUILD_VERSION=`"$GIT" log --pretty="format:%h" -n 1`
+ AC_MSG_RESULT([$BUILD_VERSION (git hash)])
+ ],
+ [
+ # The current directory name makes a reasonable default
+ # Most generated archives will include the hash or tag
+ BASE=`basename $PWD`
+ BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::`
+ AC_MSG_RESULT([$BUILD_VERSION (directory name)])
])
+AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version)
+AC_SUBST(BUILD_VERSION)
+
+# schema_files <schema-dir>
+# List all manually edited RNG schemas (as opposed to auto-generated via make)
+# in the given directory. Use git if available to list managed RNGs, in case
+# there are leftover schema files from an earlier build of a different
+# version. Otherwise, check all RNGs.
+schema_files() {
+ local files="$("$GIT" ls-files "$1"/*.rng 2>/dev/null)"
+
+ AS_IF([test x"$files" = x""],
+ [
+ files="$(ls -1 "$1"/*.rng | grep -E -v \
+ '/(pacemaker|api-result|crm_mon|versions)[^/]*\.rng')"
+ ])
+ echo "$files"
+}
+
+# latest_schema_version <schema-dir>
+# Determine highest RNG version in the given schema directory.
+latest_schema_version() {
+ schema_files "$1" | sed -n -e 's/^.*-\([[0-9]][[0-9.]]*\).rng$/\1/p' dnl
+ | sort -V | tail -1
+}
+
+# schemas_for_make <schema-dir>
+# Like schema_files, but suitable for use in make variables.
+schemas_for_make() {
+ local file
+
+ for file in $(schema_files "$1"); do
+ AS_ECHO_N(["\$(top_srcdir)/$file "])
+ done
+}
+
+# Detect highest API schema version
+API_VERSION=$(latest_schema_version "xml/api")
+AC_DEFINE_UNQUOTED([PCMK__API_VERSION], ["$API_VERSION"],
+ [Highest API schema version])
+
+# Detect highest CIB schema version
+CIB_VERSION=$(latest_schema_version "xml")
+AC_SUBST(CIB_VERSION)
+
+# Re-run configure at next make if schema files change, to re-detect versions
+cib_schemas="$(schemas_for_make "xml")"
+api_schemas="$(schemas_for_make "xml/api")"
+CONFIG_STATUS_DEPENDENCIES="$cib_schemas $api_schemas"
+AC_SUBST(CONFIG_STATUS_DEPENDENCIES)
+
+
+dnl ==============================================
+dnl Process simple options
+dnl ==============================================
+
+AS_IF([test $enable_compat_2_0 -ne $DISABLED],
+ [
+ AC_DEFINE_UNQUOTED([PCMK__COMPAT_2_0], [1],
+ [Keep certain output compatible with 2.0 release series])
+ PCMK_FEATURES="$PCMK_FEATURES compat-2.0"
+ ]
+)
+
+AM_CONDITIONAL([BUILD_LEGACY_LINKS], [test $enable_legacy_links -ne $DISABLED])
+
+AS_IF([test x"$enable_nls" = x"yes"], [PCMK_FEATURES="$PCMK_FEATURES nls"])
+
+AC_DEFINE_UNQUOTED([PCMK__CONCURRENT_FENCING_DEFAULT],
+ ["$with_concurrent_fencing_default"],
+ [Default value for concurrent-fencing cluster option])
+
+AC_DEFINE_UNQUOTED([PCMK__SBD_SYNC_DEFAULT],
+ [$with_sbd_sync_default],
+ [Default value for SBD_SYNC_RESOURCE_STARTUP environment variable])
+
+AC_DEFINE_UNQUOTED([PCMK__RESOURCE_STICKINESS_DEFAULT],
+ [$with_resource_stickiness_default],
+ [Default value for resource-stickiness resource meta-attribute])
+
+AS_IF([test x"${PCMK_GNUTLS_PRIORITIES}" != x""], [],
+ [AC_MSG_ERROR([--with-gnutls-priorities value must not be empty])])
+AC_DEFINE_UNQUOTED([PCMK_GNUTLS_PRIORITIES], ["$PCMK_GNUTLS_PRIORITIES"],
+ [GnuTLS cipher priorities])
+AC_SUBST(PCMK_GNUTLS_PRIORITIES)
+
+AC_SUBST(BUG_URL)
+AC_DEFINE_UNQUOTED([PCMK__BUG_URL], ["$BUG_URL"],
+ [Where bugs should be reported])
+
+AC_DEFINE_UNQUOTED([CRM_DAEMON_USER], ["$CRM_DAEMON_USER"],
+ [User to run Pacemaker daemons as])
+AC_SUBST(CRM_DAEMON_USER)
+
+AC_DEFINE_UNQUOTED([CRM_DAEMON_GROUP], ["$CRM_DAEMON_GROUP"],
+ [Group to run Pacemaker daemons as])
+AC_SUBST(CRM_DAEMON_GROUP)
+
+
+dnl ==============================================
+dnl Process file paths
+dnl ==============================================
+
+# expand_path_option <path-variable-name> [<default>]
+# Given the name of a file path variable, expand any variable references
+# inside it, use the specified default if it is not specified, and ensure it
+# is a full path.
+expand_path_option() {
+ # The first argument is the variable *name* (not value)
+ ac_path_varname="$1"
+
+ # Get the original value of the variable
+ ac_path_value=$(eval echo "\${${ac_path_varname}}")
+
+ # Expand any literal variable expressions in the value so that we don't
+ # end up with something like '${prefix}' in #defines etc.
+ #
+ # Autoconf deliberately leaves values unexpanded to allow overriding
+ # the configure script choices in make commands (for example,
+ # "make exec_prefix=/foo install"). No longer being able to do this seems
+ # like no great loss.
+ eval ac_path_value=$(eval echo "${ac_path_value}")
+
+ # Use (expanded) default if necessary
+ AS_IF([test x"${ac_path_value}" = x""],
+ [eval ac_path_value=$(eval echo "$2")])
+
+ # Require a full path
+ AS_CASE(["$ac_path_value"],
+ [/*], [eval ${ac_path_varname}="$ac_path_value"],
+ [*], [AC_MSG_ERROR([$ac_path_varname value "$ac_path_value" is not a full path])]
+ )
+}
AC_MSG_NOTICE([Sanitizing INITDIR: ${INITDIR}])
AS_CASE([$INITDIR],
@@ -670,6 +765,7 @@ expand_path_option exec_prefix
expand_path_option bindir
expand_path_option sbindir
expand_path_option libexecdir
+expand_path_option datarootdir
expand_path_option datadir
expand_path_option sysconfdir
expand_path_option sharedstatedir
@@ -680,10 +776,13 @@ expand_path_option oldincludedir
expand_path_option infodir
expand_path_option mandir
-dnl Home-grown variables
+AC_DEFUN([AC_DATAROOTDIR_CHECKED])
+
+dnl Expand values of custom directory options
expand_path_option localedir "${datadir}/locale"
-AC_DEFINE_UNQUOTED([PCMK__LOCALE_DIR],["$localedir"], [Base directory for message catalogs])
+AC_DEFINE_UNQUOTED([PCMK__LOCALE_DIR],["$localedir"],
+ [Base directory for message catalogs])
AS_IF([test x"${runstatedir}" = x""], [runstatedir="${pcmk_runstatedir}"])
expand_path_option runstatedir "${localstatedir}/run"
@@ -705,33 +804,88 @@ expand_path_option PCMK__COROSYNC_CONF "${sysconfdir}/corosync/corosync.conf"
AC_SUBST(PCMK__COROSYNC_CONF)
expand_path_option CRM_LOG_DIR "${localstatedir}/log/pacemaker"
-AC_DEFINE_UNQUOTED(CRM_LOG_DIR,"$CRM_LOG_DIR", Location for Pacemaker log file)
+AC_DEFINE_UNQUOTED([CRM_LOG_DIR], ["$CRM_LOG_DIR"],
+ [Location for Pacemaker log file])
AC_SUBST(CRM_LOG_DIR)
expand_path_option CRM_BUNDLE_DIR "${localstatedir}/log/pacemaker/bundles"
-AC_DEFINE_UNQUOTED(CRM_BUNDLE_DIR,"$CRM_BUNDLE_DIR", Location for Pacemaker bundle logs)
+AC_DEFINE_UNQUOTED([CRM_BUNDLE_DIR], ["$CRM_BUNDLE_DIR"],
+ [Location for Pacemaker bundle logs])
AC_SUBST(CRM_BUNDLE_DIR)
expand_path_option PCMK__FENCE_BINDIR
-AC_DEFINE_UNQUOTED(PCMK__FENCE_BINDIR,"$PCMK__FENCE_BINDIR",
+AC_SUBST(PCMK__FENCE_BINDIR)
+AC_DEFINE_UNQUOTED([PCMK__FENCE_BINDIR], ["$PCMK__FENCE_BINDIR"],
[Location for executable fence agents])
+expand_path_option OCF_ROOT_DIR
+AC_SUBST(OCF_ROOT_DIR)
+AC_DEFINE_UNQUOTED([OCF_ROOT_DIR], ["$OCF_ROOT_DIR"],
+ [OCF root directory for resource agents and libraries])
+
expand_path_option OCF_RA_PATH
+AC_SUBST(OCF_RA_PATH)
AC_DEFINE_UNQUOTED([OCF_RA_PATH], ["$OCF_RA_PATH"],
[OCF directories to search for resource agents ])
-AS_IF([test x"${PCMK_GNUTLS_PRIORITIES}" != x""], [],
- [AC_MSG_ERROR([--with-gnutls-priorities value must not be empty])])
-AC_DEFINE_UNQUOTED([PCMK_GNUTLS_PRIORITIES], ["$PCMK_GNUTLS_PRIORITIES"],
- [GnuTLS cipher priorities])
-AC_SUBST(PCMK_GNUTLS_PRIORITIES)
+expand_path_option OCF_RA_INSTALL_DIR
+AC_SUBST(OCF_RA_INSTALL_DIR)
-AS_IF([test x"${BUG_URL}" = x""],
- [BUG_URL="https://bugs.clusterlabs.org/enter_bug.cgi?product=Pacemaker"])
-AC_SUBST(BUG_URL)
-AC_DEFINE_UNQUOTED([PCMK__BUG_URL], ["$BUG_URL"],
- [Where bugs should be reported])
+# Derived paths
+
+CRM_SCHEMA_DIRECTORY="${datadir}/pacemaker"
+AC_DEFINE_UNQUOTED([CRM_SCHEMA_DIRECTORY], ["$CRM_SCHEMA_DIRECTORY"],
+ [Location for the Pacemaker Relax-NG Schema])
+AC_SUBST(CRM_SCHEMA_DIRECTORY)
+CRM_CORE_DIR="${localstatedir}/lib/pacemaker/cores"
+AC_DEFINE_UNQUOTED([CRM_CORE_DIR], ["$CRM_CORE_DIR"],
+ [Directory Pacemaker daemons should change to (without systemd, core files will go here)])
+AC_SUBST(CRM_CORE_DIR)
+
+CRM_PACEMAKER_DIR="${localstatedir}/lib/pacemaker"
+AC_DEFINE_UNQUOTED([CRM_PACEMAKER_DIR], ["$CRM_PACEMAKER_DIR"],
+ [Location to store directory produced by Pacemaker daemons])
+AC_SUBST(CRM_PACEMAKER_DIR)
+
+CRM_BLACKBOX_DIR="${localstatedir}/lib/pacemaker/blackbox"
+AC_DEFINE_UNQUOTED([CRM_BLACKBOX_DIR], ["$CRM_BLACKBOX_DIR"],
+ [Where to keep blackbox dumps])
+AC_SUBST(CRM_BLACKBOX_DIR)
+
+PE_STATE_DIR="${localstatedir}/lib/pacemaker/pengine"
+AC_DEFINE_UNQUOTED([PE_STATE_DIR], ["$PE_STATE_DIR"],
+ [Where to keep scheduler outputs])
+AC_SUBST(PE_STATE_DIR)
+
+CRM_CONFIG_DIR="${localstatedir}/lib/pacemaker/cib"
+AC_DEFINE_UNQUOTED([CRM_CONFIG_DIR], ["$CRM_CONFIG_DIR"],
+ [Where to keep configuration files])
+AC_SUBST(CRM_CONFIG_DIR)
+
+CRM_DAEMON_DIR="${libexecdir}/pacemaker"
+AC_DEFINE_UNQUOTED([CRM_DAEMON_DIR], ["$CRM_DAEMON_DIR"],
+ [Location for Pacemaker daemons])
+AC_SUBST(CRM_DAEMON_DIR)
+
+CRM_STATE_DIR="${runstatedir}/crm"
+AC_DEFINE_UNQUOTED([CRM_STATE_DIR], ["$CRM_STATE_DIR"],
+ [Where to keep state files and sockets])
+AC_SUBST(CRM_STATE_DIR)
+
+CRM_RSCTMP_DIR="${runstatedir}/resource-agents"
+AC_DEFINE_UNQUOTED([CRM_RSCTMP_DIR], ["$CRM_RSCTMP_DIR"],
+ [Where resource agents should keep state files])
+AC_SUBST(CRM_RSCTMP_DIR)
+
+PACEMAKER_CONFIG_DIR="${sysconfdir}/pacemaker"
+AC_DEFINE_UNQUOTED([PACEMAKER_CONFIG_DIR], ["$PACEMAKER_CONFIG_DIR"],
+ [Where to keep configuration files like authkey])
+AC_SUBST(PACEMAKER_CONFIG_DIR)
+
+AC_DEFINE_UNQUOTED([SBIN_DIR], ["$sbindir"], [Location for system binaries])
+
+# Warn about any directories that don't exist (which may be OK)
for j in prefix exec_prefix bindir sbindir libexecdir datadir sysconfdir \
sharedstatedir localstatedir libdir includedir oldincludedir infodir \
mandir INITDIR docdir CONFIGDIR localedir
@@ -741,6 +895,10 @@ do
[AC_MSG_WARN([$j directory ($dirname) does not exist (yet)])])
done
+dnl ===============================================
+dnl General Processing
+dnl ===============================================
+
us_auth=
AC_CHECK_HEADER([sys/socket.h], [
AC_CHECK_DECL([SO_PEERCRED], [
@@ -786,6 +944,10 @@ AS_IF([test -z "${us_auth}"], [
dnl OS-based decision-making is poor autotools practice; feature-based
dnl mechanisms are strongly preferred. Keep this section to a bare minimum;
dnl regard as a "necessary evil".
+
+dnl Set host_os and host_cpu
+AC_CANONICAL_HOST
+
INIT_EXT=""
PROCFS=0
dnl Solaris and some *BSD versions support procfs but not files we need
@@ -810,24 +972,10 @@ AS_CASE(["$host_cpu"],
])
])
-dnl ===============================================
-dnl Program Paths
-dnl ===============================================
-
-PATH="$PATH:/sbin:/usr/sbin:/usr/local/sbin:/usr/local/bin"
-export PATH
-
-dnl Pacemaker's executable python scripts will invoke the python specified by
-dnl configure's PYTHON variable. If not specified, AM_PATH_PYTHON will check a
-dnl built-in list with (unversioned) "python" having precedence. To configure
-dnl Pacemaker to use a specific python interpreter version, define PYTHON
-dnl when calling configure, for example: ./configure PYTHON=/usr/bin/python3.6
-
-dnl Ensure PYTHON is an absolute path
-AS_IF([test x"${PYTHON}" != x""], [AC_PATH_PROG([PYTHON], [$PYTHON])])
-dnl Require a minimum Python version
-AM_PATH_PYTHON([3.4])
+dnl ==============================================
+dnl Documentation build dependencies and checks
+dnl ==============================================
AC_PATH_PROGS([ASCIIDOC_CONV], [asciidoc asciidoctor])
AC_PATH_PROG([HELP2MAN], [help2man])
@@ -836,15 +984,6 @@ AC_PATH_PROG([INKSCAPE], [inkscape])
AC_PATH_PROG([XSLTPROC], [xsltproc])
AC_PATH_PROG([XMLCATALOG], [xmlcatalog])
-dnl Bash is needed for building man pages and running regression tests.
-dnl BASH is already an environment variable, so use something else.
-AC_PATH_PROG([BASH_PATH], [bash])
-AS_IF([test x"${BASH_PATH}" != x""], [],
- [AC_MSG_FAILURE([Could not find required build tool bash])])
-
-AC_PATH_PROGS(VALGRIND_BIN, valgrind, /usr/bin/valgrind)
-AC_DEFINE_UNQUOTED(VALGRIND_BIN, "$VALGRIND_BIN", Valgrind command)
-
AM_CONDITIONAL(BUILD_HELP, test x"${HELP2MAN}" != x"")
AS_IF([test x"${HELP2MAN}" != x""],
[PCMK_FEATURES="$PCMK_FEATURES generated-manpages"])
@@ -913,28 +1052,6 @@ AS_IF([test -n "$GETOPT_PATH"], [AC_MSG_RESULT([$GETOPT_PATH])],
])
AC_SUBST([GETOPT_PATH])
-dnl ========================================================================
-dnl checks for library functions to replace them
-dnl
-dnl NoSuchFunctionName:
-dnl is a dummy function which no system supplies. It is here to make
-dnl the system compile semi-correctly on OpenBSD which doesn't know
-dnl how to create an empty archive
-dnl
-dnl scandir: Only on BSD.
-dnl System-V systems may have it, but hidden and/or deprecated.
-dnl A replacement function is supplied for it.
-dnl
-dnl strerror: returns a string that corresponds to an errno.
-dnl A replacement function is supplied for it.
-dnl
-dnl strnlen: is a gnu function similar to strlen, but safer.
-dnl We wrote a tolerably-fast replacement function for it.
-dnl
-dnl strndup: is a gnu function similar to strdup, but safer.
-dnl We wrote a tolerably-fast replacement function for it.
-
-AC_REPLACE_FUNCS(alphasort NoSuchFunctionName scandir strerror strchrnul strnlen strndup)
dnl ===============================================
dnl Libraries
@@ -973,12 +1090,24 @@ AS_IF([test x"$ac_cv_lib_c_dlopen" = x"yes"],
[LIBADD_DL=-ldl],
[LIBADD_DL=${lt_cv_dlopen_libs}])
-PKG_CHECK_MODULES(LIBXML2, [libxml-2.0],
+PKG_CHECK_MODULES(LIBXML2, [libxml-2.0 >= 2.6.0],
[CPPFLAGS="${CPPFLAGS} ${LIBXML2_CFLAGS}"
LIBS="${LIBS} ${LIBXML2_LIBS}"])
REQUIRE_LIB([xslt], [xsltApplyStylesheet])
+AC_MSG_CHECKING([whether __progname and __progname_full are available])
+AC_LINK_IFELSE([AC_LANG_PROGRAM([[extern char *__progname, *__progname_full;]],
+ [[__progname = "foo";
+ __progname_full = "foo bar";]])],
+ [
+ have_progname="yes"
+ AC_DEFINE(HAVE_PROGNAME, 1,
+ [Define to 1 if processes can change their name])
+ ],
+ [have_progname="no"])
+AC_MSG_RESULT([$have_progname])
+
dnl ========================================================================
dnl Headers
dnl ========================================================================
@@ -1000,18 +1129,30 @@ AC_CHECK_HEADERS([security/pam_appl.h pam/pam_appl.h])
REQUIRE_HEADER([arpa/inet.h])
REQUIRE_HEADER([ctype.h])
REQUIRE_HEADER([dirent.h])
+REQUIRE_HEADER([dlfcn.h])
REQUIRE_HEADER([errno.h])
+REQUIRE_HEADER([fcntl.h])
+REQUIRE_HEADER([float.h])
REQUIRE_HEADER([glib.h])
REQUIRE_HEADER([grp.h])
+REQUIRE_HEADER([inttypes.h])
+REQUIRE_HEADER([libgen.h])
REQUIRE_HEADER([limits.h])
+REQUIRE_HEADER([locale.h])
REQUIRE_HEADER([netdb.h])
REQUIRE_HEADER([netinet/in.h])
REQUIRE_HEADER([netinet/ip.h], [
#include <sys/types.h>
#include <netinet/in.h>
])
+REQUIRE_HEADER([netinet/tcp.h])
REQUIRE_HEADER([pwd.h])
+REQUIRE_HEADER([regex.h])
+REQUIRE_HEADER([sched.h])
REQUIRE_HEADER([signal.h])
+REQUIRE_HEADER([stdarg.h])
+REQUIRE_HEADER([stdbool.h])
+REQUIRE_HEADER([stdint.h])
REQUIRE_HEADER([stdio.h])
REQUIRE_HEADER([stdlib.h])
REQUIRE_HEADER([string.h])
@@ -1024,8 +1165,10 @@ REQUIRE_HEADER([sys/socket.h])
REQUIRE_HEADER([sys/stat.h])
REQUIRE_HEADER([sys/time.h])
REQUIRE_HEADER([sys/types.h])
+REQUIRE_HEADER([sys/uio.h])
REQUIRE_HEADER([sys/utsname.h])
REQUIRE_HEADER([sys/wait.h])
+REQUIRE_HEADER([termios.h])
REQUIRE_HEADER([time.h])
REQUIRE_HEADER([unistd.h])
REQUIRE_HEADER([libxml/xpath.h])
@@ -1033,21 +1176,6 @@ REQUIRE_HEADER([libxslt/xslt.h])
cc_restore_flags
-AC_CHECK_FUNCS([uuid_unparse], [],
- [AC_MSG_FAILURE([Could not find required C function uuid_unparse()])])
-
-AC_CACHE_CHECK([whether __progname and __progname_full are available],
- [pf_cv_var_progname],
- [AC_LINK_IFELSE(
- [AC_LANG_PROGRAM([[extern char *__progname, *__progname_full;]],
- [[__progname = "foo"; __progname_full = "foo bar";]])],
- [pf_cv_var_progname="yes"],
- [pf_cv_var_progname="no"]
- )]
- )
-AS_IF([test x"$pf_cv_var_progname" = x"yes"],
- [AC_DEFINE(HAVE_PROGNAME,1,[Define to 1 if processes can change their name])])
-
dnl ========================================================================
dnl Generic declarations
dnl ========================================================================
@@ -1101,25 +1229,42 @@ dnl ========================================================================
dnl Functions
dnl ========================================================================
+REQUIRE_FUNC([alphasort])
REQUIRE_FUNC([getopt])
+REQUIRE_FUNC([scandir])
REQUIRE_FUNC([setenv])
+REQUIRE_FUNC([strndup])
+REQUIRE_FUNC([strnlen])
REQUIRE_FUNC([unsetenv])
+REQUIRE_FUNC([uuid_unparse])
REQUIRE_FUNC([vasprintf])
-AC_CACHE_CHECK(whether sscanf supports %m,
- pf_cv_var_sscanf,
- AC_RUN_IFELSE([AC_LANG_SOURCE([[
-#include <stdio.h>
-const char *s = "some-command-line-arg";
-int main(int argc, char **argv) {
-char *name = NULL;
-int n = sscanf(s, "%ms", &name);
-return n == 1 ? 0 : 1;
-}
-]])],
- pf_cv_var_sscanf="yes", pf_cv_var_sscanf="no", pf_cv_var_sscanf="no"))
-
-AS_IF([test x"$pf_cv_var_sscanf" = x"yes"],
+AC_CHECK_FUNCS([strchrnul])
+
+AC_CHECK_FUNCS([fopen64])
+AM_CONDITIONAL([WRAPPABLE_FOPEN64], [test x"$ac_cv_func_fopen64" = x"yes"])
+
+AC_MSG_CHECKING([whether strerror always returns non-NULL])
+AC_RUN_IFELSE([AC_LANG_PROGRAM([[
+ #include <stdio.h>
+ #include <string.h>
+ ]], [[
+ return strerror(-1) == NULL;
+ ]])],
+ [AC_MSG_RESULT([yes])],
+ [AC_MSG_ERROR([strerror() is not C99-compliant])],
+ [AC_MSG_ERROR([strerror() is not C99-compliant])])
+
+AC_RUN_IFELSE([AC_LANG_PROGRAM([[#include <stdio.h>]], [[
+ const char *s = "some-command-line-arg";
+ char *name = NULL;
+ int n = sscanf(s, "%ms", &name);
+ return n != 1;
+ ]])],
+ [have_sscanf_m="yes"],
+ [have_sscanf_m="no"],
+ [have_sscanf_m="no"])
+AS_IF([test x"$have_sscanf_m" = x"yes"],
[AC_DEFINE([HAVE_SSCANF_M], [1],
[Define to 1 if sscanf %m modifier is available])])
@@ -1308,84 +1453,10 @@ AC_CHECK_HEADERS([stonith/stonith.h],
])
AM_CONDITIONAL([BUILD_LHA_SUPPORT], [test x"$ac_cv_header_stonith_stonith_h" = x"yes"])
+
dnl ===============================================
-dnl Variables needed for substitution
+dnl Detect DBus, systemd, and Upstart support
dnl ===============================================
-CRM_SCHEMA_DIRECTORY="${datadir}/pacemaker"
-AC_DEFINE_UNQUOTED(CRM_SCHEMA_DIRECTORY,"$CRM_SCHEMA_DIRECTORY", Location for the Pacemaker Relax-NG Schema)
-AC_SUBST(CRM_SCHEMA_DIRECTORY)
-
-CRM_CORE_DIR="${localstatedir}/lib/pacemaker/cores"
-AC_DEFINE_UNQUOTED([CRM_CORE_DIR], ["$CRM_CORE_DIR"],
- [Directory Pacemaker daemons should change to (without systemd, core files will go here)])
-AC_SUBST(CRM_CORE_DIR)
-
-AS_IF([test x"${CRM_DAEMON_USER}" = x""],
- [CRM_DAEMON_USER="hacluster"])
-AC_DEFINE_UNQUOTED(CRM_DAEMON_USER,"$CRM_DAEMON_USER", User to run Pacemaker daemons as)
-AC_SUBST(CRM_DAEMON_USER)
-
-AS_IF([test x"${CRM_DAEMON_GROUP}" = x""],
- [CRM_DAEMON_GROUP="haclient"])
-AC_DEFINE_UNQUOTED(CRM_DAEMON_GROUP,"$CRM_DAEMON_GROUP", Group to run Pacemaker daemons as)
-AC_SUBST(CRM_DAEMON_GROUP)
-
-CRM_PACEMAKER_DIR=${localstatedir}/lib/pacemaker
-AC_DEFINE_UNQUOTED(CRM_PACEMAKER_DIR,"$CRM_PACEMAKER_DIR", Location to store directory produced by Pacemaker daemons)
-AC_SUBST(CRM_PACEMAKER_DIR)
-
-CRM_BLACKBOX_DIR=${localstatedir}/lib/pacemaker/blackbox
-AC_DEFINE_UNQUOTED(CRM_BLACKBOX_DIR,"$CRM_BLACKBOX_DIR", Where to keep blackbox dumps)
-AC_SUBST(CRM_BLACKBOX_DIR)
-
-PE_STATE_DIR="${localstatedir}/lib/pacemaker/pengine"
-AC_DEFINE_UNQUOTED(PE_STATE_DIR,"$PE_STATE_DIR", Where to keep scheduler outputs)
-AC_SUBST(PE_STATE_DIR)
-
-CRM_CONFIG_DIR="${localstatedir}/lib/pacemaker/cib"
-AC_DEFINE_UNQUOTED(CRM_CONFIG_DIR,"$CRM_CONFIG_DIR", Where to keep configuration files)
-AC_SUBST(CRM_CONFIG_DIR)
-
-CRM_DAEMON_DIR="${libexecdir}/pacemaker"
-AC_DEFINE_UNQUOTED(CRM_DAEMON_DIR,"$CRM_DAEMON_DIR", Location for Pacemaker daemons)
-AC_SUBST(CRM_DAEMON_DIR)
-
-CRM_STATE_DIR="${runstatedir}/crm"
-AC_DEFINE_UNQUOTED([CRM_STATE_DIR], ["$CRM_STATE_DIR"],
- [Where to keep state files and sockets])
-AC_SUBST(CRM_STATE_DIR)
-
-CRM_RSCTMP_DIR="${runstatedir}/resource-agents"
-AC_DEFINE_UNQUOTED(CRM_RSCTMP_DIR,"$CRM_RSCTMP_DIR", Where resource agents should keep state files)
-AC_SUBST(CRM_RSCTMP_DIR)
-
-PACEMAKER_CONFIG_DIR="${sysconfdir}/pacemaker"
-AC_DEFINE_UNQUOTED(PACEMAKER_CONFIG_DIR,"$PACEMAKER_CONFIG_DIR", Where to keep configuration files like authkey)
-AC_SUBST(PACEMAKER_CONFIG_DIR)
-
-AC_DEFINE_UNQUOTED(SBIN_DIR,"$sbindir",[Location for system binaries])
-
-AC_PATH_PROGS(GIT, git false)
-
-AC_MSG_CHECKING([build version])
-BUILD_VERSION=6fdc9deea29
-AS_IF([test $BUILD_VERSION != ":%h$"],
- [AC_MSG_RESULT([$BUILD_VERSION (archive hash)])],
- [test -x $GIT && test -d .git],
- [
- BUILD_VERSION=`$GIT log --pretty="format:%h" -n 1`
- AC_MSG_RESULT([$BUILD_VERSION (git hash)])
- ],
- [
- # The current directory name make a reasonable default
- # Most generated archives will include the hash or tag
- BASE=`basename $PWD`
- BUILD_VERSION=`echo $BASE | sed s:.*[[Pp]]acemaker-::`
- AC_MSG_RESULT([$BUILD_VERSION (directory name)])
- ])
-
-AC_DEFINE_UNQUOTED(BUILD_VERSION, "$BUILD_VERSION", Build version)
-AC_SUBST(BUILD_VERSION)
HAVE_dbus=1
PKG_CHECK_MODULES([DBUS], [dbus-1],
@@ -1400,6 +1471,14 @@ AS_IF([test $HAVE_dbus = 0],
[PC_NAME_DBUS="dbus-1"])
AC_SUBST(PC_NAME_DBUS)
+check_systemdsystemunitdir() {
+ AC_MSG_CHECKING([which system unit file directory to use])
+ PKG_CHECK_VAR([systemdsystemunitdir], [systemd], [systemdsystemunitdir])
+ AC_MSG_RESULT([${systemdsystemunitdir}])
+ test x"$systemdsystemunitdir" != x""
+ return $?
+}
+
AS_CASE([$enable_systemd],
[$REQUIRED], [
AS_IF([test $HAVE_dbus = 0],
@@ -1489,6 +1568,11 @@ AC_DEFINE_UNQUOTED([SUPPORT_UPSTART], [$enable_upstart],
AM_CONDITIONAL([BUILD_UPSTART], [test $enable_upstart -eq $REQUIRED])
AC_SUBST(SUPPORT_UPSTART)
+
+dnl ========================================================================
+dnl Detect Nagios support
+dnl ========================================================================
+
AS_CASE([$with_nagios],
[$REQUIRED], [
AS_IF([test x"$ac_cv_have_decl_CLOCK_MONOTONIC" = x"no"],
@@ -1977,19 +2061,17 @@ CONFIG_FILES_EXEC([agents/ocf/ClusterMon],
[agents/ocf/remote],
[agents/stonith/fence_legacy],
[agents/stonith/fence_watchdog],
+ [cts/cluster_test],
+ [cts/cts],
[cts/cts-attrd],
[cts/cts-cli],
[cts/cts-exec],
[cts/cts-fencing],
+ [cts/cts-lab],
+ [cts/cts-log-watcher],
[cts/cts-regression],
[cts/cts-scheduler],
- [cts/lxc_autogen.sh],
[cts/benchmark/clubench],
- [cts/lab/CTSlab.py],
- [cts/lab/OCFIPraTest.py],
- [cts/lab/cluster_test],
- [cts/lab/cts],
- [cts/lab/cts-log-watcher],
[cts/support/LSBDummy],
[cts/support/cts-support],
[cts/support/fence_dummy],
@@ -1998,13 +2080,13 @@ CONFIG_FILES_EXEC([agents/ocf/ClusterMon],
[maint/bumplibs],
[tools/cluster-clean],
[tools/cluster-helper],
- [tools/cluster-init],
[tools/crm_failcount],
[tools/crm_master],
[tools/crm_report],
[tools/crm_standby],
[tools/cibsecret],
- [tools/pcmk_simtimes])
+ [tools/pcmk_simtimes],
+ [xml/version-diff.sh])
dnl Other files we output
AC_CONFIG_FILES(Makefile \
@@ -2014,7 +2096,6 @@ AC_CONFIG_FILES(Makefile \
agents/stonith/Makefile \
cts/Makefile \
cts/benchmark/Makefile \
- cts/lab/Makefile \
cts/scheduler/Makefile \
cts/scheduler/dot/Makefile \
cts/scheduler/exp/Makefile \
@@ -2059,6 +2140,7 @@ AC_CONFIG_FILES(Makefile \
lib/common/Makefile \
lib/common/tests/Makefile \
lib/common/tests/acl/Makefile \
+ lib/common/tests/actions/Makefile \
lib/common/tests/agents/Makefile \
lib/common/tests/cmdline/Makefile \
lib/common/tests/flags/Makefile \
@@ -2067,7 +2149,6 @@ AC_CONFIG_FILES(Makefile \
lib/common/tests/iso8601/Makefile \
lib/common/tests/lists/Makefile \
lib/common/tests/nvpair/Makefile \
- lib/common/tests/operations/Makefile \
lib/common/tests/options/Makefile \
lib/common/tests/output/Makefile \
lib/common/tests/procfs/Makefile \
@@ -2104,9 +2185,9 @@ AC_CONFIG_FILES(Makefile \
python/setup.py \
python/pacemaker/Makefile \
python/pacemaker/_cts/Makefile \
+ python/pacemaker/_cts/tests/Makefile \
python/pacemaker/buildoptions.py \
python/tests/Makefile \
- replace/Makefile \
rpm/Makefile \
tests/Makefile \
tools/Makefile \
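Note on the systemd detection added above: the check_systemdsystemunitdir() helper relies on pkg.m4's PKG_CHECK_VAR to read the systemdsystemunitdir variable that systemd exports through pkg-config. As a rough sketch only (not part of the patch), the same lookup can be reproduced from a plain shell prompt; the printed path is typically /usr/lib/systemd/system or /lib/systemd/system depending on the distribution:

    # Ask pkg-config where systemd expects unit files to be installed.
    unitdir=$(pkg-config --variable=systemdsystemunitdir systemd)

    # An empty result means systemd's pkg-config metadata is unavailable,
    # which is the same condition the configure helper treats as failure.
    test -n "$unitdir" && echo "unit files go in $unitdir"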
diff --git a/cts/Makefile.am b/cts/Makefile.am
index a2e6738..598ae32 100644
--- a/cts/Makefile.am
+++ b/cts/Makefile.am
@@ -12,42 +12,29 @@ MAINTAINERCLEANFILES = Makefile.in
# Test commands and globally applicable test files should be in $(testdir),
# and command-specific test data should be in a command-specific subdirectory.
testdir = $(datadir)/$(PACKAGE)/tests
-test_SCRIPTS = cts-attrd \
+test_SCRIPTS = cts-attrd \
cts-cli \
cts-exec \
cts-fencing \
+ cts-lab \
cts-regression \
cts-scheduler
dist_test_DATA = README.md \
valgrind-pcmk.suppressions
-ctsdir = $(testdir)/cts
-cts_SCRIPTS = lxc_autogen.sh
-
clidir = $(testdir)/cli
-dist_cli_DATA = cli/constraints.xml \
- cli/crmadmin-cluster-remote-guest-nodes.xml \
- cli/crm_diff_new.xml \
- cli/crm_diff_old.xml \
- cli/crm_mon.xml \
- cli/crm_mon-feature_set.xml \
- cli/crm_mon-partial.xml \
- cli/crm_mon-rsc-maint.xml \
- cli/crm_mon-T180.xml \
- cli/crm_mon-unmanaged.xml \
- cli/crm_resource_digests.xml \
- cli/regression.acls.exp \
- cli/regression.crm_mon.exp \
- cli/regression.daemons.exp \
- cli/regression.dates.exp \
- cli/regression.error_codes.exp \
- cli/regression.feature_set.exp \
- cli/regression.rules.exp \
- cli/regression.tools.exp \
- cli/regression.upgrade.exp \
- cli/regression.validity.exp \
- cli/regression.access_render.exp
+dist_cli_DATA = $(wildcard cli/*.xml cli/*.exp)
+
+ctsdir = $(datadir)/$(PACKAGE)/tests/cts
+cts_SCRIPTS = cts
+
+# Commands intended to be run only via other commands
+halibdir = $(CRM_DAEMON_DIR)
+dist_halib_SCRIPTS = cts-log-watcher
+noinst_SCRIPTS = cluster_test
+
+.PHONY: scheduler-list
scheduler-list:
@for T in "$(srcdir)"/scheduler/xml/*.xml; do \
echo $$(basename $$T .xml); \
@@ -55,15 +42,35 @@ scheduler-list:
CLEANFILES = $(builddir)/.regression.failed.diff
+.PHONY: clean-local
clean-local:
rm -f scheduler/*/*.pe
-SUBDIRS = benchmark lab scheduler support
+SUBDIRS = benchmark \
+ scheduler \
+ support
+.PHONY: cts-support-install
cts-support-install:
$(MAKE) $(AM_MAKEFLAGS) -C support cts-support
$(builddir)/support/cts-support install
+.PHONY: cts-support-uninstall
cts-support-uninstall:
$(MAKE) $(AM_MAKEFLAGS) -C support cts-support
$(builddir)/support/cts-support uninstall
+
+# Everything listed here is a python script, typically generated from a .in file
+# (though that is not a requirement). We want to run pylint on all of these
+# things after they've been built.
+python_files = cts-attrd \
+ cts-exec \
+ cts-fencing \
+ cts-lab \
+ cts-log-watcher \
+ cts-regression \
+ cts-scheduler
+
+.PHONY: pylint
+pylint: $(python_files)
+ PYTHONPATH=$(top_builddir)/python pylint --rcfile $(top_srcdir)/python/pylintrc $(python_files)
diff --git a/cts/README.md b/cts/README.md
index 0ff1065..cbf319a 100644
--- a/cts/README.md
+++ b/cts/README.md
@@ -21,11 +21,10 @@ CTS includes:
* The CTS lab: This is a cluster exerciser for intensively testing the behavior
of an entire working cluster. It is primarily for developers and packagers of
the Pacemaker source code, but it can be useful for users who wish to see how
- their cluster will react to various situations. In an installed deployment,
- the CTS lab is in the cts subdirectory of this directory; in a source
- distibution, it is in cts/lab.
+ their cluster will react to various situations. Most of the lab code is in
+ the Pacemaker Python module. The front end, cts-lab, is in this directory.
- The CTS lab runs a randomized series of predefined tests on the cluster. CTS
+ The CTS lab runs a randomized series of predefined tests on the cluster. It
can be run against a pre-existing cluster configuration or overwrite the
existing configuration with a test configuration.
@@ -46,15 +45,13 @@ CTS includes:
/usr/libexec/pacemaker/cts-support uninstall
+ (The actual directory location may vary depending on how Pacemaker was
+ built.)
+
* Cluster benchmark: The benchmark subdirectory of this directory contains some
cluster test environment benchmarking code. It is not particularly useful for
end users.
-* LXC generator: The lxc\_autogen.sh script can be used to create some guest
- nodes for testing using LXC containers. It is not particularly useful for end
- users. In an installed deployment, it is in the cts subdirectory of this
- directory; in a source distribution, it is in this directory.
-
* Valgrind suppressions: When memory-testing Pacemaker code with valgrind,
various bugs in non-Pacemaker libraries and such can clutter the results. The
valgrind-pcmk.suppressions file in this directory can be used with valgrind's
@@ -109,9 +106,11 @@ CTS includes:
### Run
-The primary interface to the CTS lab is the CTSlab.py executable:
+The primary interface to the CTS lab is the cts-lab executable:
- /usr/share/pacemaker/tests/cts/CTSlab.py [options] <number-of-tests-to-run>
+ /usr/share/pacemaker/tests/cts-lab [options] <number-of-tests-to-run>
+
+(The actual directory location may vary depending on how Pacemaker was built.)
As part of the options, specify the cluster nodes with --nodes, for example:
@@ -138,13 +137,13 @@ Configure some sort of fencing, for example to use fence\_xvm:
Putting all the above together, a command line might look like:
- /usr/share/pacemaker/tests/cts/CTSlab.py --nodes "pcmk-1 pcmk-2 pcmk-3" \
+ /usr/share/pacemaker/tests/cts-lab --nodes "pcmk-1 pcmk-2 pcmk-3" \
--outputfile ~/cts.log --clobber-cib --populate-resources \
--test-ip-base 192.168.9.100 --stonith xvm 50
For more options, run with the --help option.
-There are also a couple of wrappers for CTSlab.py that some users may find more
+There are also a couple of wrappers for cts-lab that some users may find more
convenient: cts, which is typically installed in the same place as the rest of
the testing code; and cluster\_test, which is in the source directory and
typically not installed.
@@ -172,7 +171,7 @@ setting the following environment variables on all cluster nodes:
--gen-suppressions=all"
If running the CTS lab with valgrind enabled on the cluster nodes, add these
-options to CTSlab.py:
+options to cts-lab:
--valgrind-tests --valgrind-procs "pacemaker-attrd pacemaker-based pacemaker-controld pacemaker-execd pacemaker-schedulerd pacemaker-fenced"
@@ -217,22 +216,22 @@ lab, but the C library variables may be set differently on different nodes.
### Optional: Remote node testing
-If the pacemaker-remoted daemon is installed on all cluster nodes, CTS will
-enable remote node tests.
+If the pacemaker-remoted daemon is installed on all cluster nodes, the CTS lab
+will enable remote node tests.
The remote node tests choose a random node, stop the cluster on it, start
pacemaker-remoted on it, and add an ocf:pacemaker:remote resource to turn it
-into a remote node. When the test is done, CTS will turn the node back into
+into a remote node. When the test is done, the lab will turn the node back into
a cluster node.
-To avoid conflicts, CTS will rename the node, prefixing the original node name
-with "remote-". For example, "pcmk-1" will become "remote-pcmk-1". These names
-do not need to be resolvable.
+To avoid conflicts, the lab will rename the node, prefixing the original node
+name with "remote-". For example, "pcmk-1" will become "remote-pcmk-1". These
+names do not need to be resolvable.
The name change may require special fencing configuration, if the fence agent
expects the node name to be the same as its hostname. A common approach is to
specify the "remote-" names in pcmk\_host\_list. If you use
-pcmk\_host\_list=all, CTS will expand that to all cluster nodes and their
+pcmk\_host\_list=all, the lab will expand that to all cluster nodes and their
"remote-" names. You may additionally need a pcmk\_host\_map argument to map
the "remote-" names to the hostnames. Example:
@@ -267,34 +266,9 @@ valgrind. For example:
EOF
-### Optional: Container testing
-
-If the --container-tests option is given to CTSlab.py, it will enable
-testing of LXC resources (currently only the RemoteLXC test,
-which starts a remote node using an LXC container).
-
-The container tests have additional package dependencies (see the toplevel
-INSTALL.md). Also, SELinux must be enabled (in either permissive or enforcing
-mode), libvirtd must be enabled and running, and root must be able to ssh
-without a password between all cluster nodes (not just from the exerciser).
-Before running the tests, you can verify your environment with:
-
- /usr/share/pacemaker/tests/cts/lxc_autogen.sh -v
-
-LXC tests will create two containers with hardcoded parameters: a NAT'ed bridge
-named virbr0 using the IP network 192.168.123.0/24 will be created on the
-cluster node hosting the containers; the host will be assigned
-52:54:00:A8:12:35 as the MAC address and 192.168.123.1 as the IP address.
-Each container will be assigned a random MAC address starting with 52:54:,
-the IP address 192.168.123.11 or 192.168.123.12, the hostname lxc1 or lxc2
-(which will be added to the host's /etc/hosts file), and 196MB RAM.
-
-The test will revert all of the configuration when it is done.
-
-
### Mini-HOWTO: Allow passwordless remote SSH connections
-The CTS scripts run "ssh -l root" so you don't have to do any of your testing
+The CTS lab runs "ssh -l root" so you don't have to do any of your testing
logged in as root on the exerciser. Here is how to allow such connections
without requiring a password to be entered each time:
@@ -328,42 +302,20 @@ without requiring a password to be entered each time:
If not, look at the documentation for your version of ssh.
-## Note on the maintenance
+## Upgrading scheduler test inputs for new XSLTs
-### Tests for scheduler
-
-The source `*.xml` files are preferably kept in sync with the newest
-major (and only major, which is enough) schema version, since these
-tests are not meant to double as schema upgrade ones (except some cases
+The scheduler/xml inputs should be kept in sync with the latest major schema
+version, since these tests are not meant to test schema upgrades (unless
expressly designated as such).
-Currently and unless something goes wrong, the procedure of upgrading
-these tests en masse is as easy as:
+To upgrade the inputs to a new major schema version:
- cd "$(git rev-parse --show-toplevel)/cts" # if not already
- pushd "$(git rev-parse --show-toplevel)/xml"
+ cd "$(git rev-parse --show-toplevel)/xml"
./regression.sh cts_scheduler -G
- popd
+ cd "$(git rev-parse --show-toplevel)/cts"
git add --interactive .
- git commit -m 'XML: upgrade-M.N.xsl: apply on scheduler CTS test cases'
- git reset HEAD && git checkout . # if some differences still remain
- ./cts-scheduler # absolutely vital to check nothing got broken!
-
-Now, sadly, there's no proved automated way to minimize instances like this:
-
- <primitive id="rsc1" class="ocf" provider="heartbeat" type="apache">
- </primitive>
-
-that may be left behind into more canonical:
-
- <primitive id="rsc1" class="ocf" provider="heartbeat" type="apache"/>
-
-so manual editing is tasked, or perhaps `--format` or `--c14n`
-to `xmllint` will be of help (without any other side effects).
+ git commit -m 'Test: scheduler: upgrade test inputs to schema $X.$Y'
+ ./cts-scheduler || echo 'Investigate what went wrong'
-If the overall process gets stuck anywhere, common sense to the rescue.
-The initial part of the above recipe can be repeated anytime to verify
-there's nothing to upgrade artificially like this, which is a desired
-state. Note that `regression.sh` script performs validation of both
-the input and output, should the upgrade take place, implicitly, so
-there's no need of revalidation in the happy case.
+The first two commands can be run anytime to verify no further upgrades are
+needed.
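The remote-node section of the README above mentions mapping the "remote-" aliases back to real hostnames via pcmk_host_map. Purely as an illustration (node names are placeholders, not taken from the patch), the fencing device attributes for a three-node lab might look like:

    # Illustrative fencing attributes; "remote-" names follow CTS's renaming scheme.
    pcmk_host_list="pcmk-1 pcmk-2 pcmk-3 remote-pcmk-1 remote-pcmk-2 remote-pcmk-3"
    pcmk_host_map="remote-pcmk-1:pcmk-1;remote-pcmk-2:pcmk-2;remote-pcmk-3:pcmk-3"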
diff --git a/cts/benchmark/Makefile.am b/cts/benchmark/Makefile.am
index 532abd2..703f18d 100644
--- a/cts/benchmark/Makefile.am
+++ b/cts/benchmark/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2001-2017 the Pacemaker project contributors
+# Copyright 2001-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -9,5 +9,6 @@
MAINTAINERCLEANFILES = Makefile.in
benchdir = $(datadir)/$(PACKAGE)/tests/cts/benchmark
-dist_bench_DATA = README.benchmark control
+dist_bench_DATA = README.benchmark \
+ control
bench_SCRIPTS = clubench
diff --git a/cts/benchmark/clubench.in b/cts/benchmark/clubench.in
index e65b60d..d20e292 100644
--- a/cts/benchmark/clubench.in
+++ b/cts/benchmark/clubench.in
@@ -126,7 +126,7 @@ mkreports() {
runcts() {
RC_ODIR="$1"
msg "Running CTS"
- python "$CTSDIR/CTSlab.py" $CTSOPTS --nodes "$nodes" > "$RC_ODIR/ctsrun.out" 2>&1 &
+ python "$CTSDIR/cts-lab" $CTSOPTS --nodes "$nodes" > "$RC_ODIR/ctsrun.out" 2>&1 &
ctspid=$!
tail -f "$RC_ODIR/ctsrun.out" &
tailpid=$!
diff --git a/cts/cli/crm_verify_invalid_bz.xml b/cts/cli/crm_verify_invalid_bz.xml
new file mode 100644
index 0000000..b92e563
--- /dev/null
+++ b/cts/cli/crm_verify_invalid_bz.xml
@@ -0,0 +1,72 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="8" num_updates="0" admin_epoch="0" cib-last-written="Wed Sep 13 09:55:12 2023" update-origin="pcmk-2" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.6-2.el8-6fdc9deea29"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="mycluster"/>
+ <nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="false"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="pcmk-1"/>
+ <node id="2" uname="pcmk-2"/>
+ </nodes>
+ <resources>
+ <primitive class="systemd" id="test1" type="chronyd">
+ <operations>
+ <op id="test1-monitor-interval-60" interval="60" name="monitor" timeout="100"/>
+ <op id="test1-start-interval-0s" interval="0s" name="start" timeout="100"/>
+ <op id="test1-stop-interval-0s" interval="0s" name="stop" timeout="100"/>
+ </operations>
+ </primitive>
+ <clone id="test2-clone">
+ <primitive class="systemd" id="test2" type="chronyd">
+ <operations>
+ <op id="test2-monitor-interval-60" interval="60" name="monitor" timeout="100"/>
+ <op id="test2-start-interval-0s" interval="0s" name="start" timeout="100"/>
+ <op id="test2-stop-interval-0s" interval="0s" name="stop" timeout="100"/>
+ </operations>
+ </primitive>
+ <meta_attributes id="test2-clone-meta_attributes">
+ <nvpair id="test2-clone-meta_attributes-promotable" name="promotable" value="true"/>
+ </meta_attributes>
+ </clone>
+ </resources>
+ <constraints/>
+ </configuration>
+ <status>
+ <node_state id="2" uname="pcmk-2" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="test1" class="systemd" type="chronyd">
+ <lrm_rsc_op id="test1_last_0" operation_key="test1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="4:113:0:63958060-000d-4101-9f9f-104f70588675" transition-magic="0:0;4:113:0:63958060-000d-4101-9f9f-104f70588675" exit-reason="" on_node="pcmk-2" call-id="9" rc-code="0" op-status="0" interval="0" last-rc-change="1694613091" exec-time="2115" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="test1_last_failure_0" operation_key="test1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="2:4:7:63958060-000d-4101-9f9f-104f70588675" transition-magic="0:0;2:4:7:63958060-000d-4101-9f9f-104f70588675" exit-reason="" on_node="pcmk-2" call-id="5" rc-code="0" op-status="0" interval="0" last-rc-change="1694009809" exec-time="3" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="test1_monitor_60000" operation_key="test1_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="1:113:0:63958060-000d-4101-9f9f-104f70588675" transition-magic="0:0;1:113:0:63958060-000d-4101-9f9f-104f70588675" exit-reason="" on_node="pcmk-2" call-id="10" rc-code="0" op-status="0" interval="60000" last-rc-change="1694613091" exec-time="1" queue-time="0" op-digest="2d296eeac3e5f7d1cfdb1557b8eb3457"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="1" uname="pcmk-1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="test1" class="systemd" type="chronyd">
+ <lrm_rsc_op id="test1_last_0" operation_key="test1_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="3:113:0:63958060-000d-4101-9f9f-104f70588675" transition-magic="0:0;3:113:0:63958060-000d-4101-9f9f-104f70588675" exit-reason="" on_node="pcmk-1" call-id="6" rc-code="0" op-status="0" interval="0" last-rc-change="1694613089" exec-time="2007" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="test1_last_failure_0" operation_key="test1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="1:4:7:63958060-000d-4101-9f9f-104f70588675" transition-magic="0:0;1:4:7:63958060-000d-4101-9f9f-104f70588675" exit-reason="" on_node="pcmk-1" call-id="5" rc-code="0" op-status="0" interval="0" last-rc-change="1694009809" exec-time="3" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
\ No newline at end of file
diff --git a/cts/cli/crm_verify_invalid_no_stonith.xml b/cts/cli/crm_verify_invalid_no_stonith.xml
new file mode 100644
index 0000000..ce1b3a5
--- /dev/null
+++ b/cts/cli/crm_verify_invalid_no_stonith.xml
@@ -0,0 +1,12 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="1" num_updates="0" admin_epoch="0" cib-last-written="Wed Sep 6 09:27:13 2023" update-origin="pcmk-2" update-client="crmd" update-user="hacluster">
+ <configuration>
+ <crm_config/>
+ <nodes>
+ <node id="1" uname="pcmk-1"/>
+ <node id="2" uname="pcmk-2"/>
+ </nodes>
+ <resources/>
+ <constraints/>
+ </configuration>
+ <status/>
+</cib>
\ No newline at end of file
diff --git a/cts/cli/regression.daemons.exp b/cts/cli/regression.daemons.exp
index 66bd7b3..b34fba8 100644
--- a/cts/cli/regression.daemons.exp
+++ b/cts/cli/regression.daemons.exp
@@ -122,6 +122,11 @@
<shortdesc lang="en">Do not lock resources to a cleanly shut down node longer than this</shortdesc>
<content type="time" default=""/>
</parameter>
+ <parameter name="node-pending-timeout">
+ <longdesc lang="en">Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours.</longdesc>
+ <shortdesc lang="en">How long to wait for a node that has joined the cluster to join the controller process group</shortdesc>
+ <content type="time" default=""/>
+ </parameter>
</parameters>
</resource-agent>
=#=#=#= End test: Get controller metadata - OK (0) =#=#=#=
@@ -349,6 +354,11 @@
<shortdesc lang="en">Apply fencing delay targeting the lost nodes with the highest total resource priority</shortdesc>
<content type="time" default=""/>
</parameter>
+ <parameter name="node-pending-timeout">
+ <longdesc lang="en">Fence nodes that do not join the controller process group within this much time after joining the cluster, to allow the cluster to continue managing resources. A value of 0 means never fence pending nodes. Setting the value to 2h means fence nodes after 2 hours.</longdesc>
+ <shortdesc lang="en">How long to wait for a node that has joined the cluster to join the controller process group</shortdesc>
+ <content type="time" default=""/>
+ </parameter>
<parameter name="cluster-delay">
<longdesc lang="en">The node elected Designated Controller (DC) will consider an action failed if it does not get a response from the node executing the action within this time (after considering the action&apos;s own timeout). The &quot;correct&quot; value will depend on the speed and load of your network and cluster nodes.</longdesc>
<shortdesc lang="en">Maximum time for node-to-node communication</shortdesc>
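The node-pending-timeout option documented in the metadata above is a cluster-wide property. As a sketch outside the patch (using crm_attribute options that select the crm_config section, the option name, and the value), it could be set or cleared like this:

    # Illustrative only: fence nodes that remain pending for more than 2 hours.
    crm_attribute --type crm_config --name node-pending-timeout --update 2h

    # Remove the setting again, falling back to the default of 0 (never fence
    # pending nodes, per the long description above).
    crm_attribute --type crm_config --name node-pending-timeout --delete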
diff --git a/cts/cli/regression.error_codes.exp b/cts/cli/regression.error_codes.exp
index 6c6f4e1..7d705e2 100644
--- a/cts/cli/regression.error_codes.exp
+++ b/cts/cli/regression.error_codes.exp
@@ -145,6 +145,9 @@ pcmk_rc_node_unknown - Node not found
=#=#=#= End test: Get negative Pacemaker return code (with name) (XML) - OK (0) =#=#=#=
* Passed: crm_error - Get negative Pacemaker return code (with name) (XML)
=#=#=#= Begin test: List Pacemaker return codes (non-positive) =#=#=#=
+-1039: Compression/decompression error
+-1038: Nameserver resolution error
+-1037: No active transaction found
-1036: Bad XML patch format
-1035: Bad input value provided
-1034: Disabled
@@ -186,6 +189,9 @@ pcmk_rc_node_unknown - Node not found
* Passed: crm_error - List Pacemaker return codes (non-positive)
=#=#=#= Begin test: List Pacemaker return codes (non-positive) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_error -l -r --output-as=xml">
+ <result-code code="-1039" description="Compression/decompression error"/>
+ <result-code code="-1038" description="Nameserver resolution error"/>
+ <result-code code="-1037" description="No active transaction found"/>
<result-code code="-1036" description="Bad XML patch format"/>
<result-code code="-1035" description="Bad input value provided"/>
<result-code code="-1034" description="Disabled"/>
@@ -227,6 +233,9 @@ pcmk_rc_node_unknown - Node not found
=#=#=#= End test: List Pacemaker return codes (non-positive) (XML) - OK (0) =#=#=#=
* Passed: crm_error - List Pacemaker return codes (non-positive) (XML)
=#=#=#= Begin test: List Pacemaker return codes (non-positive) (with names) =#=#=#=
+-1039: pcmk_rc_compression Compression/decompression error
+-1038: pcmk_rc_ns_resolution Nameserver resolution error
+-1037: pcmk_rc_no_transaction No active transaction found
-1036: pcmk_rc_bad_xml_patch Bad XML patch format
-1035: pcmk_rc_bad_input Bad input value provided
-1034: pcmk_rc_disabled Disabled
@@ -268,6 +277,9 @@ pcmk_rc_node_unknown - Node not found
* Passed: crm_error - List Pacemaker return codes (non-positive) (with names)
=#=#=#= Begin test: List Pacemaker return codes (non-positive) (with names) (XML) =#=#=#=
<pacemaker-result api-version="X" request="crm_error -n -l -r --output-as=xml">
+ <result-code code="-1039" name="pcmk_rc_compression" description="Compression/decompression error"/>
+ <result-code code="-1038" name="pcmk_rc_ns_resolution" description="Nameserver resolution error"/>
+ <result-code code="-1037" name="pcmk_rc_no_transaction" description="No active transaction found"/>
<result-code code="-1036" name="pcmk_rc_bad_xml_patch" description="Bad XML patch format"/>
<result-code code="-1035" name="pcmk_rc_bad_input" description="Bad input value provided"/>
<result-code code="-1034" name="pcmk_rc_disabled" description="Disabled"/>
diff --git a/cts/cli/regression.rules.exp b/cts/cli/regression.rules.exp
index c3dccd7..cdfb5d1 100644
--- a/cts/cli/regression.rules.exp
+++ b/cts/cli/regression.rules.exp
@@ -33,6 +33,9 @@ crm_rule: --check requires use of --rule=
log_xmllib_err error: XML Error: Entity: line 1: parser error : Start tag expected, '<' not found
log_xmllib_err error: XML Error: invalidxml
log_xmllib_err error: XML Error: ^
+log_xmllib_err error: XML Error: Entity: line 1: parser error : Start tag expected, '<' not found
+log_xmllib_err error: XML Error: invalidxml
+log_xmllib_err error: XML Error: ^
crm_rule: Couldn't parse input string: invalidxml
=#=#=#= End test: crm_rule given invalid input XML - Invalid data given (65) =#=#=#=
@@ -41,6 +44,9 @@ crm_rule: Couldn't parse input string: invalidxml
log_xmllib_err error: XML Error: Entity: line 1: parser error : Start tag expected, '<' not found
log_xmllib_err error: XML Error: invalidxml
log_xmllib_err error: XML Error: ^
+log_xmllib_err error: XML Error: Entity: line 1: parser error : Start tag expected, '<' not found
+log_xmllib_err error: XML Error: invalidxml
+log_xmllib_err error: XML Error: ^
<pacemaker-result api-version="X" request="crm_rule -c -r blahblah -X invalidxml --output-as=xml">
<status code="65" message="Invalid data given">
<errors>
@@ -55,6 +61,9 @@ log_xmllib_err error: XML Error: ^
log_xmllib_err error: XML Error: Entity: line 1: parser error : Start tag expected, '<' not found
log_xmllib_err error: XML Error: invalidxml
log_xmllib_err error: XML Error: ^
+log_xmllib_err error: XML Error: Entity: line 1: parser error : Start tag expected, '<' not found
+log_xmllib_err error: XML Error: invalidxml
+log_xmllib_err error: XML Error: ^
crm_rule: Couldn't parse input from STDIN
=#=#=#= End test: crm_rule given invalid input XML on stdin - Invalid data given (65) =#=#=#=
@@ -63,6 +72,9 @@ crm_rule: Couldn't parse input from STDIN
log_xmllib_err error: XML Error: Entity: line 1: parser error : Start tag expected, '<' not found
log_xmllib_err error: XML Error: invalidxml
log_xmllib_err error: XML Error: ^
+log_xmllib_err error: XML Error: Entity: line 1: parser error : Start tag expected, '<' not found
+log_xmllib_err error: XML Error: invalidxml
+log_xmllib_err error: XML Error: ^
<pacemaker-result api-version="X" request="crm_rule -c -r blahblah -X - --output-as=xml">
<status code="65" message="Invalid data given">
<errors>
diff --git a/cts/cli/regression.tools.exp b/cts/cli/regression.tools.exp
index a8e2236..accf781 100644
--- a/cts/cli/regression.tools.exp
+++ b/cts/cli/regression.tools.exp
@@ -4706,7 +4706,7 @@ Resources prim2 is colocated with:
<pacemaker-result api-version="X" request="crm_resource -a -r prim2 --output-as=xml">
<constraints>
<rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
- <rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
@@ -4716,10 +4716,10 @@ Resources prim2 is colocated with:
<pacemaker-result api-version="X" request="crm_resource -A -r prim2 --output-as=xml">
<constraints>
<rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
- <rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY"/>
- <rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
- <rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
@@ -4752,9 +4752,9 @@ Resources prim3 is colocated with:
=#=#=#= Begin test: Check locations and constraints for prim3 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim3 --output-as=xml">
<constraints>
- <rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY" node-attribute="#uname"/>
<rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
- <rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
@@ -4764,11 +4764,11 @@ Resources prim3 is colocated with:
=#=#=#= Begin test: Recursively check locations and constraints for prim3 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim3 --output-as=xml">
<constraints>
- <rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY" node-attribute="#uname"/>
<rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
- <rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
- <rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
@@ -4802,9 +4802,9 @@ Resources prim4 is colocated with:
<pacemaker-result api-version="X" request="crm_resource -a -r prim4 --output-as=xml">
<constraints>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
- <rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY"/>
- <rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY"/>
- <rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
@@ -4814,11 +4814,11 @@ Resources prim4 is colocated with:
<pacemaker-result api-version="X" request="crm_resource -A -r prim4 --output-as=xml">
<constraints>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
- <rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY"/>
- <rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY"/>
- <rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY" node-attribute="#uname"/>
<rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
- <rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
@@ -4848,7 +4848,7 @@ Resources colocated with prim5:
=#=#=#= Begin test: Check locations and constraints for prim5 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim5 --output-as=xml">
<constraints>
- <rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY" node-attribute="#uname"/>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
@@ -4858,11 +4858,11 @@ Resources colocated with prim5:
=#=#=#= Begin test: Recursively check locations and constraints for prim5 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim5 --output-as=xml">
<constraints>
- <rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY" node-attribute="#uname"/>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
- <rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY"/>
- <rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY"/>
- <rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim3-prim4-INFINITY" rsc="prim3" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim2-prim3-INFINITY" rsc="prim2" with-rsc="prim3" score="INFINITY" node-attribute="#uname"/>
<rsc_location node="cluster01" rsc="prim2" id="prim2-on-cluster1" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
@@ -4910,7 +4910,7 @@ Resources prim7 is colocated with:
=#=#=#= Begin test: Check locations and constraints for prim7 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim7 --output-as=xml">
<constraints>
- <rsc_colocation id="colocation-prim7-group-INFINITY" rsc="prim7" with-rsc="group" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim7-group-INFINITY" rsc="prim7" with-rsc="group" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
@@ -4919,7 +4919,7 @@ Resources prim7 is colocated with:
=#=#=#= Begin test: Recursively check locations and constraints for prim7 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim7 --output-as=xml">
<constraints>
- <rsc_colocation id="colocation-prim7-group-INFINITY" rsc="prim7" with-rsc="group" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim7-group-INFINITY" rsc="prim7" with-rsc="group" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
@@ -4938,7 +4938,7 @@ Resources prim8 is colocated with:
=#=#=#= Begin test: Check locations and constraints for prim8 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim8 --output-as=xml">
<constraints>
- <rsc_colocation id="colocation-prim8-gr2-INFINITY" rsc="prim8" with-rsc="gr2" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim8-gr2-INFINITY" rsc="prim8" with-rsc="gr2" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
@@ -4947,7 +4947,7 @@ Resources prim8 is colocated with:
=#=#=#= Begin test: Recursively check locations and constraints for prim8 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim8 --output-as=xml">
<constraints>
- <rsc_colocation id="colocation-prim8-gr2-INFINITY" rsc="prim8" with-rsc="gr2" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim8-gr2-INFINITY" rsc="prim8" with-rsc="gr2" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
@@ -4966,7 +4966,7 @@ Resources prim9 is colocated with:
=#=#=#= Begin test: Check locations and constraints for prim9 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim9 --output-as=xml">
<constraints>
- <rsc_colocation id="colocation-prim9-clone-INFINITY" rsc="prim9" with-rsc="clone" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim9-clone-INFINITY" rsc="prim9" with-rsc="clone" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
@@ -4975,7 +4975,7 @@ Resources prim9 is colocated with:
=#=#=#= Begin test: Recursively check locations and constraints for prim9 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim9 --output-as=xml">
<constraints>
- <rsc_colocation id="colocation-prim9-clone-INFINITY" rsc="prim9" with-rsc="clone" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim9-clone-INFINITY" rsc="prim9" with-rsc="clone" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
@@ -5000,7 +5000,7 @@ Resources prim10 is colocated with:
=#=#=#= Begin test: Check locations and constraints for prim10 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim10 --output-as=xml">
<constraints>
- <rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
</constraints>
<status code="0" message="OK"/>
@@ -5010,9 +5010,9 @@ Resources prim10 is colocated with:
=#=#=#= Begin test: Recursively check locations and constraints for prim10 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim10 --output-as=xml">
<constraints>
- <rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim10-prim4-INFINITY" rsc="prim10" with-rsc="prim4" score="INFINITY" node-attribute="#uname"/>
<rsc_location node="cluster02" rsc="prim4" id="prim4-on-cluster2" score="INFINITY"/>
- <rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim4-prim5-INFINITY" rsc="prim4" with-rsc="prim5" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
@@ -5043,8 +5043,8 @@ Resources prim11 is colocated with:
=#=#=#= Begin test: Check locations and constraints for prim11 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim11 --output-as=xml">
<constraints>
- <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY"/>
- <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
@@ -5053,12 +5053,12 @@ Resources prim11 is colocated with:
=#=#=#= Begin test: Recursively check locations and constraints for prim11 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim11 --output-as=xml">
<constraints>
- <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY"/>
- <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY"/>
- <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY"/>
- <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY"/>
- <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY"/>
- <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
@@ -5089,8 +5089,8 @@ Resources prim12 is colocated with:
=#=#=#= Begin test: Check locations and constraints for prim12 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim12 --output-as=xml">
<constraints>
- <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY"/>
- <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
@@ -5099,12 +5099,12 @@ Resources prim12 is colocated with:
=#=#=#= Begin test: Recursively check locations and constraints for prim12 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim12 --output-as=xml">
<constraints>
- <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY"/>
- <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY"/>
- <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY"/>
- <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY"/>
- <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY"/>
- <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
@@ -5135,8 +5135,8 @@ Resources prim13 is colocated with:
=#=#=#= Begin test: Check locations and constraints for prim13 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r prim13 --output-as=xml">
<constraints>
- <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY"/>
- <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
@@ -5145,12 +5145,12 @@ Resources prim13 is colocated with:
=#=#=#= Begin test: Recursively check locations and constraints for prim13 in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r prim13 --output-as=xml">
<constraints>
- <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY"/>
- <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY"/>
- <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY"/>
- <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY"/>
- <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY"/>
- <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim13-prim11-INFINITY" rsc="prim13" with-rsc="prim11" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim11-prim12-INFINITY" rsc="prim11" with-rsc="prim12" score="INFINITY" node-attribute="#uname"/>
+ <rsc_colocation id="colocation-prim12-prim13-INFINITY" rsc="prim12" with-rsc="prim13" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
@@ -5169,7 +5169,7 @@ Resources colocated with group:
=#=#=#= Begin test: Check locations and constraints for group in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r group --output-as=xml">
<constraints>
- <rsc_colocation id="colocation-prim7-group-INFINITY" rsc="prim7" with-rsc="group" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim7-group-INFINITY" rsc="prim7" with-rsc="group" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
@@ -5178,7 +5178,7 @@ Resources colocated with group:
=#=#=#= Begin test: Recursively check locations and constraints for group in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r group --output-as=xml">
<constraints>
- <rsc_colocation id="colocation-prim7-group-INFINITY" rsc="prim7" with-rsc="group" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim7-group-INFINITY" rsc="prim7" with-rsc="group" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
@@ -5197,7 +5197,7 @@ Resources colocated with clone:
=#=#=#= Begin test: Check locations and constraints for clone in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -a -r clone --output-as=xml">
<constraints>
- <rsc_colocation id="colocation-prim9-clone-INFINITY" rsc="prim9" with-rsc="clone" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim9-clone-INFINITY" rsc="prim9" with-rsc="clone" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
@@ -5206,7 +5206,7 @@ Resources colocated with clone:
=#=#=#= Begin test: Recursively check locations and constraints for clone in XML =#=#=#=
<pacemaker-result api-version="X" request="crm_resource -A -r clone --output-as=xml">
<constraints>
- <rsc_colocation id="colocation-prim9-clone-INFINITY" rsc="prim9" with-rsc="clone" score="INFINITY"/>
+ <rsc_colocation id="colocation-prim9-clone-INFINITY" rsc="prim9" with-rsc="clone" score="INFINITY" node-attribute="#uname"/>
</constraints>
<status code="0" message="OK"/>
</pacemaker-result>
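The request strings above show the two crm_resource query modes these expectations cover; a minimal sketch of the invocations (the resource name is illustrative):

    crm_resource -a -r prim12 --output-as=xml   # constraints involving the resource
    crm_resource -A -r prim12 --output-as=xml   # same, but followed recursively, per the test names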
@@ -5529,34 +5529,34 @@ export overcloud-rabbit-2=overcloud-rabbit-2
<node_weight function="pcmk__primitive_assign" node="httpd-bundle-0" score="-INFINITY" id="inactive-dummy-2"/>
<node_weight function="pcmk__primitive_assign" node="httpd-bundle-1" score="-INFINITY" id="inactive-dummy-2"/>
<node_weight function="pcmk__primitive_assign" node="httpd-bundle-2" score="-INFINITY" id="inactive-dummy-2"/>
- <node_weight function="pcmk__bundle_allocate" node="cluster01" score="0" id="httpd-bundle"/>
- <node_weight function="pcmk__bundle_allocate" node="cluster02" score="0" id="httpd-bundle"/>
- <node_weight function="pcmk__bundle_allocate" node="cluster01" score="0" id="httpd-bundle-docker-0"/>
- <node_weight function="pcmk__bundle_allocate" node="cluster02" score="0" id="httpd-bundle-docker-0"/>
- <node_weight function="pcmk__bundle_allocate" node="cluster01" score="0" id="httpd-bundle-ip-192.168.122.131"/>
- <node_weight function="pcmk__bundle_allocate" node="cluster02" score="0" id="httpd-bundle-ip-192.168.122.131"/>
- <node_weight function="pcmk__bundle_allocate" node="cluster01" score="0" id="httpd-bundle-0"/>
- <node_weight function="pcmk__bundle_allocate" node="cluster02" score="0" id="httpd-bundle-0"/>
- <node_weight function="pcmk__bundle_allocate" node="cluster01" score="0" id="httpd-bundle-docker-1"/>
- <node_weight function="pcmk__bundle_allocate" node="cluster02" score="0" id="httpd-bundle-docker-1"/>
- <node_weight function="pcmk__bundle_allocate" node="cluster01" score="0" id="httpd-bundle-ip-192.168.122.132"/>
- <node_weight function="pcmk__bundle_allocate" node="cluster02" score="0" id="httpd-bundle-ip-192.168.122.132"/>
- <node_weight function="pcmk__bundle_allocate" node="cluster01" score="0" id="httpd-bundle-1"/>
- <node_weight function="pcmk__bundle_allocate" node="cluster02" score="0" id="httpd-bundle-1"/>
- <node_weight function="pcmk__bundle_allocate" node="cluster01" score="0" id="httpd-bundle-docker-2"/>
- <node_weight function="pcmk__bundle_allocate" node="cluster02" score="0" id="httpd-bundle-docker-2"/>
- <node_weight function="pcmk__bundle_allocate" node="cluster01" score="0" id="httpd-bundle-ip-192.168.122.133"/>
- <node_weight function="pcmk__bundle_allocate" node="cluster02" score="0" id="httpd-bundle-ip-192.168.122.133"/>
- <node_weight function="pcmk__bundle_allocate" node="cluster01" score="0" id="httpd-bundle-2"/>
- <node_weight function="pcmk__bundle_allocate" node="cluster02" score="0" id="httpd-bundle-2"/>
- <node_weight function="pcmk__bundle_allocate" node="cluster01" score="0" id="httpd-bundle-clone"/>
- <node_weight function="pcmk__bundle_allocate" node="cluster02" score="0" id="httpd-bundle-clone"/>
- <node_weight function="pcmk__bundle_allocate" node="httpd-bundle-0" score="-INFINITY" id="httpd-bundle-clone"/>
- <node_weight function="pcmk__bundle_allocate" node="httpd-bundle-1" score="-INFINITY" id="httpd-bundle-clone"/>
- <node_weight function="pcmk__bundle_allocate" node="httpd-bundle-2" score="-INFINITY" id="httpd-bundle-clone"/>
- <node_weight function="pcmk__bundle_allocate" node="httpd-bundle-0" score="501" id="httpd:0"/>
- <node_weight function="pcmk__bundle_allocate" node="httpd-bundle-1" score="501" id="httpd:1"/>
- <node_weight function="pcmk__bundle_allocate" node="httpd-bundle-2" score="500" id="httpd:2"/>
+ <node_weight function="pcmk__bundle_assign" node="cluster01" score="0" id="httpd-bundle"/>
+ <node_weight function="pcmk__bundle_assign" node="cluster02" score="0" id="httpd-bundle"/>
+ <node_weight function="pcmk__bundle_assign" node="cluster01" score="0" id="httpd-bundle-docker-0"/>
+ <node_weight function="pcmk__bundle_assign" node="cluster02" score="0" id="httpd-bundle-docker-0"/>
+ <node_weight function="pcmk__bundle_assign" node="cluster01" score="0" id="httpd-bundle-ip-192.168.122.131"/>
+ <node_weight function="pcmk__bundle_assign" node="cluster02" score="0" id="httpd-bundle-ip-192.168.122.131"/>
+ <node_weight function="pcmk__bundle_assign" node="cluster01" score="0" id="httpd-bundle-0"/>
+ <node_weight function="pcmk__bundle_assign" node="cluster02" score="0" id="httpd-bundle-0"/>
+ <node_weight function="pcmk__bundle_assign" node="cluster01" score="0" id="httpd-bundle-docker-1"/>
+ <node_weight function="pcmk__bundle_assign" node="cluster02" score="0" id="httpd-bundle-docker-1"/>
+ <node_weight function="pcmk__bundle_assign" node="cluster01" score="0" id="httpd-bundle-ip-192.168.122.132"/>
+ <node_weight function="pcmk__bundle_assign" node="cluster02" score="0" id="httpd-bundle-ip-192.168.122.132"/>
+ <node_weight function="pcmk__bundle_assign" node="cluster01" score="0" id="httpd-bundle-1"/>
+ <node_weight function="pcmk__bundle_assign" node="cluster02" score="0" id="httpd-bundle-1"/>
+ <node_weight function="pcmk__bundle_assign" node="cluster01" score="0" id="httpd-bundle-docker-2"/>
+ <node_weight function="pcmk__bundle_assign" node="cluster02" score="0" id="httpd-bundle-docker-2"/>
+ <node_weight function="pcmk__bundle_assign" node="cluster01" score="0" id="httpd-bundle-ip-192.168.122.133"/>
+ <node_weight function="pcmk__bundle_assign" node="cluster02" score="0" id="httpd-bundle-ip-192.168.122.133"/>
+ <node_weight function="pcmk__bundle_assign" node="cluster01" score="0" id="httpd-bundle-2"/>
+ <node_weight function="pcmk__bundle_assign" node="cluster02" score="0" id="httpd-bundle-2"/>
+ <node_weight function="pcmk__bundle_assign" node="cluster01" score="0" id="httpd-bundle-clone"/>
+ <node_weight function="pcmk__bundle_assign" node="cluster02" score="0" id="httpd-bundle-clone"/>
+ <node_weight function="pcmk__bundle_assign" node="httpd-bundle-0" score="-INFINITY" id="httpd-bundle-clone"/>
+ <node_weight function="pcmk__bundle_assign" node="httpd-bundle-1" score="-INFINITY" id="httpd-bundle-clone"/>
+ <node_weight function="pcmk__bundle_assign" node="httpd-bundle-2" score="-INFINITY" id="httpd-bundle-clone"/>
+ <node_weight function="pcmk__bundle_assign" node="httpd-bundle-0" score="501" id="httpd:0"/>
+ <node_weight function="pcmk__bundle_assign" node="httpd-bundle-1" score="501" id="httpd:1"/>
+ <node_weight function="pcmk__bundle_assign" node="httpd-bundle-2" score="500" id="httpd:2"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="0" id="httpd-bundle-docker-0"/>
<node_weight function="pcmk__primitive_assign" node="cluster02" score="0" id="httpd-bundle-docker-0"/>
<node_weight function="pcmk__primitive_assign" node="cluster01" score="-INFINITY" id="httpd-bundle-docker-1"/>
@@ -5711,26 +5711,26 @@ Original: cluster02 capacity:
Original: httpd-bundle-0 capacity:
Original: httpd-bundle-1 capacity:
Original: httpd-bundle-2 capacity:
-pcmk__finalize_assignment: ping:0 utilization on cluster02:
-pcmk__finalize_assignment: ping:1 utilization on cluster01:
-pcmk__finalize_assignment: Fencing utilization on cluster01:
-pcmk__finalize_assignment: dummy utilization on cluster02:
-pcmk__finalize_assignment: httpd-bundle-docker-0 utilization on cluster01:
-pcmk__finalize_assignment: httpd-bundle-docker-1 utilization on cluster02:
-pcmk__finalize_assignment: httpd-bundle-ip-192.168.122.131 utilization on cluster01:
-pcmk__finalize_assignment: httpd-bundle-0 utilization on cluster01:
-pcmk__finalize_assignment: httpd:0 utilization on httpd-bundle-0:
-pcmk__finalize_assignment: httpd-bundle-ip-192.168.122.132 utilization on cluster02:
-pcmk__finalize_assignment: httpd-bundle-1 utilization on cluster02:
-pcmk__finalize_assignment: httpd:1 utilization on httpd-bundle-1:
-pcmk__finalize_assignment: httpd-bundle-2 utilization on cluster01:
-pcmk__finalize_assignment: httpd:2 utilization on httpd-bundle-2:
-pcmk__finalize_assignment: Public-IP utilization on cluster02:
-pcmk__finalize_assignment: Email utilization on cluster02:
-pcmk__finalize_assignment: mysql-proxy:0 utilization on cluster02:
-pcmk__finalize_assignment: mysql-proxy:1 utilization on cluster01:
-pcmk__finalize_assignment: promotable-rsc:0 utilization on cluster02:
-pcmk__finalize_assignment: promotable-rsc:1 utilization on cluster01:
+pcmk__assign_resource: ping:0 utilization on cluster02:
+pcmk__assign_resource: ping:1 utilization on cluster01:
+pcmk__assign_resource: Fencing utilization on cluster01:
+pcmk__assign_resource: dummy utilization on cluster02:
+pcmk__assign_resource: httpd-bundle-docker-0 utilization on cluster01:
+pcmk__assign_resource: httpd-bundle-docker-1 utilization on cluster02:
+pcmk__assign_resource: httpd-bundle-ip-192.168.122.131 utilization on cluster01:
+pcmk__assign_resource: httpd-bundle-0 utilization on cluster01:
+pcmk__assign_resource: httpd:0 utilization on httpd-bundle-0:
+pcmk__assign_resource: httpd-bundle-ip-192.168.122.132 utilization on cluster02:
+pcmk__assign_resource: httpd-bundle-1 utilization on cluster02:
+pcmk__assign_resource: httpd:1 utilization on httpd-bundle-1:
+pcmk__assign_resource: httpd-bundle-2 utilization on cluster01:
+pcmk__assign_resource: httpd:2 utilization on httpd-bundle-2:
+pcmk__assign_resource: Public-IP utilization on cluster02:
+pcmk__assign_resource: Email utilization on cluster02:
+pcmk__assign_resource: mysql-proxy:0 utilization on cluster02:
+pcmk__assign_resource: mysql-proxy:1 utilization on cluster01:
+pcmk__assign_resource: promotable-rsc:0 utilization on cluster02:
+pcmk__assign_resource: promotable-rsc:1 utilization on cluster01:
Remaining: cluster01 capacity:
Remaining: cluster02 capacity:
Remaining: httpd-bundle-0 capacity:
@@ -5961,7 +5961,7 @@ Transition Summary:
* Move Public-IP ( cluster02 -> cluster01 )
* Move Email ( cluster02 -> cluster01 )
* Stop mysql-proxy:0 ( cluster02 ) due to node availability
- * Stop promotable-rsc:0 ( Promoted cluster02 ) due to node availability
+ * Stop promotable-rsc:0 ( Promoted cluster02 ) due to node availability
Executing Cluster Transition:
* Pseudo action: httpd-bundle-1_stop_0
@@ -7048,7 +7048,7 @@ Diff: +++ 1.4.1 (null)
<cib epoch="4" num_updates="1" admin_epoch="1"/>
</change-result>
</change>
- <change operation="modify" path="/cib/configuration/resources/primitive[@id='dummy']">
+ <change operation="modify" path="/cib/configuration/resources/primitive[@id=&apos;dummy&apos;]">
<change-list>
<change-attr name="description" operation="set" value="desc"/>
</change-list>
@@ -7667,7 +7667,7 @@ Diff: +++ 0.1.0 (null)
-- /cib/status/node_state[@id='1']
-- /cib/status/node_state[@id='httpd-bundle-0']
-- /cib/status/node_state[@id='httpd-bundle-1']
-+ /cib: @crm_feature_set=3.17.4, @num_updates=0, @admin_epoch=0
++ /cib: @crm_feature_set=3.19.0, @num_updates=0, @admin_epoch=0
-- /cib: @cib-last-written, @update-origin, @update-client, @update-user, @have-quorum, @dc-uuid
=#=#=#= End test: Get active shadow instance's diff (empty CIB) - Error occurred (1) =#=#=#=
* Passed: crm_shadow - Get active shadow instance's diff (empty CIB)
@@ -7679,29 +7679,29 @@ Diff: +++ 0.1.0 (null)
<source admin_epoch="1" epoch="1" num_updates="173"/>
<target admin_epoch="0" epoch="1" num_updates="0"/>
</version>
- <change operation="delete" path="/cib/configuration/crm_config/cluster_property_set[@id='cib-bootstrap-options']"/>
- <change operation="delete" path="/cib/configuration/nodes/node[@id='1']"/>
- <change operation="delete" path="/cib/configuration/nodes/node[@id='2']"/>
- <change operation="delete" path="/cib/configuration/resources/clone[@id='ping-clone']"/>
- <change operation="delete" path="/cib/configuration/resources/primitive[@id='Fencing']"/>
- <change operation="delete" path="/cib/configuration/resources/primitive[@id='dummy']"/>
- <change operation="delete" path="/cib/configuration/resources/clone[@id='inactive-clone']"/>
- <change operation="delete" path="/cib/configuration/resources/group[@id='inactive-group']"/>
- <change operation="delete" path="/cib/configuration/resources/bundle[@id='httpd-bundle']"/>
- <change operation="delete" path="/cib/configuration/resources/group[@id='exim-group']"/>
- <change operation="delete" path="/cib/configuration/resources/clone[@id='mysql-clone-group']"/>
- <change operation="delete" path="/cib/configuration/resources/clone[@id='promotable-clone']"/>
- <change operation="delete" path="/cib/configuration/constraints/rsc_location[@id='not-on-cluster1']"/>
- <change operation="delete" path="/cib/configuration/constraints/rsc_location[@id='loc-promotable-clone']"/>
+ <change operation="delete" path="/cib/configuration/crm_config/cluster_property_set[@id=&apos;cib-bootstrap-options&apos;]"/>
+ <change operation="delete" path="/cib/configuration/nodes/node[@id=&apos;1&apos;]"/>
+ <change operation="delete" path="/cib/configuration/nodes/node[@id=&apos;2&apos;]"/>
+ <change operation="delete" path="/cib/configuration/resources/clone[@id=&apos;ping-clone&apos;]"/>
+ <change operation="delete" path="/cib/configuration/resources/primitive[@id=&apos;Fencing&apos;]"/>
+ <change operation="delete" path="/cib/configuration/resources/primitive[@id=&apos;dummy&apos;]"/>
+ <change operation="delete" path="/cib/configuration/resources/clone[@id=&apos;inactive-clone&apos;]"/>
+ <change operation="delete" path="/cib/configuration/resources/group[@id=&apos;inactive-group&apos;]"/>
+ <change operation="delete" path="/cib/configuration/resources/bundle[@id=&apos;httpd-bundle&apos;]"/>
+ <change operation="delete" path="/cib/configuration/resources/group[@id=&apos;exim-group&apos;]"/>
+ <change operation="delete" path="/cib/configuration/resources/clone[@id=&apos;mysql-clone-group&apos;]"/>
+ <change operation="delete" path="/cib/configuration/resources/clone[@id=&apos;promotable-clone&apos;]"/>
+ <change operation="delete" path="/cib/configuration/constraints/rsc_location[@id=&apos;not-on-cluster1&apos;]"/>
+ <change operation="delete" path="/cib/configuration/constraints/rsc_location[@id=&apos;loc-promotable-clone&apos;]"/>
<change operation="delete" path="/cib/configuration/tags"/>
<change operation="delete" path="/cib/configuration/op_defaults"/>
- <change operation="delete" path="/cib/status/node_state[@id='2']"/>
- <change operation="delete" path="/cib/status/node_state[@id='1']"/>
- <change operation="delete" path="/cib/status/node_state[@id='httpd-bundle-0']"/>
- <change operation="delete" path="/cib/status/node_state[@id='httpd-bundle-1']"/>
+ <change operation="delete" path="/cib/status/node_state[@id=&apos;2&apos;]"/>
+ <change operation="delete" path="/cib/status/node_state[@id=&apos;1&apos;]"/>
+ <change operation="delete" path="/cib/status/node_state[@id=&apos;httpd-bundle-0&apos;]"/>
+ <change operation="delete" path="/cib/status/node_state[@id=&apos;httpd-bundle-1&apos;]"/>
<change operation="modify" path="/cib">
<change-list>
- <change-attr name="crm_feature_set" operation="set" value="3.17.4"/>
+ <change-attr name="crm_feature_set" operation="set" value="3.19.0"/>
<change-attr name="num_updates" operation="set" value="0"/>
<change-attr name="admin_epoch" operation="set" value="0"/>
<change-attr name="cib-last-written" operation="unset"/>
@@ -7898,3 +7898,95 @@ crm_shadow: Could not access shadow instance 'cts-cli': No such file or director
</pacemaker-result>
=#=#=#= End test: Switch to nonexistent shadow instance (force) (XML) - No such object (105) =#=#=#=
* Passed: crm_shadow - Switch to nonexistent shadow instance (force) (XML)
+=#=#=#= Begin test: Verify a file-specified invalid configuration, outputting as xml =#=#=#=
+<pacemaker-result api-version="X" request="crm_verify_invalid_bz.xml --output-as=xml">
+ <status code="78" message="Invalid configuration">
+ <errors>
+ <error>Resource test2:0 is of type systemd and therefore cannot be used as a promotable clone resource</error>
+ <error>Ignoring &lt;clone&gt; resource 'test2-clone' because configuration is invalid</error>
+ <error>crm_verify: Errors found during check: config not valid</error>
+ </errors>
+ </status>
+</pacemaker-result>
+=#=#=#= End test: Verify a file-specified invalid configuration, outputting as xml - Invalid configuration (78) =#=#=#=
+* Passed: crm_verify - Verify a file-specified invalid configuration, outputting as xml
+=#=#=#= Begin test: Verify another file-specified invalid configuration, outputting as xml =#=#=#=
+<pacemaker-result api-version="X" request="crm_verify_invalid_no_stonith.xml --output-as=xml">
+ <status code="78" message="Invalid configuration">
+ <errors>
+ <error>Resource start-up disabled since no STONITH resources have been defined</error>
+ <error>Either configure some or disable STONITH with the stonith-enabled option</error>
+ <error>NOTE: Clusters with shared data need STONITH to ensure data integrity</error>
+ <error>Node pcmk-1 is unclean but cannot be fenced</error>
+ <error>Node pcmk-2 is unclean but cannot be fenced</error>
+ <error>crm_verify: Errors found during check: config not valid</error>
+ </errors>
+ </status>
+</pacemaker-result>
+=#=#=#= End test: Verify another file-specified invalid configuration, outputting as xml - Invalid configuration (78) =#=#=#=
+* Passed: crm_verify - Verify another file-specified invalid configuration, outputting as xml
+=#=#=#= Begin test: Verbosely verify a file-specified invalid configuration, outputting as xml =#=#=#=
+unpack_config warning: Blind faith: not fencing unseen nodes
+<pacemaker-result api-version="X" request="crm_verify_invalid_bz.xml --output-as=xml --verbose">
+ <status code="78" message="Invalid configuration">
+ <errors>
+ <error>Resource test2:0 is of type systemd and therefore cannot be used as a promotable clone resource</error>
+ <error>Ignoring &lt;clone&gt; resource 'test2-clone' because configuration is invalid</error>
+ <error>crm_verify: Errors found during check: config not valid</error>
+ </errors>
+ </status>
+</pacemaker-result>
+=#=#=#= End test: Verbosely verify a file-specified invalid configuration, outputting as xml - Invalid configuration (78) =#=#=#=
+* Passed: crm_verify - Verbosely verify a file-specified invalid configuration, outputting as xml
+=#=#=#= Begin test: Verbosely verify another file-specified invalid configuration, outputting as xml =#=#=#=
+(cluster_status@status.c:113) warning: Fencing and resource management disabled due to lack of quorum
+<pacemaker-result api-version="X" request="crm_verify_invalid_no_stonith.xml --output-as=xml --verbose">
+ <status code="78" message="Invalid configuration">
+ <errors>
+ <error>Resource start-up disabled since no STONITH resources have been defined</error>
+ <error>Either configure some or disable STONITH with the stonith-enabled option</error>
+ <error>NOTE: Clusters with shared data need STONITH to ensure data integrity</error>
+ <error>Node pcmk-1 is unclean but cannot be fenced</error>
+ <error>Node pcmk-2 is unclean but cannot be fenced</error>
+ <error>crm_verify: Errors found during check: config not valid</error>
+ </errors>
+ </status>
+</pacemaker-result>
+=#=#=#= End test: Verbosely verify another file-specified invalid configuration, outputting as xml - Invalid configuration (78) =#=#=#=
+* Passed: crm_verify - Verbosely verify another file-specified invalid configuration, outputting as xml
+=#=#=#= Begin test: Verify a file-specified valid configuration, outputting as xml =#=#=#=
+<pacemaker-result api-version="X" request="crm_mon.xml --output-as=xml">
+ <status code="0" message="OK"/>
+</pacemaker-result>
+=#=#=#= End test: Verify a file-specified valid configuration, outputting as xml - OK (0) =#=#=#=
+* Passed: crm_verify - Verify a file-specified valid configuration, outputting as xml
+=#=#=#= Begin test: Verify a piped-in valid configuration, outputting as xml =#=#=#=
+<pacemaker-result api-version="X" request="crm_verify -p --output-as=xml">
+ <status code="0" message="OK"/>
+</pacemaker-result>
+=#=#=#= End test: Verify a piped-in valid configuration, outputting as xml - OK (0) =#=#=#=
+* Passed: cat - Verify a piped-in valid configuration, outputting as xml
+=#=#=#= Begin test: Verbosely verify a file-specified valid configuration, outputting as xml =#=#=#=
+<pacemaker-result api-version="X" request="crm_mon.xml --output-as=xml --verbose">
+ <status code="0" message="OK"/>
+</pacemaker-result>
+=#=#=#= End test: Verbosely verify a file-specified valid configuration, outputting as xml - OK (0) =#=#=#=
+* Passed: crm_verify - Verbosely verify a file-specified valid configuration, outputting as xml
+=#=#=#= Begin test: Verbosely verify a piped-in valid configuration, outputting as xml =#=#=#=
+<pacemaker-result api-version="X" request="crm_verify -p --output-as=xml --verbose">
+ <status code="0" message="OK"/>
+</pacemaker-result>
+=#=#=#= End test: Verbosely verify a piped-in valid configuration, outputting as xml - OK (0) =#=#=#=
+* Passed: cat - Verbosely verify a piped-in valid configuration, outputting as xml
+=#=#=#= Begin test: Verify a string-supplied valid configuration, outputting as xml =#=#=#=
+<pacemaker-result api-version="X" request="crm_feature_set=&quot;3.7.1&quot; transition-key=&quot;1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx&quot; transition-magic=&quot;0:0;1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx&quot; exit-reason=&quot;&quot; call-id=&quot;1&quot; rc-code=&quot;0&quot; op-status=&quot;0&quot; interval=&quot;0&quot; last-rc-change=&quot;1613491700&quot; exec-time=&quot;0&quot; queue-time=&quot;0&quot; op-digest=&quot;f2317cad3d54cec5d7d7aa7d0bf35cf8&quot;/&gt; &lt;/lrm_resource&gt; &lt;/lrm_resources&gt; &lt;/lrm&gt; &lt;/node_state&gt; &lt;/status&gt; &lt;/cib&gt;' --output-as=xml">
+ <status code="0" message="OK"/>
+</pacemaker-result>
+=#=#=#= End test: Verify a string-supplied valid configuration, outputting as xml - OK (0) =#=#=#=
+* Passed: crm_verify - Verify a string-supplied valid configuration, outputting as xml
+=#=#=#= Begin test: Verbosely verify a string-supplied valid configuration, outputting as xml =#=#=#=
+<pacemaker-result api-version="X" request="crm_feature_set=&quot;3.7.1&quot; transition-key=&quot;1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx&quot; transition-magic=&quot;0:0;1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx&quot; exit-reason=&quot;&quot; call-id=&quot;1&quot; rc-code=&quot;0&quot; op-status=&quot;0&quot; interval=&quot;0&quot; last-rc-change=&quot;1613491700&quot; exec-time=&quot;0&quot; queue-time=&quot;0&quot; op-digest=&quot;f2317cad3d54cec5d7d7aa7d0bf35cf8&quot;/&gt; &lt;/lrm_resource&gt; &lt;/lrm_resources&gt; &lt;/lrm&gt; &lt;/node_state&gt; &lt;/status&gt; &lt;/cib&gt;' --output-as=xml --verbose">
+ <status code="0" message="OK"/>
+</pacemaker-result>
+=#=#=#= End test: Verbosely verify a string-supplied valid configuration, outputting as xml - OK (0) =#=#=#=
+* Passed: crm_verify - Verbosely verify a string-supplied valid configuration, outputting as xml
diff --git a/cts/lab/cluster_test.in b/cts/cluster_test.in
index 1741b47..f5cb3e8 100755
--- a/cts/lab/cluster_test.in
+++ b/cts/cluster_test.in
@@ -172,4 +172,4 @@ printf "\nAll set to go for %d iterations!\n" "$CTS_numtests"
|| echo "+ To use a different configuration, remove ~/.cts and re-run cts (or edit it manually)."
echo Now paste the following command into this shell:
-echo "@PYTHON@ `dirname "$0"`/CTSlab.py -L \"$CTS_logfile\" --syslog-facility \"$CTS_logfacility\" --no-unsafe-tests --stack \"$CTS_stack\" $CTS_adv --at-boot \"$CTS_boot\" $cts_extra \"$CTS_numtests\" --nodes \"$CTS_node_list\""
+echo "@PYTHON@ `dirname "$0"`/cts-lab -L \"$CTS_logfile\" --syslog-facility \"$CTS_logfacility\" --no-unsafe-tests --stack \"$CTS_stack\" $CTS_adv --at-boot \"$CTS_boot\" $cts_extra \"$CTS_numtests\" --nodes \"$CTS_node_list\""
diff --git a/cts/cts-attrd.in b/cts/cts-attrd.in
index b7ad538..b594ac3 100644
--- a/cts/cts-attrd.in
+++ b/cts/cts-attrd.in
@@ -126,7 +126,15 @@ class AttributeTests(Tests):
test.add_cmd("attrd_updater", "--name AAA -B 111 -d 5 --output-as=xml")
test.add_cmd_check_stdout("attrd_updater", "--name AAA -Q --output-as=xml",
"name=\"AAA\" value=\"111\"")
- test.add_log_pattern(r"Setting AAA\[.*\] in instance_attributes: \(unset\) -> 111 | from .* with 5s write delay",
+ test.add_log_pattern(r"Setting AAA\[.*\] in instance_attributes: \(unset\) -> 111 \| from .* with 5s write delay",
+ regex=True)
+
+ test = self.new_test("set_attr_4",
+ "Update an attribute that does not exist with a delay")
+ test.add_cmd("attrd_updater", "--name BBB -U 999 -d 10 --output-as=xml")
+ test.add_cmd_check_stdout("attrd_updater", "--name BBB -Q --output-as=xml",
+ "name=\"BBB\" value=\"999\"")
+ test.add_log_pattern(r"Setting BBB\[.*\] in instance_attributes: \(unset\) -> 999 \| from .* with 10s write delay",
regex=True)
test = self.new_test("update_attr_1",
@@ -140,6 +148,13 @@ class AttributeTests(Tests):
test.add_log_pattern(r"Setting BBB\[.*\] in instance_attributes: 222 -> 333",
regex=True)
+ test = self.new_test("update_attr_2",
+ "Update an attribute using a delay other than its default")
+ test.add_cmd("attrd_updater", "--name BBB -U 777 -d 10 --output-as=xml")
+ test.add_cmd("attrd_updater", "--name BBB -U 888 -d 7 --output-as=xml")
+ test.add_log_pattern(r"Setting BBB\[.*\] in instance_attributes: 777 -> 888 \| from .* with 10s write delay",
+ regex=True)
+
test = self.new_test("update_attr_delay_1",
"Update the delay of an attribute that already exists")
test.add_cmd("attrd_updater", "--name BBB -U 222 --output-as=xml")
diff --git a/cts/cts-cli.in b/cts/cts-cli.in
index fdad002..f4cb7c3 100755
--- a/cts/cts-cli.in
+++ b/cts/cts-cli.in
@@ -1,6 +1,6 @@
#!@BASH_PATH@
#
-# Copyright 2008-2022 the Pacemaker project contributors
+# Copyright 2008-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -143,7 +143,7 @@ function _test_assert() {
target=$1; shift
validate=$1; shift
cib=$1; shift
- app=`echo "$cmd" | sed 's/\ .*//'`
+ app=$(echo "$cmd" | head -n 1 | sed 's/\ .*//')
printf "* Running: $app - $desc\n" 1>&2
printf "=#=#=#= Begin test: $desc =#=#=#=\n"
@@ -2289,6 +2289,53 @@ function test_tools() {
desc="Switch to nonexistent shadow instance (force) (XML)"
cmd="crm_shadow --switch $shadow --batch --force --output-as=xml"
test_assert_validate $CRM_EX_NOSUCH 0
+
+ CIB_file_invalid_1="$test_home/cli/crm_verify_invalid_bz.xml"
+ CIB_file_invalid_2="$test_home/cli/crm_verify_invalid_no_stonith.xml"
+
+ desc="Verify a file-specified invalid configuration, outputting as xml"
+ cmd="crm_verify --xml-file '$CIB_file_invalid_1' --output-as=xml"
+ test_assert_validate $CRM_EX_CONFIG 0
+
+ desc="Verify another file-specified invalid configuration, outputting as xml"
+ cmd="crm_verify --xml-file '$CIB_file_invalid_2' --output-as=xml"
+ test_assert_validate $CRM_EX_CONFIG 0
+
+ desc="Verbosely verify a file-specified invalid configuration, outputting as xml"
+ cmd="crm_verify --xml-file '$CIB_file_invalid_1' --output-as=xml --verbose"
+ test_assert_validate $CRM_EX_CONFIG 0
+
+ desc="Verbosely verify another file-specified invalid configuration, outputting as xml"
+ cmd="crm_verify --xml-file '$CIB_file_invalid_2' --output-as=xml --verbose"
+ test_assert_validate $CRM_EX_CONFIG 0
+
+ export CIB_file="$test_home/cli/crm_mon.xml"
+
+ desc="Verify a file-specified valid configuration, outputting as xml"
+ cmd="crm_verify --xml-file '$CIB_file' --output-as=xml"
+ test_assert_validate $CRM_EX_OK 0
+
+ desc="Verify a piped-in valid configuration, outputting as xml"
+ cmd="cat '$CIB_file' | crm_verify -p --output-as=xml"
+ test_assert_validate $CRM_EX_OK 0
+
+ desc="Verbosely verify a file-specified valid configuration, outputting as xml"
+ cmd="crm_verify --xml-file '$CIB_file' --output-as=xml --verbose"
+ test_assert_validate $CRM_EX_OK 0
+
+ desc="Verbosely verify a piped-in valid configuration, outputting as xml"
+ cmd="cat '$CIB_file' | crm_verify -p --output-as=xml --verbose"
+ test_assert_validate $CRM_EX_OK 0
+
+ CIB_file_contents=$(cat "$CIB_file")
+
+ desc="Verify a string-supplied valid configuration, outputting as xml"
+ cmd="crm_verify -X '$CIB_file_contents' --output-as=xml"
+ test_assert_validate $CRM_EX_OK 0
+
+ desc="Verbosely verify a string-supplied valid configuration, outputting as xml"
+ cmd="crm_verify -X '$CIB_file_contents' --output-as=xml --verbose"
+ test_assert_validate $CRM_EX_OK 0
unset CIB_file
unset CIB_shadow
@@ -3382,6 +3429,11 @@ function print_or_remove_file() {
rm -f "$TMPFILE"
else
echo " $TMPFILE"
+ if [ $verbose -ne 0 ]; then
+ echo "======================================================"
+ cat "$TMPFILE"
+ echo "======================================================"
+ fi
fi
}
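The crm_verify cases added above feed the same CIB to crm_verify in three ways; a minimal sketch of the invocations (the path is illustrative):

    crm_verify --xml-file /path/to/cib.xml --output-as=xml       # read from a file
    cat /path/to/cib.xml | crm_verify -p --output-as=xml         # read from stdin
    crm_verify -X "$(cat /path/to/cib.xml)" --output-as=xml      # passed as a string

The --verbose variants assert the same exit codes; they only add warnings to the captured output, as the expected results above show.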
diff --git a/cts/cts-lab.in b/cts/cts-lab.in
new file mode 100644
index 0000000..01bf9aa
--- /dev/null
+++ b/cts/cts-lab.in
@@ -0,0 +1,136 @@
+#!@PYTHON@
+""" Command-line interface to Pacemaker's Cluster Test Suite (CTS)
+"""
+
+# pylint doesn't like the module name "cts-lab" which is an invalid complaint for this file
+# This also disables various other invalid names - it thinks scenario and match are constants
+# that should have all caps names, and that cm and n are too short.
+# pylint: disable=invalid-name
+
+__copyright__ = "Copyright 2001-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+import signal
+import sys
+
+from pacemaker._cts.CTS import CtsLab
+from pacemaker._cts.cmcorosync import Corosync2
+from pacemaker._cts.audits import audit_list
+from pacemaker._cts.logging import LogFactory
+from pacemaker._cts.scenarios import AllOnce, Boot, BootCluster, LeaveBooted, RandomTests, Sequence
+from pacemaker._cts.tests import test_list
+
+# These are globals so they can be used by the signal handler.
+scenario = None
+LogFactory().add_stderr()
+
+
+def sig_handler(signum, _frame):
+ """ Handle the given signal number """
+
+ LogFactory().log("Interrupted by signal %d" % signum)
+
+ if scenario:
+ scenario.summarize()
+
+ if signum == 15:
+ if scenario:
+ scenario.teardown()
+
+ sys.exit(1)
+
+
+def plural_s(n):
+ """ Return a string suffix depending on whether or not n is > 1 """
+
+ if n == 1:
+ return ""
+
+ return "S"
+
+
+if __name__ == '__main__':
+ environment = CtsLab(sys.argv[1:])
+ iters = environment["iterations"]
+ tests = []
+
+ # Set the signal handler
+ signal.signal(15, sig_handler)
+ signal.signal(10, sig_handler)
+
+ # Create the Cluster Manager object
+ cm = None
+
+ if environment["Stack"] == "corosync 2+":
+ cm = Corosync2()
+ else:
+ LogFactory().log("Unknown stack: %s" % environment["stack"])
+ sys.exit(1)
+
+ if environment["TruncateLog"]:
+ if environment["OutputFile"] is None:
+ LogFactory().log("Ignoring truncate request because no output file specified")
+ else:
+ LogFactory().log("Truncating %s" % environment["OutputFile"])
+
+ with open(environment["OutputFile"], "w", encoding="utf-8") as outputfile:
+ outputfile.truncate(0)
+
+ audits = audit_list(cm)
+
+ if environment["Listtests"]:
+ tests = test_list(cm, audits)
+ LogFactory().log("Total %d tests" % len(tests))
+
+ for test in tests:
+ LogFactory().log(test.name)
+
+ sys.exit(0)
+
+ elif len(environment["tests"]) == 0:
+ tests = test_list(cm, audits)
+
+ else:
+ chosen = environment["tests"]
+ for test_case in chosen:
+ match = None
+
+ for test in test_list(cm, audits):
+ if test.name == test_case:
+ match = test
+
+ if not match:
+ LogFactory().log("--choose: No applicable/valid tests chosen")
+ sys.exit(1)
+ else:
+ tests.append(match)
+
+ # Scenario selection
+ if environment["scenario"] == "all-once":
+ iters = len(tests)
+ scenario = AllOnce(cm, [ BootCluster(cm, environment) ], audits, tests)
+ elif environment["scenario"] == "sequence":
+ scenario = Sequence(cm, [ BootCluster(cm, environment) ], audits, tests)
+ elif environment["scenario"] == "boot":
+ scenario = Boot(cm, [ LeaveBooted(cm, environment)], audits, [])
+ else:
+ scenario = RandomTests(cm, [ BootCluster(cm, environment) ], audits, tests)
+
+ LogFactory().log(">>>>>>>>>>>>>>>> BEGINNING %r TEST%s" % (iters, plural_s(iters)))
+ LogFactory().log("Stack: %s (%s)" % (environment["Stack"], environment["Name"]))
+ LogFactory().log("Schema: %s" % environment["Schema"])
+ LogFactory().log("Scenario: %s" % scenario.__doc__)
+ LogFactory().log("CTS Exerciser: %s" % environment["cts-exerciser"])
+ LogFactory().log("CTS Logfile: %s" % environment["OutputFile"])
+ LogFactory().log("Random Seed: %s" % environment["RandSeed"])
+ LogFactory().log("Syslog variant: %s" % environment["syslogd"].strip())
+ LogFactory().log("System log files: %s" % environment["LogFileName"])
+
+ if "IPBase" in environment:
+ LogFactory().log("Base IP for resources: %s" % environment["IPBase"])
+
+ LogFactory().log("Cluster starts at boot: %d" % environment["at-boot"])
+
+ environment.dump()
+ rc = environment.run(scenario, iters)
+ sys.exit(rc)
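cts-lab is the new entry point that replaces CTSlab.py; a minimal sketch of how the other pieces of this patch invoke it (log file, stack, node list, and iteration count are per-setup):

    cts-lab -L "$CTS_logfile" --stack "$CTS_stack" --nodes "$CTS_node_list" "$CTS_numtests"
    cts-lab -r -c -g "$cluster_name" --boot    # boot the cluster only, as the cts wrapper below does
    cts-lab -r -c -g "$cluster_name" 500       # full 500-iteration run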
diff --git a/cts/lab/cts-log-watcher.in b/cts/cts-log-watcher.in
index cee9c94..cee9c94 100644
--- a/cts/lab/cts-log-watcher.in
+++ b/cts/cts-log-watcher.in
diff --git a/cts/cts-scheduler.in b/cts/cts-scheduler.in
index ee0cb7b..50c32f6 100644
--- a/cts/cts-scheduler.in
+++ b/cts/cts-scheduler.in
@@ -80,6 +80,10 @@ TESTS = [
[ "group-dependents", "Account for the location preferences of things colocated with a group" ],
[ "group-stop-ordering", "Ensure blocked group member stop does not force other member stops" ],
[ "colocate-unmanaged-group", "Respect mandatory colocations even if earlier group member is unmanaged" ],
+ [
+ "coloc-with-inner-group-member",
+ "Consider explicit colocations with inner group members"
+ ],
],
[
[ "rsc_dep1", "Must not" ],
@@ -205,6 +209,7 @@ TESTS = [
[ "rule-int-parse-fail-default-str-no-match",
"Integer rule values fail to parse, default to string "
"comparison: no match" ],
+ [ "timeout-by-node", "Start timeout varies by node" ],
],
[
[ "order1", "Order start 1" ],
@@ -253,6 +258,18 @@ TESTS = [
[ "anti-colocation-promoted", "Organize order of actions for promoted resources in anti-colocations" ],
[ "anti-colocation-unpromoted", "Organize order of actions for unpromoted resources in anti-colocations" ],
[ "group-anticolocation", "Group with failed last member anti-colocated with another group" ],
+ [ "group-anticolocation-2",
+ "Group with failed last member anti-colocated with another sticky group"
+ ],
+ [ "group-anticolocation-3",
+ "Group with failed last member mandatorily anti-colocated with another group"
+ ],
+ [ "group-anticolocation-4",
+ "Group with failed last member anti-colocated without influence with another group"
+ ],
+ [ "group-anticolocation-5",
+ "Group with failed last member anti-colocated with another group (third node allowed)"
+ ],
[ "group-colocation-failure",
"Group with sole member failed, colocated with another group"
],
@@ -441,6 +458,39 @@ TESTS = [
[ "cloned-group", "Make sure only the correct number of cloned groups are started" ],
[ "cloned-group-stop", "Ensure stopping qpidd also stops glance and cinder" ],
[ "clone-no-shuffle", "Don't prioritize allocation of instances that must be moved" ],
+ [ "clone-recover-no-shuffle-1",
+ "Don't shuffle instances when starting a new primitive instance" ],
+ [ "clone-recover-no-shuffle-2",
+ "Don't shuffle instances when starting a new group instance" ],
+ [ "clone-recover-no-shuffle-3",
+ "Don't shuffle instances when starting a new bundle instance" ],
+ [ "clone-recover-no-shuffle-4",
+ "Don't shuffle instances when starting a new primitive instance with "
+ "location preference "],
+ [ "clone-recover-no-shuffle-5",
+ "Don't shuffle instances when starting a new group instance with "
+ "location preference" ],
+ [ "clone-recover-no-shuffle-6",
+ "Don't shuffle instances when starting a new bundle instance with "
+ "location preference" ],
+ [ "clone-recover-no-shuffle-7",
+ "Don't shuffle instances when starting a new primitive instance that "
+ "will be promoted" ],
+ [ "clone-recover-no-shuffle-8",
+ "Don't shuffle instances when starting a new group instance that "
+ "will be promoted " ],
+ [ "clone-recover-no-shuffle-9",
+ "Don't shuffle instances when starting a new bundle instance that "
+ "will be promoted " ],
+ [ "clone-recover-no-shuffle-10",
+ "Don't shuffle instances when starting a new primitive instance that "
+ "won't be promoted" ],
+ [ "clone-recover-no-shuffle-11",
+ "Don't shuffle instances when starting a new group instance that "
+ "won't be promoted " ],
+ [ "clone-recover-no-shuffle-12",
+ "Don't shuffle instances when starting a new bundle instance that "
+ "won't be promoted " ],
[ "clone-max-zero", "Orphan processing with clone-max=0" ],
[ "clone-anon-dup",
"Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node" ],
@@ -715,6 +765,8 @@ TESTS = [
"cl#5301 - respect order constraints when relevant resources are being probed" ],
[ "concurrent-fencing", "Allow performing fencing operations in parallel" ],
[ "priority-fencing-delay", "Delay fencing targeting the more significant node" ],
+ [ "pending-node-no-uname", "Do not fence a pending node that doesn't have an uname in node state yet" ],
+ [ "node-pending-timeout", "Fence a pending node that has reached `node-pending-timeout`" ],
],
[
[ "systemhealth1", "System Health () #1" ],
@@ -990,6 +1042,42 @@ TESTS = [
[ "bundle-replicas-change", "Change bundle from 1 replica to multiple" ],
[ "bundle-connection-with-container", "Don't move a container due to connection preferences" ],
[ "nested-remote-recovery", "Recover bundle's container hosted on remote node" ],
+ [ "bundle-promoted-location-1",
+ "Promotable bundle, positive location" ],
+ [ "bundle-promoted-location-2",
+ "Promotable bundle, negative location" ],
+ [ "bundle-promoted-location-3",
+ "Promotable bundle, positive location for promoted role" ],
+ [ "bundle-promoted-location-4",
+ "Promotable bundle, negative location for promoted role" ],
+ [ "bundle-promoted-location-5",
+ "Promotable bundle, positive location for unpromoted role" ],
+ [ "bundle-promoted-location-6",
+ "Promotable bundle, negative location for unpromoted role" ],
+ [ "bundle-promoted-colocation-1",
+ "Primary promoted bundle, dependent primitive (mandatory coloc)" ],
+ [ "bundle-promoted-colocation-2",
+ "Primary promoted bundle, dependent primitive (optional coloc)" ],
+ [ "bundle-promoted-colocation-3",
+ "Dependent promoted bundle, primary primitive (mandatory coloc)" ],
+ [ "bundle-promoted-colocation-4",
+ "Dependent promoted bundle, primary primitive (optional coloc)" ],
+ [ "bundle-promoted-colocation-5",
+ "Primary and dependent promoted bundle instances (mandatory coloc)" ],
+ [ "bundle-promoted-colocation-6",
+ "Primary and dependent promoted bundle instances (optional coloc)" ],
+ [ "bundle-promoted-anticolocation-1",
+ "Primary promoted bundle, dependent primitive (mandatory anti)" ],
+ [ "bundle-promoted-anticolocation-2",
+ "Primary promoted bundle, dependent primitive (optional anti)" ],
+ [ "bundle-promoted-anticolocation-3",
+ "Dependent promoted bundle, primary primitive (mandatory anti)" ],
+ [ "bundle-promoted-anticolocation-4",
+ "Dependent promoted bundle, primary primitive (optional anti)" ],
+ [ "bundle-promoted-anticolocation-5",
+ "Primary and dependent promoted bundle instances (mandatory anti)" ],
+ [ "bundle-promoted-anticolocation-6",
+ "Primary and dependent promoted bundle instances (optional anti)" ],
],
[
[ "whitebox-fail1", "Fail whitebox container rsc" ],
diff --git a/cts/cts.in b/cts/cts.in
new file mode 100755
index 0000000..24339aa
--- /dev/null
+++ b/cts/cts.in
@@ -0,0 +1,404 @@
+#!@BASH_PATH@
+#
+# Copyright 2012-2023 the Pacemaker project contributors
+#
+# The version control history for this file may have further details.
+#
+# This source code is licensed under the GNU General Public License version 2
+# or later (GPLv2+) WITHOUT ANY WARRANTY.
+#
+
+# e.g. /etc/sysconfig or /etc/default
+CONFIG_DIR=@CONFIGDIR@
+
+cts_root=`dirname $0`
+
+logfile=0
+summary=0
+verbose=0
+watch=0
+saved=0
+tests=""
+
+install=0
+clean=0
+kill=0
+run=0
+boot=0
+target=rhel-7
+cmd=""
+trace=""
+
+custom_log=""
+patterns="-e CTS:"
+
+function sed_in_place_remotely() {
+ cluster-helper -g $cluster_name -- cp -p "\"$1\"" "\"$1.sed\"" \&\& sed -e "\"$2\"" "\"$1\"" \> "\"$1.sed\"" \&\& mv "\"$1.sed\"" "\"$1\""
+}
+
+
+helpmsg=$(cat <<EOF
+Usage: %s [options] {[CMDS]}
+
+[--]help, -h show help screen and exit
+-x turn on debugging
+-a show relevant screen sessions and exit
+-c,-g CLUSTER_NAME set the cluster name
+-S show summary from the last CTS run
+-s show summary for the current log (see -l)
+-v increase verbosity
+-p (currently unused)
+-e PATTERN grep pattern to apply when 'summary' or 'watch' requested
+-l print the filename of the log that would be operated on
+-w continuous (filtered) monitoring of the log file
+-f,-sf FILE show summary for the provided log
+-t TEST, [0-9]* add a test to the working set
+[--]kill request termination of cluster software
+[--]run request CTS run (passing remaining arguments through)
+[--]boot, start request CTS run (with --boot option)
+[--]clean request cleaning up after CTS run
+[--]install, --inst request installing packages to get ready to run CTS
+trace-ls, tls list traced functions
+trace-add, tadd FUNC add a function to the list of traced ones
+trace-rm, trm FUNC remove a function from the list of traced ones
+trace-set, tset FUNC set function(s) as the only ones to be traced
+(f|fedora|r|rhel).* specify target distro
+-- delimits tests that follow
+EOF
+)
+
+while true; do
+ case $1 in
+ -h|--help|help)
+ printf "${helpmsg}\n" "$0"
+ exit
+ ;;
+ -x)
+ set -x
+ shift
+ ;;
+ -a)
+ screen -ls | grep cts
+ exit 0
+ ;;
+ -c|-g)
+ cluster_name=$2
+ shift
+ shift
+ ;;
+ -S)
+ summary=1
+ saved=1
+ shift
+ ;;
+ -s)
+ summary=1
+ shift
+ ;;
+ -v)
+ verbose=`expr $verbose + 1`
+ shift
+ ;;
+ -p)
+ shift
+ ;;
+ -e)
+ patterns="$patterns -e `echo $2 | sed 's/ /\\\W/g'`"
+ shift
+ shift
+ ;;
+ -l)
+ logfile=1
+ shift
+ ;;
+ -w)
+ watch=1
+ shift
+ ;;
+ -f|-sf)
+ summary=1
+ custom_log=$2
+ shift
+ shift
+ ;;
+ -t)
+ tests="$tests $2"
+ shift
+ shift
+ ;;
+ [0-9]*)
+ tests="$tests $1"
+ shift
+ ;;
+ --kill|kill)
+ kill=1
+ shift
+ break
+ ;;
+ --run|run)
+ run=1
+ shift
+ break
+ ;;
+ --boot|boot|start)
+ boot=1
+ clean=1
+ shift
+ break
+ ;;
+ --clean|clean)
+ clean=1
+ shift
+ ;;
+ --inst|--install|install)
+ install=1
+ clean=1
+ shift
+ ;;
+ trace-ls|tls)
+ cmd=$1
+ shift
+ ;;
+ trace-add|tadd|trace-rm|trm|trace-set|tset)
+ cmd=$1
+ trace=$2
+ shift
+ shift
+ if [ -z "$trace" ]; then
+ printf "${helpmsg}\n" "$0"
+ exit
+ fi
+ ;;
+ f*)
+ target="fedora-`echo $1 | sed -e s/fedora// -e s/-// -e s/f//`"
+ shift
+ ;;
+ r|rhel)
+ target="rhel-7"
+ shift
+ ;;
+ r*)
+ target="rhel-`echo $1 | sed -e s/rhel// -e s/-// -e s/r//`"
+ shift
+ ;;
+ --)
+ shift
+ tests="$tests $*"
+ break
+ ;;
+ "")
+ break
+ ;;
+ *)
+ echo "Unknown argument: $1"
+ exit 1
+ ;;
+ esac
+done
+
+# Add the location of this script
+export PATH="$PATH:$cts_root"
+which cluster-helper &>/dev/null
+if [ $? != 0 ]; then
+ echo $0 needs the cluster-helper script to be in your path
+ exit 1
+fi
+
+which cluster-clean &>/dev/null
+if [ $? != 0 ]; then
+ echo $0 needs the cluster-clean script to be in your path
+ exit 1
+fi
+
+if [ "x$cluster_name" = x ] || [ "x$cluster_name" = xpick ]; then
+ clusters=`ls -1 ~/.dsh/group/[a-z]+[0-9] | sed s/.*group.// | tr '\n' ' ' `
+
+ echo "custom) interactively define a cluster"
+ for i in $clusters; do
+ echo "$i) `cluster-helper --list short -g $i`"
+ done
+
+ read -p "Choose a cluster [custom]: " cluster_name
+ echo
+fi
+
+if [ -z $cluster_name ]; then
+ cluster_name=custom
+fi
+
+
+case $cluster_name in
+ custom)
+ read -p "Cluster name: " cluster_name
+ read -p "Cluster hosts: " cluster_hosts
+ read -p "Cluster log file: " cluster_log
+ cluster-helper add -g "$cluster_name" -w "$cluster_hosts"
+ ;;
+ *)
+ cluster_hosts=`cluster-helper --list short -g $cluster_name`
+ cluster_log=~/cluster-$cluster_name.log
+ ;;
+esac
+
+# NOTES ABOUT THESE AWESOME REGULAR EXPRESSIONS:
+#
+# * We can't assume GNU sed. Unfortunately, + and * are GNU extensions. Thus,
+# we have to use {1,} for + and {0,} for *.
+# * You don't need to add an extra set of escaped quotes around the sed expression
+# arguments here - sed_in_place_remotely will do that for you.
+# * Only literal quotes need the triple backslashes. All other special characters
+# are fine with just a single one.
+# * sed needs a LOT of characters escaped - \, {, }, (, ), and | at least.
+
+if [ x$cmd != x ]; then
+ config="${CONFIG_DIR}/pacemaker"
+ case $cmd in
+ trace-ls|tls)
+ cluster-helper -g $cluster_name -- grep "^[[:space:]]*PCMK_trace_functions" $config
+ ;;
+ trace-add|tadd)
+ echo "Adding $trace to PCMK_trace_functions"
+ # Note that this only works if there's already a PCMK_trace_functions line.
+ # If there isn't one, create it with trace-set first.
+ #
+ # Match optional whitespace; then PCMK_trace_functions; then an equals
+ # surrounded by optional whitespace; then an optional quote; then whatever
+ # else (presumably, this is the list of previously traced functions with
+ # an optional trailing quote). Replace the entire line with
+ # PCMK_trace_functions=<new_fn>,<previous trailing line contents>
+ sed_in_place_remotely "$config" "s/^[ \t]\{0,\}PCMK_trace_functions[ \t]\{0,\}=[ \t]\{0,\}\(\\\"\{0,1\}\)\(.\{1,\}\)/PCMK_trace_functions=\1$trace,\2/"
+ ;;
+ trace-rm|trm)
+ echo "Removing $trace from PCMK_trace_functions"
+ # A bunch of simple regexes are easier to follow than one giant one.
+ # Look for $trace in the following places on any line containing
+ # PCMK_trace_functions near the beginning:
+ #
+ # (1) At the start of a list -
+ # Match one of a leading quote, or an equals followed by optional
+ # whitespace; then $trace; then a comma. Replace $trace with whatever
+ # came before it.
+ # (2) In the middle of a list -
+ # Match a comma; then $trace; then a comma. Replace $trace with a
+ # single comma.
+ # (3) At the end of a list -
+ # Match a comma; then $trace; then one of a quote, whitespace, or
+ # the EOL. Replace $trace with whatever came after it.
+ # (4) All by itself -
+ # Match one of a leading quote, whitespace, or equals followed by
+ # optional whitespace; then $trace; then one of a trailing quote,
+ # whitespace, or the EOL. Replace $trace with whatever came before
+ # and after it.
+ sed_in_place_remotely "$config" "/^[ \t]\{0,\}PCMK_trace_functions/ { \
+ s/\(\\\"\|=\|[ \t]\{1,\}\)$trace,/\1/ ; \
+ s/,$trace,/,/ ; \
+ s/,$trace\(\\\"\|[ \t]\{1,\}\|$\)/\1/ ; \
+ s/\(\\\"\|[ \t]\{1,\}\|=[ \t]\{0,\}\)$trace\(\\\"\|[ \t]\{1,\}\|$\)/\1\2/ }"
+ ;;
+ trace-set|tset)
+ echo "Setting PCMK_trace_functions to '$trace'"
+ # Do this in two separate sed commands:
+ #
+ # (1) Unconditionally remove any existing PCMK_trace_functions= lines.
+ # (2) Add a new line with $trace after the example line, which therefore
+ # must exist. Note that GNU sed would support "a PCMK_trace_functions=$trace",
+ # but that's an extension. For all other seds, we have to put the
+ # command and the text on separate lines.
+ sed_in_place_remotely "$config" "/^[ \t]*PCMK_trace_functions/ d ; /^# Example: PCMK_trace_functions/ a\\\
+PCMK_trace_functions=\\\"$trace\\\""
+ ;;
+ esac
+ exit 0
+fi
+
+if [ $run = 1 ]; then
+ install=1
+ clean=1
+fi
+
+if [ $clean = 1 ]; then
+ rm -f $cluster_log
+ cluster-clean -g $cluster_name --kill
+elif [ $kill = 1 ]; then
+ cluster-clean -g $cluster_name --kill-only
+ exit 0
+fi
+
+if [ $install = 1 ]; then
+ cluster-helper -g $cluster_name -- yum install -y pacemaker pacemaker-debuginfo pacemaker-cts libqb libqb-debuginfo
+fi
+
+if [ $boot = 1 ]; then
+ $cts_root/cts-lab -r -c -g $cluster_name --boot
+ rc=$?
+ if [ $rc = 0 ]; then
+ echo "The cluster is ready..."
+ fi
+ exit $rc
+
+elif [ $run = 1 ]; then
+ $cts_root/cts-lab -r -c -g $cluster_name 500 "$@"
+ exit $?
+
+elif [ $clean = 1 ]; then
+ exit 0
+fi
+
+screen -ls | grep cts-$cluster_name &>/dev/null
+active=$?
+
+if [ ! -z $custom_log ]; then
+ cluster_log=$custom_log
+fi
+
+if [ "x$tests" != x ] && [ "x$tests" != "x " ]; then
+ for t in $tests; do
+ echo "crm_report --cts-log $cluster_log -d -T $t"
+ crm_report --cts-log $cluster_log -d -T $t
+ done
+
+elif [ $logfile = 1 ]; then
+ echo $cluster_log
+
+elif [ $summary = 1 ]; then
+ files=$cluster_log
+ if [ $saved = 1 ]; then
+ files=`ls -1tr ~/CTS-*/cluster-log.txt`
+ fi
+ for f in $files; do
+ echo $f
+ case $verbose in
+ 0)
+ cat -n $f | grep $patterns | grep -v "CTS: debug:"
+ ;;
+ 1)
+ cat -n $f | grep $patterns | grep -v "CTS:.* cmd:"
+ ;;
+ *)
+ cat -n $f | grep $patterns
+ ;;
+ esac
+ echo ""
+ done
+
+elif [ $watch = 1 ]; then
+ case $verbose in
+ 0)
+ tail -F $cluster_log | grep $patterns | grep -v "CTS: debug:"
+ ;;
+ 1)
+ tail -F $cluster_log | grep $patterns | grep -v "CTS:.* cmd:"
+ ;;
+ *)
+ tail -F $cluster_log | grep $patterns
+ ;;
+ esac
+
+elif [ $active = 0 ]; then
+ screen -x cts-$cluster_name
+
+else
+ touch $cluster_log
+ export cluster_name cluster_hosts cluster_log
+ screen -S cts-$cluster_name bash
+fi
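The trace-* subcommands of the cts wrapper above rewrite the PCMK_trace_functions entry in the Pacemaker sysconfig file on every node of the selected cluster group; a minimal usage sketch (cluster and function names are illustrative):

    cts -g mycluster trace-set pcmk__primitive_assign   # replace the list with one function
    cts -g mycluster tadd pcmk__bundle_assign           # prepend a second one
    cts -g mycluster tls                                 # grep the current line on each node
    cts -g mycluster trm pcmk__bundle_assign             # remove it again

After the tadd above, the maintained line would look like PCMK_trace_functions="pcmk__bundle_assign,pcmk__primitive_assign", since trace-add prepends to the existing list.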
diff --git a/cts/lab/CIB.py b/cts/lab/CIB.py
deleted file mode 100644
index 5981654..0000000
--- a/cts/lab/CIB.py
+++ /dev/null
@@ -1,518 +0,0 @@
-""" CIB generator for Pacemaker's Cluster Test Suite (CTS)
-"""
-
-__copyright__ = "Copyright 2008-2023 the Pacemaker project contributors"
-__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
-
-import os
-import warnings
-import tempfile
-
-from pacemaker.buildoptions import BuildOptions
-from pacemaker._cts.CTS import CtsLab
-
-
-class CibBase(object):
- def __init__(self, Factory, tag, _id, **kwargs):
- self.tag = tag
- self.name = _id
- self.kwargs = kwargs
- self.children = []
- self.Factory = Factory
-
- def __repr__(self):
- return "%s-%s" % (self.tag, self.name)
-
- def add_child(self, child):
- self.children.append(child)
-
- def __setitem__(self, key, value):
- if value:
- self.kwargs[key] = value
- else:
- self.kwargs.pop(key, None)
-
-from cts.cib_xml import *
-
-
-class ConfigBase(object):
- cts_cib = None
- version = "unknown"
- Factory = None
-
- def __init__(self, CM, factory, tmpfile=None):
- self.CM = CM
- self.Factory = factory
-
- if not tmpfile:
- warnings.filterwarnings("ignore")
- f=tempfile.NamedTemporaryFile(delete=True)
- f.close()
- tmpfile = f.name
- warnings.resetwarnings()
-
- self.Factory.tmpfile = tmpfile
-
- def version(self):
- return self.version
-
- def NextIP(self):
- ip = self.CM.Env["IPBase"]
- if ":" in ip:
- (prefix, sep, suffix) = ip.rpartition(":")
- suffix = str(hex(int(suffix, 16)+1)).lstrip("0x")
- else:
- (prefix, sep, suffix) = ip.rpartition(".")
- suffix = str(int(suffix)+1)
-
- ip = prefix + sep + suffix
- self.CM.Env["IPBase"] = ip
- return ip.strip()
-
-
-class CIB12(ConfigBase):
- version = "pacemaker-1.2"
- counter = 1
-
- def _show(self, command=""):
- output = ""
- (_, result) = self.Factory.rsh(self.Factory.target, "HOME=/root CIB_file="+self.Factory.tmpfile+" cibadmin -Ql "+command, verbose=1)
- for line in result:
- output += line
- self.Factory.debug("Generated Config: "+line)
- return output
-
- def NewIP(self, name=None, standard="ocf"):
- if self.CM.Env["IPagent"] == "IPaddr2":
- ip = self.NextIP()
- if not name:
- if ":" in ip:
- (prefix, sep, suffix) = ip.rpartition(":")
- name = "r"+suffix
- else:
- name = "r"+ip
-
- r = Resource(self.Factory, name, self.CM.Env["IPagent"], standard)
- r["ip"] = ip
-
- if ":" in ip:
- r["cidr_netmask"] = "64"
- r["nic"] = "eth0"
- else:
- r["cidr_netmask"] = "32"
-
- else:
- if not name:
- name = "r%s%d" % (self.CM.Env["IPagent"], self.counter)
- self.counter = self.counter + 1
- r = Resource(self.Factory, name, self.CM.Env["IPagent"], standard)
-
- r.add_op("monitor", "5s")
- return r
-
- def get_node_id(self, node_name):
- """ Check the cluster configuration for a node ID. """
-
- # We can't account for every possible configuration,
- # so we only return a node ID if:
- # * The node is specified in /etc/corosync/corosync.conf
- # with "ring0_addr:" equal to node_name and "nodeid:"
- # explicitly specified.
- # In all other cases, we return 0.
- node_id = 0
-
- # awkward command: use } as record separator
- # so each corosync.conf "object" is one record;
- # match the "node {" record that has "ring0_addr: node_name";
- # then print the substring of that record after "nodeid:"
- (rc, output) = self.Factory.rsh(self.Factory.target,
- r"""awk -v RS="}" """
- r"""'/^(\s*nodelist\s*{)?\s*node\s*{.*(ring0_addr|name):\s*%s(\s+|$)/"""
- r"""{gsub(/.*nodeid:\s*/,"");gsub(/\s+.*$/,"");print}' %s"""
- % (node_name, BuildOptions.COROSYNC_CONFIG_FILE), verbose=1)
-
- if rc == 0 and len(output) == 1:
- try:
- node_id = int(output[0])
- except ValueError:
- node_id = 0
-
- return node_id
-
- def install(self, target):
- old = self.Factory.tmpfile
-
- # Force a rebuild
- self.cts_cib = None
-
- self.Factory.tmpfile = BuildOptions.CIB_DIR + "/cib.xml"
- self.contents(target)
- self.Factory.rsh(self.Factory.target, "chown " + BuildOptions.DAEMON_USER + " " + self.Factory.tmpfile)
-
- self.Factory.tmpfile = old
-
- def contents(self, target=None):
- # fencing resource
- if self.cts_cib:
- return self.cts_cib
-
- if target:
- self.Factory.target = target
-
- self.Factory.rsh(self.Factory.target, "HOME=/root cibadmin --empty %s > %s" % (self.version, self.Factory.tmpfile))
- self.num_nodes = len(self.CM.Env["nodes"])
-
- no_quorum = "stop"
- if self.num_nodes < 3:
- no_quorum = "ignore"
- self.Factory.log("Cluster only has %d nodes, configuring: no-quorum-policy=ignore" % self.num_nodes)
-
- # We don't need a nodes section unless we add attributes
- stn = None
-
- # Fencing resource
- # Define first so that the shell doesn't reject every update
- if self.CM.Env["DoFencing"]:
-
- # Define the "real" fencing device
- st = Resource(self.Factory, "Fencing", ""+self.CM.Env["stonith-type"], "stonith")
-
- # Set a threshold for unreliable stonith devices such as the vmware one
- st.add_meta("migration-threshold", "5")
- st.add_op("monitor", "120s", timeout="120s")
- st.add_op("stop", "0", timeout="60s")
- st.add_op("start", "0", timeout="60s")
-
- # For remote node tests, a cluster node is stopped and brought back up
- # as a remote node with the name "remote-OLDNAME". To allow fencing
- # devices to fence these nodes, create a list of all possible node names.
- all_node_names = [ prefix+n for n in self.CM.Env["nodes"] for prefix in ('', 'remote-') ]
-
- # Add all parameters specified by user
- entries = self.CM.Env["stonith-params"].split(',')
- for entry in entries:
- try:
- (name, value) = entry.split('=', 1)
- except ValueError:
- print("Warning: skipping invalid fencing parameter: %s" % entry)
- continue
-
- # Allow user to specify "all" as the node list, and expand it here
- if name in [ "hostlist", "pcmk_host_list" ] and value == "all":
- value = ' '.join(all_node_names)
-
- st[name] = value
-
- st.commit()
-
- # Test advanced fencing logic
- if True:
- stf_nodes = []
- stt_nodes = []
- attr_nodes = {}
-
- # Create the levels
- stl = FencingTopology(self.Factory)
- for node in self.CM.Env["nodes"]:
- # Remote node tests will rename the node
- remote_node = "remote-" + node
-
- # Randomly assign node to a fencing method
- ftype = self.CM.Env.random_gen.choice(["levels-and", "levels-or ", "broadcast "])
-
- # For levels-and, randomly choose targeting by node name or attribute
- by = ""
- if ftype == "levels-and":
- node_id = self.get_node_id(node)
- if node_id == 0 or self.CM.Env.random_gen.choice([True, False]):
- by = " (by name)"
- else:
- attr_nodes[node] = node_id
- by = " (by attribute)"
-
- self.CM.log(" - Using %s fencing for node: %s%s" % (ftype, node, by))
-
- if ftype == "levels-and":
- # If targeting by name, add a topology level for this node
- if node not in attr_nodes:
- stl.level(1, node, "FencingPass,Fencing")
-
- # Always target remote nodes by name, otherwise we would need to add
- # an attribute to the remote node only during remote tests (we don't
- # want nonexistent remote nodes showing up in the non-remote tests).
- # That complexity is not worth the effort.
- stl.level(1, remote_node, "FencingPass,Fencing")
-
- # Add the node (and its remote equivalent) to the list of levels-and nodes.
- stt_nodes.extend([node, remote_node])
-
- elif ftype == "levels-or ":
- for n in [ node, remote_node ]:
- stl.level(1, n, "FencingFail")
- stl.level(2, n, "Fencing")
- stf_nodes.extend([node, remote_node])
-
- # If any levels-and nodes were targeted by attribute,
- # create the attributes and a level for the attribute.
- if attr_nodes:
- stn = Nodes(self.Factory)
- for (node_name, node_id) in list(attr_nodes.items()):
- stn.add_node(node_name, node_id, { "cts-fencing" : "levels-and" })
- stl.level(1, None, "FencingPass,Fencing", "cts-fencing", "levels-and")
-
- # Create a Dummy agent that always passes for levels-and
- if len(stt_nodes):
- stt = Resource(self.Factory, "FencingPass", "fence_dummy", "stonith")
- stt["pcmk_host_list"] = " ".join(stt_nodes)
- # Wait this many seconds before doing anything, handy for letting disks get flushed too
- stt["random_sleep_range"] = "30"
- stt["mode"] = "pass"
- stt.commit()
-
- # Create a Dummy agent that always fails for levels-or
- if len(stf_nodes):
- stf = Resource(self.Factory, "FencingFail", "fence_dummy", "stonith")
- stf["pcmk_host_list"] = " ".join(stf_nodes)
- # Wait this many seconds before doing anything, handy for letting disks get flushed too
- stf["random_sleep_range"] = "30"
- stf["mode"] = "fail"
- stf.commit()
-
- # Now commit the levels themselves
- stl.commit()
-
- o = Option(self.Factory)
- o["stonith-enabled"] = self.CM.Env["DoFencing"]
- o["start-failure-is-fatal"] = "false"
- o["pe-input-series-max"] = "5000"
- o["shutdown-escalation"] = "5min"
- o["batch-limit"] = "10"
- o["dc-deadtime"] = "5s"
- o["no-quorum-policy"] = no_quorum
-
- if self.CM.Env["DoBSC"]:
- o["ident-string"] = "Linux-HA TEST configuration file - REMOVEME!!"
-
- o.commit()
-
- o = OpDefaults(self.Factory)
- o["timeout"] = "90s"
- o.commit()
-
- # Commit the nodes section if we defined one
- if stn is not None:
- stn.commit()
-
- # Add an alerts section if possible
- if self.Factory.rsh.exists_on_all(self.CM.Env["notification-agent"], self.CM.Env["nodes"]):
- alerts = Alerts(self.Factory)
- alerts.add_alert(self.CM.Env["notification-agent"],
- self.CM.Env["notification-recipient"])
- alerts.commit()
-
- # Add resources?
- if self.CM.Env["CIBResource"]:
- self.add_resources()
-
- if self.CM.cluster_monitor == 1:
- mon = Resource(self.Factory, "cluster_mon", "ClusterMon", "ocf", "pacemaker")
- mon.add_op("start", "0", requires="nothing")
- mon.add_op("monitor", "5s", requires="nothing")
- mon["update"] = "10"
- mon["extra_options"] = "-r -n"
- mon["user"] = "abeekhof"
- mon["htmlfile"] = "/suse/abeekhof/Export/cluster.html"
- mon.commit()
-
- #self._create('''location prefer-dc cluster_mon rule -INFINITY: \#is_dc eq false''')
-
- # generate cib
- self.cts_cib = self._show()
-
- if self.Factory.tmpfile != BuildOptions.CIB_DIR + "/cib.xml":
- self.Factory.rsh(self.Factory.target, "rm -f "+self.Factory.tmpfile)
-
- return self.cts_cib
-
- def add_resources(self):
- # Per-node resources
- for node in self.CM.Env["nodes"]:
- name = "rsc_"+node
- r = self.NewIP(name)
- r.prefer(node, "100")
- r.commit()
-
- # Migrator
- # Make this slightly sticky (since we have no other location constraints) to avoid relocation during Reattach
- m = Resource(self.Factory, "migrator","Dummy", "ocf", "pacemaker")
- m["passwd"] = "whatever"
- m.add_meta("resource-stickiness","1")
- m.add_meta("allow-migrate", "1")
- m.add_op("monitor", "P10S")
- m.commit()
-
- # Ping the test exerciser
- p = Resource(self.Factory, "ping-1","ping", "ocf", "pacemaker")
- p.add_op("monitor", "60s")
- p["host_list"] = self.CM.Env["cts-exerciser"]
- p["name"] = "connected"
- p["debug"] = "true"
-
- c = Clone(self.Factory, "Connectivity", p)
- c["globally-unique"] = "false"
- c.commit()
-
- # promotable clone resource
- s = Resource(self.Factory, "stateful-1", "Stateful", "ocf", "pacemaker")
- s.add_op("monitor", "15s", timeout="60s")
- s.add_op("monitor", "16s", timeout="60s", role="Promoted")
- ms = Clone(self.Factory, "promotable-1", s)
- ms["promotable"] = "true"
- ms["clone-max"] = self.num_nodes
- ms["clone-node-max"] = 1
- ms["promoted-max"] = 1
- ms["promoted-node-max"] = 1
-
- # Require connectivity to run the promotable clone
- r = Rule(self.Factory, "connected", "-INFINITY", op="or")
- r.add_child(Expression(self.Factory, "m1-connected-1", "connected", "lt", "1"))
- r.add_child(Expression(self.Factory, "m1-connected-2", "connected", "not_defined", None))
- ms.prefer("connected", rule=r)
-
- ms.commit()
-
- # Group Resource
- g = Group(self.Factory, "group-1")
- g.add_child(self.NewIP())
-
- if self.CM.Env["have_systemd"]:
- sysd = Resource(self.Factory, "petulant",
- "pacemaker-cts-dummyd@10", "service")
- sysd.add_op("monitor", "P10S")
- g.add_child(sysd)
- else:
- g.add_child(self.NewIP())
-
- g.add_child(self.NewIP())
-
- # Make group depend on the promotable clone
- g.after("promotable-1", first="promote", then="start")
- g.colocate("promotable-1", "INFINITY", withrole="Promoted")
-
- g.commit()
-
- # LSB resource
- lsb = Resource(self.Factory, "lsb-dummy", "LSBDummy", "lsb")
- lsb.add_op("monitor", "5s")
-
- # LSB with group
- lsb.after("group-1")
- lsb.colocate("group-1")
-
- lsb.commit()
-
-
-class CIB20(CIB12):
- version = "pacemaker-2.5"
-
-class CIB30(CIB12):
- version = "pacemaker-3.7"
-
-#class HASI(CIB10):
-# def add_resources(self):
-# # DLM resource
-# self._create('''primitive dlm ocf:pacemaker:controld op monitor interval=120s''')
-# self._create('''clone dlm-clone dlm meta globally-unique=false interleave=true''')
-
- # O2CB resource
-# self._create('''primitive o2cb ocf:ocfs2:o2cb op monitor interval=120s''')
-# self._create('''clone o2cb-clone o2cb meta globally-unique=false interleave=true''')
-# self._create('''colocation o2cb-with-dlm INFINITY: o2cb-clone dlm-clone''')
-# self._create('''order start-o2cb-after-dlm mandatory: dlm-clone o2cb-clone''')
-
-
-class ConfigFactory(object):
- def __init__(self, CM):
- self.CM = CM
- self.rsh = self.CM.rsh
- self.register("pacemaker12", CIB12, CM, self)
- self.register("pacemaker20", CIB20, CM, self)
- self.register("pacemaker30", CIB30, CM, self)
-# self.register("hae", HASI, CM, self)
- if not self.CM.Env["ListTests"]:
- self.target = self.CM.Env["nodes"][0]
- self.tmpfile = None
-
- def log(self, args):
- self.CM.log("cib: %s" % args)
-
- def debug(self, args):
- self.CM.debug("cib: %s" % args)
-
- def register(self, methodName, constructor, *args, **kargs):
- """register a constructor"""
- _args = [constructor]
- _args.extend(args)
- setattr(self, methodName, ConfigFactoryItem(*_args, **kargs))
-
- def unregister(self, methodName):
- """unregister a constructor"""
- delattr(self, methodName)
-
- def createConfig(self, name="pacemaker-1.0"):
- if name == "pacemaker-1.0":
- name = "pacemaker10";
- elif name == "pacemaker-1.2":
- name = "pacemaker12";
- elif name == "pacemaker-2.0":
- name = "pacemaker20";
- elif name.startswith("pacemaker-3."):
- name = "pacemaker30";
- elif name == "hasi":
- name = "hae";
-
- if hasattr(self, name):
- return getattr(self, name)()
- else:
- self.CM.log("Configuration variant '%s' is unknown. Defaulting to latest config" % name)
-
- return self.pacemaker30()
-
-
-class ConfigFactoryItem(object):
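- # Deferred constructor call: stores a config class and its arguments so ConfigFactory can instantiate a variant on demand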
- def __init__(self, function, *args, **kargs):
- self._function = function
- self._args = args
- self._kargs = kargs
-
- def __call__(self, *args, **kargs):
- """call function"""
- _args = list(self._args)
- _args.extend(args)
- _kargs = self._kargs.copy()
- _kargs.update(kargs)
- return self._function(*_args,**_kargs)
-
-if __name__ == '__main__':
- """ Unit test (pass cluster node names as command line arguments) """
-
- from cts import CM_corosync
- import sys
-
- if len(sys.argv) < 2:
- print("Usage: %s <node> ..." % sys.argv[0])
- sys.exit(1)
-
- args = [
- "--nodes", " ".join(sys.argv[1:]),
- "--clobber-cib",
- "--populate-resources",
- "--stack", "corosync",
- "--test-ip-base", "fe80::1234:56:7890:1000",
- "--stonith", "rhcs",
- ]
- env = CtsLab(args)
- cm = CM_corosync.crm_corosync()
- CibFactory = ConfigFactory(cm)
- cib = CibFactory.createConfig("pacemaker-3.0")
- print(cib.contents())
diff --git a/cts/lab/CM_corosync.py b/cts/lab/CM_corosync.py
deleted file mode 100644
index dce7e98..0000000
--- a/cts/lab/CM_corosync.py
+++ /dev/null
@@ -1,60 +0,0 @@
-""" Corosync-specific class for Pacemaker's Cluster Test Suite (CTS)
-"""
-
-__copyright__ = "Copyright 2007-2023 the Pacemaker project contributors"
-__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
-
-from cts.ClusterManager import ClusterManager
-
-from pacemaker._cts.CTS import Process
-from pacemaker._cts.patterns import PatternSelector
-
-class crm_corosync(ClusterManager):
- '''
- Corosync version 2 cluster manager class
- '''
- def __init__(self, name=None):
- if not name: name="crm-corosync"
- ClusterManager.__init__(self)
-
- self.fullcomplist = {}
- self.templates = PatternSelector(self.name)
-
- def Components(self):
- complist = []
- if not len(list(self.fullcomplist.keys())):
- for c in [ "pacemaker-based", "pacemaker-controld", "pacemaker-attrd", "pacemaker-execd", "pacemaker-fenced" ]:
- self.fullcomplist[c] = Process(
- self, c,
- pats = self.templates.get_component(c),
- badnews_ignore = self.templates.get_component("%s-ignore" % c) +
- self.templates.get_component("common-ignore"))
-
- # the scheduler uses dc_pats instead of pats
- self.fullcomplist["pacemaker-schedulerd"] = Process(
- self, "pacemaker-schedulerd",
- dc_pats = self.templates.get_component("pacemaker-schedulerd"),
- badnews_ignore = self.templates.get_component("pacemaker-schedulerd-ignore") +
- self.templates.get_component("common-ignore"))
-
- # add (or replace) extra components
- self.fullcomplist["corosync"] = Process(
- self, "corosync",
- pats = self.templates.get_component("corosync"),
- badnews_ignore = self.templates.get_component("corosync-ignore") +
- self.templates.get_component("common-ignore")
- )
-
- # Processes running under valgrind can't be shot with "killall -9 processname",
- # so don't include them in the returned list
- vgrind = self.Env["valgrind-procs"].split()
- for key in list(self.fullcomplist.keys()):
- if self.Env["valgrind-tests"]:
- if key in vgrind:
- self.log("Filtering %s from the component list as it is being profiled by valgrind" % key)
- continue
- if key == "pacemaker-fenced" and not self.Env["DoFencing"]:
- continue
- complist.append(self.fullcomplist[key])
-
- return complist
diff --git a/cts/lab/CTSaudits.py b/cts/lab/CTSaudits.py
deleted file mode 100755
index 51a04f8..0000000
--- a/cts/lab/CTSaudits.py
+++ /dev/null
@@ -1,879 +0,0 @@
-""" Auditing classes for Pacemaker's Cluster Test Suite (CTS)
-"""
-
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
-__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
-
-import time, re, uuid
-
-from pacemaker.buildoptions import BuildOptions
-from pacemaker._cts.watcher import LogKind, LogWatcher
-
-class ClusterAudit(object):
-
- def __init__(self, cm):
- self.CM = cm
-
- def __call__(self):
- raise ValueError("Abstract Class member (__call__)")
-
- def is_applicable(self):
- '''Return TRUE if we are applicable in the current test configuration'''
- raise ValueError("Abstract Class member (is_applicable)")
- return 1
-
- def log(self, args):
- self.CM.log("audit: %s" % args)
-
- def debug(self, args):
- self.CM.debug("audit: %s" % args)
-
- def name(self):
- raise ValueError("Abstract Class member (name)")
-
-AllAuditClasses = [ ]
-
-
-class LogAudit(ClusterAudit):
-
- def name(self):
- return "LogAudit"
-
- def __init__(self, cm):
- self.CM = cm
-
- def RestartClusterLogging(self, nodes=None):
- if not nodes:
- nodes = self.CM.Env["nodes"]
-
- self.CM.debug("Restarting logging on: %s" % repr(nodes))
-
- for node in nodes:
- if self.CM.Env["have_systemd"]:
- (rc, _) = self.CM.rsh(node, "systemctl stop systemd-journald.socket")
- if rc != 0:
- self.CM.log ("ERROR: Cannot stop 'systemd-journald' on %s" % node)
-
- (rc, _) = self.CM.rsh(node, "systemctl start systemd-journald.service")
- if rc != 0:
- self.CM.log ("ERROR: Cannot start 'systemd-journald' on %s" % node)
-
- (rc, _) = self.CM.rsh(node, "service %s restart" % self.CM.Env["syslogd"])
- if rc != 0:
- self.CM.log ("ERROR: Cannot restart '%s' on %s" % (self.CM.Env["syslogd"], node))
-
- def _create_watcher(self, patterns, kind):
- watch = LogWatcher(self.CM.Env["LogFileName"], patterns,
- self.CM.Env["nodes"], kind, "LogAudit", 5,
- silent=True)
- watch.set_watch()
- return watch
-
- def TestLogging(self):
- patterns = []
- prefix = "Test message from"
- suffix = str(uuid.uuid4())
- watch = {}
-
- for node in self.CM.Env["nodes"]:
- # Look for the node name in two places to make sure
- # that syslog is logging with the correct hostname
- m = re.search("^([^.]+).*", node)
- if m:
- simple = m.group(1)
- else:
- simple = node
- patterns.append("%s.*%s %s %s" % (simple, prefix, node, suffix))
-
- watch_pref = self.CM.Env["LogWatcher"]
- if watch_pref == LogKind.ANY:
- kinds = [ LogKind.FILE ]
- if self.CM.Env["have_systemd"]:
- kinds += [ LogKind.JOURNAL ]
- kinds += [ LogKind.REMOTE_FILE ]
- for k in kinds:
- watch[k] = self._create_watcher(patterns, k)
- self.CM.log("Logging test message with identifier %s" % (suffix))
- else:
- watch[watch_pref] = self._create_watcher(patterns, watch_pref)
-
- for node in self.CM.Env["nodes"]:
- cmd = "logger -p %s.info %s %s %s" % (self.CM.Env["SyslogFacility"], prefix, node, suffix)
-
- (rc, _) = self.CM.rsh(node, cmd, synchronous=False, verbose=0)
- if rc != 0:
- self.CM.log ("ERROR: Cannot execute remote command [%s] on %s" % (cmd, node))
-
- for k in list(watch.keys()):
- w = watch[k]
- if watch_pref == LogKind.ANY:
- self.CM.log("Checking for test message in %s logs" % (k))
- w.look_for_all(silent=True)
- if w.unmatched:
- for regex in w.unmatched:
- self.CM.log("Test message [%s] not found in %s logs" % (regex, w.kind))
- else:
- if watch_pref == LogKind.ANY:
- self.CM.log("Found test message in %s logs" % (k))
- self.CM.Env["LogWatcher"] = k
- return 1
-
- return 0
-
- def __call__(self):
- max = 3
- attempt = 0
-
- self.CM.ns.wait_for_all_nodes(self.CM.Env["nodes"])
- while attempt <= max and self.TestLogging() == 0:
- attempt = attempt + 1
- self.RestartClusterLogging()
- time.sleep(60*attempt)
-
- if attempt > max:
- self.CM.log("ERROR: Cluster logging unrecoverable.")
- return 0
-
- return 1
-
- def is_applicable(self):
- if self.CM.Env["DoBSC"]:
- return 0
- if self.CM.Env["LogAuditDisabled"]:
- return 0
- return 1
-
-
-class DiskAudit(ClusterAudit):
-
- def name(self):
- return "DiskspaceAudit"
-
- def __init__(self, cm):
- self.CM = cm
-
- def __call__(self):
- result = 1
- # @TODO Use directory of PCMK_logfile if set on host
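- # The df pipeline prints "<use%> <available MB>" for the log directory, with the % and M suffixes stripped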
- dfcmd = "df -BM " + BuildOptions.LOG_DIR + " | tail -1 | awk '{print $(NF-1)\" \"$(NF-2)}' | tr -d 'M%'"
-
- self.CM.ns.wait_for_all_nodes(self.CM.Env["nodes"])
- for node in self.CM.Env["nodes"]:
- (_, dfout) = self.CM.rsh(node, dfcmd, verbose=1)
- if not dfout:
- self.CM.log ("ERROR: Cannot execute remote df command [%s] on %s" % (dfcmd, node))
- else:
- dfout = dfout[0].strip()
-
- try:
- (used, remain) = dfout.split()
- used_percent = int(used)
- remaining_mb = int(remain)
- except (ValueError, TypeError):
- self.CM.log("Warning: df output '%s' from %s was invalid [%s, %s]"
- % (dfout, node, used, remain))
- else:
- if remaining_mb < 10 or used_percent > 95:
- self.CM.log("CRIT: Out of log disk space on %s (%d%% / %dMB)"
- % (node, used_percent, remaining_mb))
- result = None
- if self.CM.Env["continue"]:
- answer = "Y"
- else:
- try:
- answer = input('Continue? [nY]')
- except EOFError as e:
- answer = "n"
-
- if answer and answer == "n":
- raise ValueError("Disk full on %s" % (node))
-
- elif remaining_mb < 100 or used_percent > 90:
- self.CM.log("WARN: Low on log disk space (%dMB) on %s" % (remaining_mb, node))
- return result
-
- def is_applicable(self):
- if self.CM.Env["DoBSC"]:
- return 0
- return 1
-
-
-class FileAudit(ClusterAudit):
-
- def name(self):
- return "FileAudit"
-
- def __init__(self, cm):
- self.CM = cm
- self.known = []
-
- def __call__(self):
- result = 1
-
- self.CM.ns.wait_for_all_nodes(self.CM.Env["nodes"])
- for node in self.CM.Env["nodes"]:
-
- (_, lsout) = self.CM.rsh(node, "ls -al /var/lib/pacemaker/cores/* | grep core.[0-9]", verbose=1)
- for line in lsout:
- line = line.strip()
- if line not in self.known:
- result = 0
- self.known.append(line)
- self.CM.log("Warning: Pacemaker core file on %s: %s" % (node, line))
-
- (_, lsout) = self.CM.rsh(node, "ls -al /var/lib/corosync | grep core.[0-9]", verbose=1)
- for line in lsout:
- line = line.strip()
- if line not in self.known:
- result = 0
- self.known.append(line)
- self.CM.log("Warning: Corosync core file on %s: %s" % (node, line))
-
- if node in self.CM.ShouldBeStatus and self.CM.ShouldBeStatus[node] == "down":
- clean = 0
- (_, lsout) = self.CM.rsh(node, "ls -al /dev/shm | grep qb-", verbose=1)
- for line in lsout:
- result = 0
- clean = 1
- self.CM.log("Warning: Stale IPC file on %s: %s" % (node, line))
-
- if clean:
- (_, lsout) = self.CM.rsh(node, "ps axf | grep -e pacemaker -e corosync", verbose=1)
- for line in lsout:
- self.CM.debug("ps[%s]: %s" % (node, line))
-
- self.CM.rsh(node, "rm -rf /dev/shm/qb-*")
-
- else:
- self.CM.debug("Skipping %s" % node)
-
- return result
-
- def is_applicable(self):
- return 1
-
-
-class AuditResource(object):
- def __init__(self, cm, line):
- fields = line.split()
- self.CM = cm
- self.line = line
- self.type = fields[1]
- self.id = fields[2]
- self.clone_id = fields[3]
- self.parent = fields[4]
- self.rprovider = fields[5]
- self.rclass = fields[6]
- self.rtype = fields[7]
- self.host = fields[8]
- self.needs_quorum = fields[9]
- self.flags = int(fields[10])
- self.flags_s = fields[11]
-
- if self.parent == "NA":
- self.parent = None
-
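- # The helpers below test individual bits of the flags column reported by "crm_resource -c"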
- def unique(self):
- if self.flags & int("0x00000020", 16):
- return 1
- return 0
-
- def orphan(self):
- if self.flags & int("0x00000001", 16):
- return 1
- return 0
-
- def managed(self):
- if self.flags & int("0x00000002", 16):
- return 1
- return 0
-
-
-class AuditConstraint(object):
- def __init__(self, cm, line):
- fields = line.split()
- self.CM = cm
- self.line = line
- self.type = fields[1]
- self.id = fields[2]
- self.rsc = fields[3]
- self.target = fields[4]
- self.score = fields[5]
- self.rsc_role = fields[6]
- self.target_role = fields[7]
-
- if self.rsc_role == "NA":
- self.rsc_role = None
- if self.target_role == "NA":
- self.target_role = None
-
-
-class PrimitiveAudit(ClusterAudit):
- def name(self):
- return "PrimitiveAudit"
-
- def __init__(self, cm):
- self.CM = cm
-
- def doResourceAudit(self, resource, quorum):
- rc = 1
- active = self.CM.ResourceLocation(resource.id)
-
- if len(active) == 1:
- if quorum:
- self.debug("Resource %s active on %s" % (resource.id, repr(active)))
-
- elif resource.needs_quorum == 1:
- self.CM.log("Resource %s active without quorum: %s"
- % (resource.id, repr(active)))
- rc = 0
-
- elif not resource.managed():
- self.CM.log("Resource %s not managed. Active on %s"
- % (resource.id, repr(active)))
-
- elif not resource.unique():
- # TODO: Figure out a clever way to actually audit these resource types
- if len(active) > 1:
- self.debug("Non-unique resource %s is active on: %s"
- % (resource.id, repr(active)))
- else:
- self.debug("Non-unique resource %s is not active" % resource.id)
-
- elif len(active) > 1:
- self.CM.log("Resource %s is active multiple times: %s"
- % (resource.id, repr(active)))
- rc = 0
-
- elif resource.orphan():
- self.debug("Resource %s is an inactive orphan" % resource.id)
-
- elif len(self.inactive_nodes) == 0:
- self.CM.log("WARN: Resource %s not served anywhere" % resource.id)
- rc = 0
-
- elif self.CM.Env["warn-inactive"]:
- if quorum or not resource.needs_quorum:
- self.CM.log("WARN: Resource %s not served anywhere (Inactive nodes: %s)"
- % (resource.id, repr(self.inactive_nodes)))
- else:
- self.debug("Resource %s not served anywhere (Inactive nodes: %s)"
- % (resource.id, repr(self.inactive_nodes)))
-
- elif quorum or not resource.needs_quorum:
- self.debug("Resource %s not served anywhere (Inactive nodes: %s)"
- % (resource.id, repr(self.inactive_nodes)))
-
- return rc
-
- def setup(self):
- self.target = None
- self.resources = []
- self.constraints = []
- self.active_nodes = []
- self.inactive_nodes = []
-
- for node in self.CM.Env["nodes"]:
- if self.CM.ShouldBeStatus[node] == "up":
- self.active_nodes.append(node)
- else:
- self.inactive_nodes.append(node)
-
- for node in self.CM.Env["nodes"]:
- if self.target == None and self.CM.ShouldBeStatus[node] == "up":
- self.target = node
-
- if not self.target:
- # TODO: In Pacemaker 1.0 clusters we'll be able to run crm_resource
- # with CIB_file=/path/to/cib.xml even when the cluster isn't running
- self.debug("No nodes active - skipping %s" % self.name())
- return 0
-
- (_, lines) = self.CM.rsh(self.target, "crm_resource -c", verbose=1)
-
- for line in lines:
- if re.search("^Resource", line):
- self.resources.append(AuditResource(self.CM, line))
- elif re.search("^Constraint", line):
- self.constraints.append(AuditConstraint(self.CM, line))
- else:
- self.CM.log("Unknown entry: %s" % line);
-
- return 1
-
- def __call__(self):
- rc = 1
-
- if not self.setup():
- return 1
-
- quorum = self.CM.HasQuorum(None)
- for resource in self.resources:
- if resource.type == "primitive":
- if self.doResourceAudit(resource, quorum) == 0:
- rc = 0
- return rc
-
- def is_applicable(self):
- # @TODO Due to long-ago refactoring, this name test would never match,
- # so this audit (and those derived from it) would never run.
- # Uncommenting the next lines fixes the name test, but that then
- # exposes pre-existing bugs that need to be fixed.
- #if self.CM["Name"] == "crm-corosync":
- # return 1
- return 0
-
-
-class GroupAudit(PrimitiveAudit):
- def name(self):
- return "GroupAudit"
-
- def __call__(self):
- rc = 1
- if not self.setup():
- return 1
-
- for group in self.resources:
- if group.type == "group":
- first_match = 1
- group_location = None
- for child in self.resources:
- if child.parent == group.id:
- nodes = self.CM.ResourceLocation(child.id)
-
- if first_match and len(nodes) > 0:
- group_location = nodes[0]
-
- first_match = 0
-
- if len(nodes) > 1:
- rc = 0
- self.CM.log("Child %s of %s is active more than once: %s"
- % (child.id, group.id, repr(nodes)))
-
- elif len(nodes) == 0:
- # Groups are allowed to be partially active
- # However we do need to make sure later children aren't running
- group_location = None
- self.debug("Child %s of %s is stopped" % (child.id, group.id))
-
- elif nodes[0] != group_location:
- rc = 0
- self.CM.log("Child %s of %s is active on the wrong node (%s) expected %s"
- % (child.id, group.id, nodes[0], group_location))
- else:
- self.debug("Child %s of %s is active on %s" % (child.id, group.id, nodes[0]))
-
- return rc
-
-
-class CloneAudit(PrimitiveAudit):
- def name(self):
- return "CloneAudit"
-
- def __call__(self):
- rc = 1
- if not self.setup():
- return 1
-
- for clone in self.resources:
- if clone.type == "clone":
- for child in self.resources:
- if child.parent == clone.id and child.type == "primitive":
- self.debug("Checking child %s of %s..." % (child.id, clone.id))
- # Check max and node_max
- # Obtain with:
- # crm_resource -g clone_max --meta -r child.id
- # crm_resource -g clone_node_max --meta -r child.id
-
- return rc
-
-
-class ColocationAudit(PrimitiveAudit):
- def name(self):
- return "ColocationAudit"
-
- def crm_location(self, resource):
- (rc, lines) = self.CM.rsh(self.target, "crm_resource -W -r %s -Q"%resource, verbose=1)
- hosts = []
- if rc == 0:
- for line in lines:
- fields = line.split()
- hosts.append(fields[0])
-
- return hosts
-
- def __call__(self):
- rc = 1
- if not self.setup():
- return 1
-
- for coloc in self.constraints:
- if coloc.type == "rsc_colocation":
- source = self.crm_location(coloc.rsc)
- target = self.crm_location(coloc.target)
- if len(source) == 0:
- self.debug("Colocation audit (%s): %s not running" % (coloc.id, coloc.rsc))
- else:
- for node in source:
- if not node in target:
- rc = 0
- self.CM.log("Colocation audit (%s): %s running on %s (not in %s)"
- % (coloc.id, coloc.rsc, node, repr(target)))
- else:
- self.debug("Colocation audit (%s): %s running on %s (in %s)"
- % (coloc.id, coloc.rsc, node, repr(target)))
-
- return rc
-
-
-class ControllerStateAudit(ClusterAudit):
- def __init__(self, cm):
- self.CM = cm
- self.Stats = {"calls":0
- , "success":0
- , "failure":0
- , "skipped":0
- , "auditfail":0}
-
- def has_key(self, key):
- return key in self.Stats
-
- def __setitem__(self, key, value):
- self.Stats[key] = value
-
- def __getitem__(self, key):
- return self.Stats[key]
-
- def incr(self, name):
- '''Increment (or initialize) the value associated with the given name'''
- if not name in self.Stats:
- self.Stats[name] = 0
- self.Stats[name] = self.Stats[name]+1
-
- def __call__(self):
- passed = 1
- up_are_down = 0
- down_are_up = 0
- unstable_list = []
-
- for node in self.CM.Env["nodes"]:
- should_be = self.CM.ShouldBeStatus[node]
- rc = self.CM.test_node_CM(node)
- if rc > 0:
- if should_be == "down":
- down_are_up = down_are_up + 1
- if rc == 1:
- unstable_list.append(node)
- elif should_be == "up":
- up_are_down = up_are_down + 1
-
- if len(unstable_list) > 0:
- passed = 0
- self.CM.log("Cluster is not stable: %d (of %d): %s"
- % (len(unstable_list), self.CM.upcount(), repr(unstable_list)))
-
- if up_are_down > 0:
- passed = 0
- self.CM.log("%d (of %d) nodes expected to be up were down."
- % (up_are_down, len(self.CM.Env["nodes"])))
-
- if down_are_up > 0:
- passed = 0
- self.CM.log("%d (of %d) nodes expected to be down were up."
- % (down_are_up, len(self.CM.Env["nodes"])))
-
- return passed
-
- def name(self):
- return "ControllerStateAudit"
-
- def is_applicable(self):
- # @TODO Due to long-ago refactoring, this name test would never match,
- # so this audit (and those derived from it) would never run.
- # Uncommenting the next lines fixes the name test, but that then
- # exposes pre-existing bugs that need to be fixed.
- #if self.CM["Name"] == "crm-corosync":
- # return 1
- return 0
-
-
-class CIBAudit(ClusterAudit):
- def __init__(self, cm):
- self.CM = cm
- self.Stats = {"calls":0
- , "success":0
- , "failure":0
- , "skipped":0
- , "auditfail":0}
-
- def has_key(self, key):
- return key in self.Stats
-
- def __setitem__(self, key, value):
- self.Stats[key] = value
-
- def __getitem__(self, key):
- return self.Stats[key]
-
- def incr(self, name):
- '''Increment (or initialize) the value associated with the given name'''
- if not name in self.Stats:
- self.Stats[name] = 0
- self.Stats[name] = self.Stats[name]+1
-
- def __call__(self):
- passed = 1
- ccm_partitions = self.CM.find_partitions()
-
- if len(ccm_partitions) == 0:
- self.debug("\tNo partitions to audit")
- return 1
-
- for partition in ccm_partitions:
- self.debug("\tAuditing CIB consistency for: %s" % partition)
- partition_passed = 0
- if self.audit_cib_contents(partition) == 0:
- passed = 0
-
- return passed
-
- def audit_cib_contents(self, hostlist):
- passed = 1
- node0 = None
- node0_xml = None
-
- partition_hosts = hostlist.split()
- for node in partition_hosts:
- node_xml = self.store_remote_cib(node, node0)
-
- if node_xml == None:
- self.CM.log("Could not perform audit: No configuration from %s" % node)
- passed = 0
-
- elif node0 == None:
- node0 = node
- node0_xml = node_xml
-
- elif node0_xml == None:
- self.CM.log("Could not perform audit: No configuration from %s" % node0)
- passed = 0
-
- else:
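- # Diff this node's CIB against node0's reference copy; any output line other than an empty <diff/> marks the partition as inconsistent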
- (rc, result) = self.CM.rsh(
- node0, "crm_diff -VV -cf --new %s --original %s" % (node_xml, node0_xml), verbose=1)
-
- if rc != 0:
- self.CM.log("Diff between %s and %s failed: %d" % (node0_xml, node_xml, rc))
- passed = 0
-
- for line in result:
- if not re.search("<diff/>", line):
- passed = 0
- self.debug("CibDiff[%s-%s]: %s" % (node0, node, line))
- else:
- self.debug("CibDiff[%s-%s] Ignoring: %s" % (node0, node, line))
-
-# self.CM.rsh(node0, "rm -f %s" % node_xml)
-# self.CM.rsh(node0, "rm -f %s" % node0_xml)
- return passed
-
- def store_remote_cib(self, node, target):
- combined = ""
- filename = "/tmp/ctsaudit.%s.xml" % node
-
- if not target:
- target = node
-
- (rc, lines) = self.CM.rsh(node, self.CM["CibQuery"], verbose=1)
- if rc != 0:
- self.CM.log("Could not retrieve configuration")
- return None
-
- self.CM.rsh("localhost", "rm -f %s" % filename)
- for line in lines:
- self.CM.rsh("localhost", "echo \'%s\' >> %s" % (line[:-1], filename), verbose=0)
-
- if self.CM.rsh.copy(filename, "root@%s:%s" % (target, filename), silent=True) != 0:
- self.CM.log("Could not store configuration")
- return None
- return filename
-
- def name(self):
- return "CibAudit"
-
- def is_applicable(self):
- # @TODO Due to long-ago refactoring, this name test would never match,
- # so this audit (and those derived from it) would never run.
- # Uncommenting the next lines fixes the name test, but that then
- # exposes pre-existing bugs that need to be fixed.
- #if self.CM["Name"] == "crm-corosync":
- # return 1
- return 0
-
-
-class PartitionAudit(ClusterAudit):
- def __init__(self, cm):
- self.CM = cm
- self.Stats = {"calls":0
- , "success":0
- , "failure":0
- , "skipped":0
- , "auditfail":0}
- self.NodeEpoch = {}
- self.NodeState = {}
- self.NodeQuorum = {}
-
- def has_key(self, key):
- return key in self.Stats
-
- def __setitem__(self, key, value):
- self.Stats[key] = value
-
- def __getitem__(self, key):
- return self.Stats[key]
-
- def incr(self, name):
- '''Increment (or initialize) the value associated with the given name'''
- if not name in self.Stats:
- self.Stats[name] = 0
- self.Stats[name] = self.Stats[name]+1
-
- def __call__(self):
- passed = 1
- ccm_partitions = self.CM.find_partitions()
-
- if ccm_partitions == None or len(ccm_partitions) == 0:
- return 1
-
- self.CM.cluster_stable(double_check=True)
-
- if len(ccm_partitions) != self.CM.partitions_expected:
- self.CM.log("ERROR: %d cluster partitions detected:" % len(ccm_partitions))
- passed = 0
- for partition in ccm_partitions:
- self.CM.log("\t %s" % partition)
-
- for partition in ccm_partitions:
- partition_passed = 0
- if self.audit_partition(partition) == 0:
- passed = 0
-
- return passed
-
- def trim_string(self, avalue):
- if not avalue:
- return None
- if len(avalue) > 1:
- return avalue[:-1]
-
- def trim2int(self, avalue):
- if not avalue:
- return None
- if len(avalue) > 1:
- return int(avalue[:-1])
-
- def audit_partition(self, partition):
- passed = 1
- dc_found = []
- dc_allowed_list = []
- lowest_epoch = None
- node_list = partition.split()
-
- self.debug("Auditing partition: %s" % (partition))
- for node in node_list:
- if self.CM.ShouldBeStatus[node] != "up":
- self.CM.log("Warn: Node %s appeared out of nowhere" % (node))
- self.CM.ShouldBeStatus[node] = "up"
- # not in itself a reason to fail the audit (not what we're
- # checking for in this audit)
-
- (_, out) = self.CM.rsh(node, self.CM["StatusCmd"] % node, verbose=1)
- self.NodeState[node] = out[0].strip()
-
- (_, out) = self.CM.rsh(node, self.CM["EpochCmd"], verbose=1)
- self.NodeEpoch[node] = out[0].strip()
-
- (_, out) = self.CM.rsh(node, self.CM["QuorumCmd"], verbose=1)
- self.NodeQuorum[node] = out[0].strip()
-
- self.debug("Node %s: %s - %s - %s." % (node, self.NodeState[node], self.NodeEpoch[node], self.NodeQuorum[node]))
- self.NodeState[node] = self.trim_string(self.NodeState[node])
- self.NodeEpoch[node] = self.trim2int(self.NodeEpoch[node])
- self.NodeQuorum[node] = self.trim_string(self.NodeQuorum[node])
-
- if not self.NodeEpoch[node]:
- self.CM.log("Warn: Node %s dissappeared: cant determin epoch" % (node))
- self.CM.ShouldBeStatus[node] = "down"
- # not in itself a reason to fail the audit (not what we're
- # checking for in this audit)
- elif lowest_epoch == None or self.NodeEpoch[node] < lowest_epoch:
- lowest_epoch = self.NodeEpoch[node]
-
- if not lowest_epoch:
- self.CM.log("Lowest epoch not determined in %s" % (partition))
- passed = 0
-
- for node in node_list:
- if self.CM.ShouldBeStatus[node] == "up":
- if self.CM.is_node_dc(node, self.NodeState[node]):
- dc_found.append(node)
- if self.NodeEpoch[node] == lowest_epoch:
- self.debug("%s: OK" % node)
- elif not self.NodeEpoch[node]:
- self.debug("Check on %s ignored: no node epoch" % node)
- elif not lowest_epoch:
- self.debug("Check on %s ignored: no lowest epoch" % node)
- else:
- self.CM.log("DC %s is not the oldest node (%d vs. %d)"
- % (node, self.NodeEpoch[node], lowest_epoch))
- passed = 0
-
- if len(dc_found) == 0:
- self.CM.log("DC not found on any of the %d allowed nodes: %s (of %s)"
- % (len(dc_allowed_list), str(dc_allowed_list), str(node_list)))
-
- elif len(dc_found) > 1:
- self.CM.log("%d DCs (%s) found in cluster partition: %s"
- % (len(dc_found), str(dc_found), str(node_list)))
- passed = 0
-
- if passed == 0:
- for node in node_list:
- if self.CM.ShouldBeStatus[node] == "up":
- self.CM.log("epoch %s : %s"
- % (self.NodeEpoch[node], self.NodeState[node]))
-
- return passed
-
- def name(self):
- return "PartitionAudit"
-
- def is_applicable(self):
- # @TODO Due to long-ago refactoring, this name test would never match,
- # so this audit (and those derived from it) would never run.
- # Uncommenting the next lines fixes the name test, but that then
- # exposes pre-existing bugs that need to be fixed.
- #if self.CM["Name"] == "crm-corosync":
- # return 1
- return 0
-
-AllAuditClasses.append(DiskAudit)
-AllAuditClasses.append(FileAudit)
-AllAuditClasses.append(LogAudit)
-AllAuditClasses.append(ControllerStateAudit)
-AllAuditClasses.append(PartitionAudit)
-AllAuditClasses.append(PrimitiveAudit)
-AllAuditClasses.append(GroupAudit)
-AllAuditClasses.append(CloneAudit)
-AllAuditClasses.append(ColocationAudit)
-AllAuditClasses.append(CIBAudit)
-
-
-def AuditList(cm):
- result = []
- for auditclass in AllAuditClasses:
- a = auditclass(cm)
- if a.is_applicable():
- result.append(a)
- return result
diff --git a/cts/lab/CTSlab.py.in b/cts/lab/CTSlab.py.in
deleted file mode 100644
index bd990fd..0000000
--- a/cts/lab/CTSlab.py.in
+++ /dev/null
@@ -1,135 +0,0 @@
-#!@PYTHON@
-""" Command-line interface to Pacemaker's Cluster Test Suite (CTS)
-"""
-
-__copyright__ = "Copyright 2001-2023 the Pacemaker project contributors"
-__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
-
-import sys, signal, os
-
-pdir = os.path.dirname(sys.path[0])
-sys.path.insert(0, pdir) # So that things work from the source directory
-
-try:
- from cts.CM_corosync import *
- from cts.CTSaudits import AuditList
- from cts.CTStests import TestList
- from cts.CTSscenarios import *
-
- from pacemaker._cts.CTS import CtsLab
- from pacemaker._cts.logging import LogFactory
-except ImportError as e:
- sys.stderr.write("abort: %s\n" % e)
- sys.stderr.write("check your install and PYTHONPATH; couldn't find cts libraries in:\n%s\n" %
- ' '.join(sys.path))
- sys.exit(1)
-
-# These are globals so they can be used by the signal handler.
-scenario = None
-LogFactory().add_stderr()
-
-
-def sig_handler(signum, frame) :
- LogFactory().log("Interrupted by signal %d"%signum)
- if scenario: scenario.summarize()
- if signum == 15 :
- if scenario: scenario.TearDown()
- sys.exit(1)
-
-
-def plural_s(n, uppercase=False):
- if n == 1:
- return ""
- elif uppercase:
- return "S"
- else:
- return "s"
-
-
-if __name__ == '__main__':
-
- Environment = CtsLab(sys.argv[1:])
- NumIter = Environment["iterations"]
- Tests = []
-
- # Set the signal handler
- signal.signal(15, sig_handler)
- signal.signal(10, sig_handler)
-
- # Create the Cluster Manager object
- cm = None
- if Environment["Stack"] == "corosync 2+":
- cm = crm_corosync()
-
- else:
- LogFactory().log("Unknown stack: "+Environment["stack"])
- sys.exit(1)
-
- if Environment["TruncateLog"]:
- if Environment["OutputFile"] is None:
- LogFactory().log("Ignoring truncate request because no output file specified")
- else:
- LogFactory().log("Truncating %s" % Environment["OutputFile"])
- with open(Environment["OutputFile"], "w") as outputfile:
- outputfile.truncate(0)
-
- Audits = AuditList(cm)
-
- if Environment["ListTests"]:
- Tests = TestList(cm, Audits)
- LogFactory().log("Total %d tests"%len(Tests))
- for test in Tests :
- LogFactory().log(str(test.name));
- sys.exit(0)
-
- elif len(Environment["tests"]) == 0:
- Tests = TestList(cm, Audits)
-
- else:
- Chosen = Environment["tests"]
- for TestCase in Chosen:
- match = None
-
- for test in TestList(cm, Audits):
- if test.name == TestCase:
- match = test
-
- if not match:
- LogFactory().log("--choose: No applicable/valid tests chosen")
- sys.exit(1)
- else:
- Tests.append(match)
-
- # Scenario selection
- if Environment["scenario"] == "basic-sanity":
- scenario = RandomTests(cm, [ BasicSanityCheck(Environment) ], Audits, Tests)
-
- elif Environment["scenario"] == "all-once":
- NumIter = len(Tests)
- scenario = AllOnce(
- cm, [ BootCluster(Environment) ], Audits, Tests)
- elif Environment["scenario"] == "sequence":
- scenario = Sequence(
- cm, [ BootCluster(Environment) ], Audits, Tests)
- elif Environment["scenario"] == "boot":
- scenario = Boot(cm, [ LeaveBooted(Environment)], Audits, [])
- else:
- scenario = RandomTests(
- cm, [ BootCluster(Environment) ], Audits, Tests)
-
- LogFactory().log(">>>>>>>>>>>>>>>> BEGINNING " + repr(NumIter) + " TEST" + plural_s(NumIter, True) + " ")
- LogFactory().log("Stack: %s (%s)" % (Environment["Stack"], Environment["Name"]))
- LogFactory().log("Schema: %s" % Environment["Schema"])
- LogFactory().log("Scenario: %s" % scenario.__doc__)
- LogFactory().log("CTS Exerciser: %s" % Environment["cts-exerciser"])
- LogFactory().log("CTS Logfile: %s" % Environment["OutputFile"])
- LogFactory().log("Random Seed: %s" % Environment["RandSeed"])
- LogFactory().log("Syslog variant: %s" % Environment["syslogd"].strip())
- LogFactory().log("System log files: %s" % Environment["LogFileName"])
- if Environment.has_key("IPBase"):
- LogFactory().log("Base IP for resources: %s" % Environment["IPBase"])
- LogFactory().log("Cluster starts at boot: %d" % Environment["at-boot"])
-
- Environment.dump()
- rc = Environment.run(scenario, NumIter)
- sys.exit(rc)
diff --git a/cts/lab/CTSscenarios.py b/cts/lab/CTSscenarios.py
deleted file mode 100644
index 37cb094..0000000
--- a/cts/lab/CTSscenarios.py
+++ /dev/null
@@ -1,563 +0,0 @@
-""" Test scenario classes for Pacemaker's Cluster Test Suite (CTS)
-"""
-
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
-__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
-
-import os
-import re
-import sys
-import time
-
-from cts.CTStests import CTSTest
-from cts.CTSaudits import ClusterAudit
-
-from pacemaker._cts.watcher import LogWatcher
-
-class ScenarioComponent(object):
-
- def __init__(self, Env):
- self.Env = Env
-
- def IsApplicable(self):
- '''Return True if the current ScenarioComponent is applicable
- in the given LabEnvironment given to the constructor.
- '''
-
- raise ValueError("Abstract Class member (IsApplicable)")
-
- def SetUp(self, CM):
- '''Set up the given ScenarioComponent'''
- raise ValueError("Abstract Class member (Setup)")
-
- def TearDown(self, CM):
- '''Tear down (undo) the given ScenarioComponent'''
- raise ValueError("Abstract Class member (Setup)")
-
-
-class Scenario(object):
- (
-'''The basic idea of a scenario is that of an ordered list of
-ScenarioComponent objects. Each ScenarioComponent is SetUp() in turn,
-and then after the tests have been run, they are torn down using TearDown()
-(in reverse order).
-
-A Scenario is applicable to a particular cluster manager iff each
-ScenarioComponent is applicable.
-
-A partially set up scenario is torn down if it fails during setup.
-''')
-
- def __init__(self, ClusterManager, Components, Audits, Tests):
-
- "Initialize the Scenario from the list of ScenarioComponents"
-
- self.ClusterManager = ClusterManager
- self.Components = Components
- self.Audits = Audits
- self.Tests = Tests
-
- self.BadNews = None
- self.TestSets = []
- self.Stats = {"success":0, "failure":0, "BadNews":0, "skipped":0}
- self.Sets = []
-
- #self.ns=CTS.NodeStatus(self.Env)
-
- for comp in Components:
- if not issubclass(comp.__class__, ScenarioComponent):
- raise ValueError("Init value must be subclass of ScenarioComponent")
-
- for audit in Audits:
- if not issubclass(audit.__class__, ClusterAudit):
- raise ValueError("Init value must be subclass of ClusterAudit")
-
- for test in Tests:
- if not issubclass(test.__class__, CTSTest):
- raise ValueError("Init value must be a subclass of CTSTest")
-
- def IsApplicable(self):
- (
-'''A Scenario IsApplicable() iff each of its ScenarioComponents IsApplicable()
-'''
- )
-
- for comp in self.Components:
- if not comp.IsApplicable():
- return None
- return True
-
- def SetUp(self):
- '''Set up the Scenario. Return TRUE on success.'''
-
- self.ClusterManager.prepare()
- self.audit() # Also detects remote/local log config
- self.ClusterManager.ns.wait_for_all_nodes(self.ClusterManager.Env["nodes"])
-
- self.audit()
- self.ClusterManager.install_support()
-
- self.BadNews = LogWatcher(self.ClusterManager.Env["LogFileName"],
- self.ClusterManager.templates.get_patterns("BadNews"),
- self.ClusterManager.Env["nodes"],
- self.ClusterManager.Env["LogWatcher"],
- "BadNews", 0)
- self.BadNews.set_watch() # Call after we've figured out what type of log watching to do in LogAudit
-
- j = 0
- while j < len(self.Components):
- if not self.Components[j].SetUp(self.ClusterManager):
- # OOPS! We failed. Tear partial setups down.
- self.audit()
- self.ClusterManager.log("Tearing down partial setup")
- self.TearDown(j)
- return None
- j = j + 1
-
- self.audit()
- return 1
-
- def TearDown(self, max=None):
-
- '''Tear Down the Scenario - in reverse order.'''
-
- if max == None:
- max = len(self.Components)-1
- j = max
- while j >= 0:
- self.Components[j].TearDown(self.ClusterManager)
- j = j - 1
-
- self.audit()
- self.ClusterManager.install_support("uninstall")
-
- def incr(self, name):
- '''Increment (or initialize) the value associated with the given name'''
- if not name in self.Stats:
- self.Stats[name] = 0
- self.Stats[name] = self.Stats[name]+1
-
- def run(self, Iterations):
- self.ClusterManager.oprofileStart()
- try:
- self.run_loop(Iterations)
- self.ClusterManager.oprofileStop()
- except:
- self.ClusterManager.oprofileStop()
- raise
-
- def run_loop(self, Iterations):
- raise ValueError("Abstract Class member (run_loop)")
-
- def run_test(self, test, testcount):
- nodechoice = self.ClusterManager.Env.random_node()
-
- ret = 1
- where = ""
- did_run = 0
-
- self.ClusterManager.instance_errorstoignore_clear()
- self.ClusterManager.log(("Running test %s" % test.name).ljust(35) + (" (%s) " % nodechoice).ljust(15) + "[" + ("%d" % testcount).rjust(3) + "]")
-
- starttime = test.set_timer()
- if not test.setup(nodechoice):
- self.ClusterManager.log("Setup failed")
- ret = 0
-
- elif not test.canrunnow(nodechoice):
- self.ClusterManager.log("Skipped")
- test.skipped()
-
- else:
- did_run = 1
- ret = test(nodechoice)
-
- if not test.teardown(nodechoice):
- self.ClusterManager.log("Teardown failed")
- if self.ClusterManager.Env["continue"]:
- answer = "Y"
- else:
- try:
- answer = input('Continue? [nY]')
- except EOFError as e:
- answer = "n"
- if answer and answer == "n":
- raise ValueError("Teardown of %s on %s failed" % (test.name, nodechoice))
- ret = 0
-
- stoptime = time.time()
- self.ClusterManager.oprofileSave(testcount)
-
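- # Update per-test timing statistics: cumulative elapsed time plus min/max of the timed portion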
- elapsed_time = stoptime - starttime
- test_time = stoptime - test.get_timer()
- if not test["min_time"]:
- test["elapsed_time"] = elapsed_time
- test["min_time"] = test_time
- test["max_time"] = test_time
- else:
- test["elapsed_time"] = test["elapsed_time"] + elapsed_time
- if test_time < test["min_time"]:
- test["min_time"] = test_time
- if test_time > test["max_time"]:
- test["max_time"] = test_time
-
- if ret:
- self.incr("success")
- test.log_timer()
- else:
- self.incr("failure")
- self.ClusterManager.statall()
- did_run = 1 # Force the test count to be incremented anyway so test extraction works
-
- self.audit(test.errorstoignore())
- return did_run
-
- def summarize(self):
- self.ClusterManager.log("****************")
- self.ClusterManager.log("Overall Results:" + repr(self.Stats))
- self.ClusterManager.log("****************")
-
- stat_filter = {
- "calls":0,
- "failure":0,
- "skipped":0,
- "auditfail":0,
- }
- self.ClusterManager.log("Test Summary")
- for test in self.Tests:
- for key in list(stat_filter.keys()):
- stat_filter[key] = test.Stats[key]
- self.ClusterManager.log(("Test %s: "%test.name).ljust(25) + " %s"%repr(stat_filter))
-
- self.ClusterManager.debug("Detailed Results")
- for test in self.Tests:
- self.ClusterManager.debug(("Test %s: "%test.name).ljust(25) + " %s"%repr(test.Stats))
-
- self.ClusterManager.log("<<<<<<<<<<<<<<<< TESTS COMPLETED")
-
- def audit(self, LocalIgnore=[]):
- errcount = 0
- ignorelist = []
- ignorelist.append("CTS:")
- ignorelist.extend(LocalIgnore)
- ignorelist.extend(self.ClusterManager.errorstoignore())
- ignorelist.extend(self.ClusterManager.instance_errorstoignore())
-
- # This makes sure everything is stabilized before starting...
- failed = 0
- for audit in self.Audits:
- if not audit():
- self.ClusterManager.log("Audit " + audit.name() + " FAILED.")
- failed += 1
- else:
- self.ClusterManager.debug("Audit " + audit.name() + " passed.")
-
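- # Drain unexpected "BadNews" log matches, skipping anything in the ignore list; hitting 1000 errors triggers the while/else prompt below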
- while errcount < 1000:
- match = None
- if self.BadNews:
- match = self.BadNews.look(0)
-
- if match:
- add_err = 1
- for ignore in ignorelist:
- if add_err == 1 and re.search(ignore, match):
- add_err = 0
- if add_err == 1:
- self.ClusterManager.log("BadNews: " + match)
- self.incr("BadNews")
- errcount = errcount + 1
- else:
- break
- else:
- if self.ClusterManager.Env["continue"]:
- answer = "Y"
- else:
- try:
- answer = input('Big problems. Continue? [nY]')
- except EOFError as e:
- answer = "n"
- if answer and answer == "n":
- self.ClusterManager.log("Shutting down.")
- self.summarize()
- self.TearDown()
- raise ValueError("Looks like we hit a BadNews jackpot!")
-
- if self.BadNews:
- self.BadNews.end()
- return failed
-
-
-class AllOnce(Scenario):
- '''Every Test Once''' # Accessible as __doc__
- def run_loop(self, Iterations):
- testcount = 1
- for test in self.Tests:
- self.run_test(test, testcount)
- testcount += 1
-
-
-class RandomTests(Scenario):
- '''Random Test Execution'''
- def run_loop(self, Iterations):
- testcount = 1
- while testcount <= Iterations:
- test = self.ClusterManager.Env.random_gen.choice(self.Tests)
- self.run_test(test, testcount)
- testcount += 1
-
-
-class BasicSanity(Scenario):
- '''Basic Cluster Sanity'''
- def run_loop(self, Iterations):
- testcount = 1
- while testcount <= Iterations:
- test = self.ClusterManager.Env.random_gen.choice(self.Tests)
- self.run_test(test, testcount)
- testcount += 1
-
-
-class Sequence(Scenario):
- '''Named Tests in Sequence'''
- def run_loop(self, Iterations):
- testcount = 1
- while testcount <= Iterations:
- for test in self.Tests:
- self.run_test(test, testcount)
- testcount += 1
-
-
-class Boot(Scenario):
- '''Start the Cluster'''
- def run_loop(self, Iterations):
- testcount = 0
-
-
-class BootCluster(ScenarioComponent):
- (
-'''BootCluster is the most basic of ScenarioComponents.
-This ScenarioComponent simply starts the cluster manager on all the nodes.
-It is fairly robust as it waits for all nodes to come up before starting
-as they might have been rebooted or crashed for some reason beforehand.
-''')
- def __init__(self, Env):
- pass
-
- def IsApplicable(self):
- '''BootCluster is so generic it is always Applicable'''
- return True
-
- def SetUp(self, CM):
- '''Basic Cluster Manager startup. Start everything'''
-
- CM.prepare()
-
- # Clear out the cobwebs ;-)
- CM.stopall(verbose=True, force=True)
-
- # Now start the Cluster Manager on all the nodes.
- CM.log("Starting Cluster Manager on all nodes.")
- return CM.startall(verbose=True, quick=True)
-
- def TearDown(self, CM, force=False):
- '''Stop the cluster manager on all nodes'''
-
- # Stop the cluster manager everywhere
-
- CM.log("Stopping Cluster Manager on all nodes")
- return CM.stopall(verbose=True, force=force)
-
-
-class LeaveBooted(BootCluster):
- def TearDown(self, CM):
- '''Leave the cluster manager running when the scenario ends'''
-
- # Unlike BootCluster, do not stop the cluster manager here
-
- CM.log("Leaving Cluster running on all nodes")
- return 1
-
-
-class PingFest(ScenarioComponent):
- (
-'''PingFest does a flood ping to each node in the cluster from the test machine.
-
-If the LabEnvironment Parameter PingSize is set, it will be used as the size
-of ping packet requested (via the -s option). If it is not set, it defaults
-to 1024 bytes.
-
-According to the manual page for ping:
- Outputs packets as fast as they come back or one hundred times per
- second, whichever is more. For every ECHO_REQUEST sent a period ``.''
- is printed, while for every ECHO_REPLY received a backspace is printed.
- This provides a rapid display of how many packets are being dropped.
- Only the super-user may use this option. This can be very hard on a net-
- work and should be used with caution.
-''' )
-
- def __init__(self, Env):
- self.Env = Env
-
- def IsApplicable(self):
- '''PingFests are always applicable ;-)
- '''
-
- return True
-
- def SetUp(self, CM):
- '''Start the PingFest!'''
-
- self.PingSize = 1024
- if "PingSize" in list(CM.Env.keys()):
- self.PingSize = CM.Env["PingSize"]
-
- CM.log("Starting %d byte flood pings" % self.PingSize)
-
- self.PingPids = []
- for node in CM.Env["nodes"]:
- self.PingPids.append(self._pingchild(node))
-
- CM.log("Ping PIDs: " + repr(self.PingPids))
- return 1
-
- def TearDown(self, CM):
- '''Stop it right now! My ears are pinging!!'''
-
- for pid in self.PingPids:
- if pid != None:
- CM.log("Stopping ping process %d" % pid)
- os.kill(pid, signal.SIGKILL)
-
- def _pingchild(self, node):
-
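- # Classic fork/exec: the child process becomes the flood ping, the parent returns its PID so TearDown can kill it later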
- Args = ["ping", "-qfn", "-s", str(self.PingSize), node]
-
- sys.stdin.flush()
- sys.stdout.flush()
- sys.stderr.flush()
- pid = os.fork()
-
- if pid < 0:
- self.Env.log("Cannot fork ping child")
- return None
- if pid > 0:
- return pid
-
- # Otherwise, we're the child process.
-
- os.execvp("ping", Args)
- self.Env.log("Cannot execvp ping: " + repr(Args))
- sys.exit(1)
-
-
-class BasicSanityCheck(ScenarioComponent):
- (
-'''
-''')
-
- def IsApplicable(self):
- return self.Env["DoBSC"]
-
- def SetUp(self, CM):
-
- CM.prepare()
-
- # Clear out the cobwebs
- self.TearDown(CM)
-
- # Now start the Cluster Manager on all the nodes.
- CM.log("Starting Cluster Manager on BSC node(s).")
- return CM.startall()
-
- def TearDown(self, CM):
- CM.log("Stopping Cluster Manager on BSC node(s).")
- return CM.stopall()
-
-
-class Benchmark(ScenarioComponent):
- (
-'''
-''')
-
- def IsApplicable(self):
- return self.Env["benchmark"]
-
- def SetUp(self, CM):
-
- CM.prepare()
-
- # Clear out the cobwebs
- self.TearDown(CM, force=True)
-
- # Now start the Cluster Manager on all the nodes.
- CM.log("Starting Cluster Manager on all node(s).")
- return CM.startall()
-
- def TearDown(self, CM):
- CM.log("Stopping Cluster Manager on all node(s).")
- return CM.stopall()
-
-
-class RollingUpgrade(ScenarioComponent):
- (
-'''
-Test a rolling upgrade between two versions of the stack
-''')
-
- def __init__(self, Env):
- self.Env = Env
-
- def IsApplicable(self):
- if not self.Env["rpm-dir"]:
- return None
- if not self.Env["current-version"]:
- return None
- if not self.Env["previous-version"]:
- return None
-
- return True
-
- def install(self, node, version):
-
- target_dir = "/tmp/rpm-%s" % version
- src_dir = "%s/%s" % (self.CM.Env["rpm-dir"], version)
-
- self.CM.rsh(node, "mkdir -p %s" % target_dir)
- rc = self.CM.cp("%s/*.rpm %s:%s" % (src_dir, node, target_dir))
- self.CM.rsh(node, "rpm -Uvh --force %s/*.rpm" % (target_dir))
-
- return self.success()
-
- def upgrade(self, node):
- return self.install(node, self.CM.Env["current-version"])
-
- def downgrade(self, node):
- return self.install(node, self.CM.Env["previous-version"])
-
- def SetUp(self, CM):
- print(repr(self)+"prepare")
- CM.prepare()
-
- # Clear out the cobwebs
- CM.stopall(force=True)
-
- CM.log("Downgrading all nodes to %s." % self.Env["previous-version"])
-
- for node in self.Env["nodes"]:
- if not self.downgrade(node):
- CM.log("Couldn't downgrade %s" % node)
- return None
-
- return 1
-
- def TearDown(self, CM):
- # Stop everything
- CM.log("Stopping Cluster Manager on Upgrade nodes.")
- CM.stopall()
-
- CM.log("Upgrading all nodes to %s." % self.Env["current-version"])
- for node in self.Env["nodes"]:
- if not self.upgrade(node):
- CM.log("Couldn't upgrade %s" % node)
- return None
-
- return 1
diff --git a/cts/lab/CTStests.py b/cts/lab/CTStests.py
deleted file mode 100644
index 61766ce..0000000
--- a/cts/lab/CTStests.py
+++ /dev/null
@@ -1,3178 +0,0 @@
-""" Test-specific classes for Pacemaker's Cluster Test Suite (CTS)
-"""
-
-__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
-__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
-
-#
-# SPECIAL NOTE:
-#
-# Tests may NOT implement any cluster-manager-specific code in them.
-# EXTEND the ClusterManager object to provide the base capabilities
-# the test needs if you need to do something that the current CM classes
-# do not. Otherwise you screw up the whole point of the object structure
-# in CTS.
-#
-# Thank you.
-#
-
-import os
-import re
-import time
-import subprocess
-import tempfile
-
-from stat import *
-from cts.CTSaudits import *
-
-from pacemaker import BuildOptions
-from pacemaker._cts.CTS import NodeStatus
-from pacemaker._cts.environment import EnvFactory
-from pacemaker._cts.logging import LogFactory
-from pacemaker._cts.patterns import PatternSelector
-from pacemaker._cts.remote import RemoteFactory
-from pacemaker._cts.watcher import LogWatcher
-
-AllTestClasses = [ ]
-
-
-class CTSTest(object):
- '''
- A Cluster test.
- We implement the basic set of properties and behaviors for a generic
- cluster test.
-
- Cluster tests track their own statistics.
- We keep each of the kinds of counts we track as separate {name,value}
- pairs.
- '''
-
- def __init__(self, cm):
- #self.name="the unnamed test"
- self.Stats = {"calls":0
- , "success":0
- , "failure":0
- , "skipped":0
- , "auditfail":0}
-
-# if not issubclass(cm.__class__, ClusterManager):
-# raise ValueError("Must be a ClusterManager object")
- self.CM = cm
- self.Env = EnvFactory().getInstance()
- self.rsh = RemoteFactory().getInstance()
- self.logger = LogFactory()
- self.templates = PatternSelector(cm["Name"])
- self.Audits = []
- self.timeout = 120
- self.passed = 1
- self.is_loop = 0
- self.is_unsafe = 0
- self.is_experimental = 0
- self.is_container = 0
- self.is_valgrind = 0
- self.benchmark = 0 # which tests to benchmark
- self.timer = {} # timers
-
- def log(self, args):
- self.logger.log(args)
-
- def debug(self, args):
- self.logger.debug(args)
-
- def has_key(self, key):
- return key in self.Stats
-
- def __setitem__(self, key, value):
- self.Stats[key] = value
-
- def __getitem__(self, key):
- if str(key) == "0":
- raise ValueError("Bad call to 'foo in X', should reference 'foo in X.Stats' instead")
-
- if key in self.Stats:
- return self.Stats[key]
- return None
-
- def log_mark(self, msg):
- self.debug("MARK: test %s %s %d" % (self.name,msg,time.time()))
- return
-
- def get_timer(self,key = "test"):
- try: return self.timer[key]
-        except KeyError: return 0
-
- def set_timer(self,key = "test"):
- self.timer[key] = time.time()
- return self.timer[key]
-
- def log_timer(self,key = "test"):
- elapsed = 0
- if key in self.timer:
- elapsed = time.time() - self.timer[key]
- s = key == "test" and self.name or "%s:%s" % (self.name,key)
- self.debug("%s runtime: %.2f" % (s, elapsed))
- del self.timer[key]
- return elapsed
-
- def incr(self, name):
- '''Increment (or initialize) the value associated with the given name'''
- if not name in self.Stats:
- self.Stats[name] = 0
- self.Stats[name] = self.Stats[name]+1
-
- # Reset the test passed boolean
- if name == "calls":
- self.passed = 1
-
- def failure(self, reason="none"):
- '''Increment the failure count'''
- self.passed = 0
- self.incr("failure")
- self.logger.log(("Test %s" % self.name).ljust(35) + " FAILED: %s" % reason)
- return None
-
- def success(self):
- '''Increment the success count'''
- self.incr("success")
- return 1
-
- def skipped(self):
- '''Increment the skipped count'''
- self.incr("skipped")
- return 1
-
- def __call__(self, node):
- '''Perform the given test'''
- raise ValueError("Abstract Class member (__call__)")
- self.incr("calls")
- return self.failure()
-
- def audit(self):
- passed = 1
- if len(self.Audits) > 0:
- for audit in self.Audits:
- if not audit():
- self.logger.log("Internal %s Audit %s FAILED." % (self.name, audit.name()))
- self.incr("auditfail")
- passed = 0
- return passed
-
- def setup(self, node):
-        '''Set up the given test'''
- return self.success()
-
- def teardown(self, node):
- '''Tear down the given test'''
- return self.success()
-
- def create_watch(self, patterns, timeout, name=None):
- if not name:
- name = self.name
- return LogWatcher(self.Env["LogFileName"], patterns, self.Env["nodes"], self.Env["LogWatcher"], name, timeout)
-
- def local_badnews(self, prefix, watch, local_ignore=[]):
- errcount = 0
- if not prefix:
- prefix = "LocalBadNews:"
-
- ignorelist = []
- ignorelist.append(" CTS: ")
- ignorelist.append(prefix)
- ignorelist.extend(local_ignore)
-
- while errcount < 100:
- match = watch.look(0)
- if match:
- add_err = 1
- for ignore in ignorelist:
- if add_err == 1 and re.search(ignore, match):
- add_err = 0
- if add_err == 1:
- self.logger.log(prefix + " " + match)
- errcount = errcount + 1
- else:
- break
- else:
- self.logger.log("Too many errors!")
-
- watch.end()
- return errcount
-
- def is_applicable(self):
- return self.is_applicable_common()
-
- def is_applicable_common(self):
- '''Return True if we are applicable in the current test configuration'''
- #raise ValueError("Abstract Class member (is_applicable)")
-
- if self.is_loop and not self.Env["loop-tests"]:
- return False
- elif self.is_unsafe and not self.Env["unsafe-tests"]:
- return False
- elif self.is_valgrind and not self.Env["valgrind-tests"]:
- return False
- elif self.is_experimental and not self.Env["experimental-tests"]:
- return False
- elif self.is_container and not self.Env["container-tests"]:
- return False
- elif self.Env["benchmark"] and self.benchmark == 0:
- return False
-
- return True
-
- def find_ocfs2_resources(self, node):
- self.r_o2cb = None
- self.r_ocfs2 = []
-
- (_, lines) = self.rsh(node, "crm_resource -c", verbose=1)
- for line in lines:
- if re.search("^Resource", line):
- r = AuditResource(self.CM, line)
- if r.rtype == "o2cb" and r.parent != "NA":
- self.debug("Found o2cb: %s" % self.r_o2cb)
- self.r_o2cb = r.parent
- if re.search("^Constraint", line):
- c = AuditConstraint(self.CM, line)
- if c.type == "rsc_colocation" and c.target == self.r_o2cb:
- self.r_ocfs2.append(c.rsc)
-
- self.debug("Found ocfs2 filesystems: %s" % repr(self.r_ocfs2))
- return len(self.r_ocfs2)
-
- def canrunnow(self, node):
- '''Return TRUE if we can meaningfully run right now'''
- return 1
-
- def errorstoignore(self):
- '''Return list of errors which are 'normal' and should be ignored'''
- return []
-
-
-class StopTest(CTSTest):
- '''Stop (deactivate) the cluster manager on a node'''
- def __init__(self, cm):
- CTSTest.__init__(self, cm)
- self.name = "Stop"
-
- def __call__(self, node):
- '''Perform the 'stop' test. '''
- self.incr("calls")
- if self.CM.ShouldBeStatus[node] != "up":
- return self.skipped()
-
- patterns = []
- # Technically we should always be able to notice ourselves stopping
- patterns.append(self.templates["Pat:We_stopped"] % node)
-
- # Any active node needs to notice this one left
- # (note that this won't work if we have multiple partitions)
- for other in self.Env["nodes"]:
- if self.CM.ShouldBeStatus[other] == "up" and other != node:
- patterns.append(self.templates["Pat:They_stopped"] %(other, self.CM.key_for_node(node)))
- #self.debug("Checking %s will notice %s left"%(other, node))
-
- watch = self.create_watch(patterns, self.Env["DeadTime"])
- watch.set_watch()
-
- if node == self.CM.OurNode:
- self.incr("us")
- else:
- if self.CM.upcount() <= 1:
- self.incr("all")
- else:
- self.incr("them")
-
- self.CM.StopaCM(node)
- watch_result = watch.look_for_all()
-
- failreason = None
- UnmatchedList = "||"
- if watch.unmatched:
- (_, output) = self.rsh(node, "/bin/ps axf", verbose=1)
- for line in output:
- self.debug(line)
-
- (_, output) = self.rsh(node, "/usr/sbin/dlm_tool dump 2>/dev/null", verbose=1)
- for line in output:
- self.debug(line)
-
- for regex in watch.unmatched:
- self.logger.log ("ERROR: Shutdown pattern not found: %s" % (regex))
-                UnmatchedList += regex + "||"
- failreason = "Missing shutdown pattern"
-
- self.CM.cluster_stable(self.Env["DeadTime"])
-
- if not watch.unmatched or self.CM.upcount() == 0:
- return self.success()
-
- if len(watch.unmatched) >= self.CM.upcount():
- return self.failure("no match against (%s)" % UnmatchedList)
-
- if failreason == None:
- return self.success()
- else:
- return self.failure(failreason)
-#
-# We don't register StopTest because it's better when called by
-# another test...
-#
-
-
-class StartTest(CTSTest):
- '''Start (activate) the cluster manager on a node'''
- def __init__(self, cm, debug=None):
- CTSTest.__init__(self,cm)
- self.name = "start"
- self.debug = debug
-
- def __call__(self, node):
- '''Perform the 'start' test. '''
- self.incr("calls")
-
- if self.CM.upcount() == 0:
- self.incr("us")
- else:
- self.incr("them")
-
- if self.CM.ShouldBeStatus[node] != "down":
- return self.skipped()
- elif self.CM.StartaCM(node):
- return self.success()
- else:
- return self.failure("Startup %s on node %s failed"
- % (self.Env["Name"], node))
-
-#
-# We don't register StartTest because it's better when called by
-# another test...
-#
-
-
-class FlipTest(CTSTest):
-    '''If it's running, stop it. If it's stopped, start it.
- Overthrow the status quo...
- '''
- def __init__(self, cm):
- CTSTest.__init__(self,cm)
- self.name = "Flip"
- self.start = StartTest(cm)
- self.stop = StopTest(cm)
-
- def __call__(self, node):
- '''Perform the 'Flip' test. '''
- self.incr("calls")
- if self.CM.ShouldBeStatus[node] == "up":
- self.incr("stopped")
- ret = self.stop(node)
- type = "up->down"
- # Give the cluster time to recognize it's gone...
- time.sleep(self.Env["StableTime"])
- elif self.CM.ShouldBeStatus[node] == "down":
- self.incr("started")
- ret = self.start(node)
- type = "down->up"
- else:
- return self.skipped()
-
- self.incr(type)
- if ret:
- return self.success()
- else:
- return self.failure("%s failure" % type)
-
-# Register FlipTest as a good test to run
-AllTestClasses.append(FlipTest)
-
-
-class RestartTest(CTSTest):
- '''Stop and restart a node'''
- def __init__(self, cm):
- CTSTest.__init__(self,cm)
- self.name = "Restart"
- self.start = StartTest(cm)
- self.stop = StopTest(cm)
- self.benchmark = 1
-
- def __call__(self, node):
- '''Perform the 'restart' test. '''
- self.incr("calls")
-
- self.incr("node:" + node)
-
- ret1 = 1
- if self.CM.StataCM(node):
- self.incr("WasStopped")
- if not self.start(node):
- return self.failure("start (setup) failure: "+node)
-
- self.set_timer()
- if not self.stop(node):
- return self.failure("stop failure: "+node)
- if not self.start(node):
- return self.failure("start failure: "+node)
- return self.success()
-
-# Register RestartTest as a good test to run
-AllTestClasses.append(RestartTest)
-
-
-class StonithdTest(CTSTest):
- def __init__(self, cm):
- CTSTest.__init__(self, cm)
- self.name = "Stonithd"
- self.startall = SimulStartLite(cm)
- self.benchmark = 1
-
- def __call__(self, node):
- self.incr("calls")
- if len(self.Env["nodes"]) < 2:
- return self.skipped()
-
- ret = self.startall(None)
- if not ret:
- return self.failure("Setup failed")
-
- is_dc = self.CM.is_node_dc(node)
-
- watchpats = []
- watchpats.append(self.templates["Pat:Fencing_ok"] % node)
- watchpats.append(self.templates["Pat:NodeFenced"] % node)
-
- if not self.Env["at-boot"]:
- self.debug("Expecting %s to stay down" % node)
- self.CM.ShouldBeStatus[node] = "down"
- else:
- self.debug("Expecting %s to come up again %d" % (node, self.Env["at-boot"]))
- watchpats.append("%s.* S_STARTING -> S_PENDING" % node)
- watchpats.append("%s.* S_PENDING -> S_NOT_DC" % node)
-
- watch = self.create_watch(watchpats, 30 + self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"])
- watch.set_watch()
-
- origin = self.Env.random_gen.choice(self.Env["nodes"])
-
- (rc, _) = self.rsh(origin, "stonith_admin --reboot %s -VVVVVV" % node)
-
- if rc == 124: # CRM_EX_TIMEOUT
-            # Look for the patterns anyway. Usually this means the required
-            # device was running on the node to be fenced, or the required
-            # devices were in the process of being loaded and/or moved.
-            #
-            # Effectively the node committed suicide, so there will be no
-            # confirmation, but Pacemaker should notice and fence the node
-            # again.
-
- self.logger.log("Fencing command on %s to fence %s timed out" % (origin, node))
-
- elif origin != node and rc != 0:
- self.debug("Waiting for the cluster to recover")
- self.CM.cluster_stable()
-
- self.debug("Waiting for fenced node to come back up")
- self.CM.ns.wait_for_all_nodes(self.Env["nodes"], 600)
-
- self.logger.log("Fencing command on %s failed to fence %s (rc=%d)" % (origin, node, rc))
-
- elif origin == node and rc != 255:
- # 255 == broken pipe, ie. the node was fenced as expected
- self.logger.log("Locally originated fencing returned %d" % rc)
-
- self.set_timer("fence")
- matched = watch.look_for_all()
- self.log_timer("fence")
- self.set_timer("reform")
- if watch.unmatched:
- self.logger.log("Patterns not found: " + repr(watch.unmatched))
-
- self.debug("Waiting for the cluster to recover")
- self.CM.cluster_stable()
-
- self.debug("Waiting for fenced node to come back up")
- self.CM.ns.wait_for_all_nodes(self.Env["nodes"], 600)
-
- self.debug("Waiting for the cluster to re-stabilize with all nodes")
- is_stable = self.CM.cluster_stable(self.Env["StartTime"])
-
- if not matched:
- return self.failure("Didn't find all expected patterns")
- elif not is_stable:
- return self.failure("Cluster did not become stable")
-
- self.log_timer("reform")
- return self.success()
-
- def errorstoignore(self):
- return [
- self.templates["Pat:Fencing_start"] % ".*",
- self.templates["Pat:Fencing_ok"] % ".*",
- self.templates["Pat:Fencing_active"],
- r"error.*: Operation 'reboot' targeting .* by .* for stonith_admin.*: Timer expired",
- ]
-
- def is_applicable(self):
- if not self.is_applicable_common():
- return False
-
- if "DoFencing" in list(self.Env.keys()):
- return self.Env["DoFencing"]
-
- return True
-
-AllTestClasses.append(StonithdTest)
-
-
-class StartOnebyOne(CTSTest):
- '''Start all the nodes ~ one by one'''
- def __init__(self, cm):
- CTSTest.__init__(self,cm)
- self.name = "StartOnebyOne"
- self.stopall = SimulStopLite(cm)
- self.start = StartTest(cm)
- self.ns = NodeStatus(cm.Env)
-
- def __call__(self, dummy):
- '''Perform the 'StartOnebyOne' test. '''
- self.incr("calls")
-
- # We ignore the "node" parameter...
-
- # Shut down all the nodes...
- ret = self.stopall(None)
- if not ret:
- return self.failure("Test setup failed")
-
- failed = []
- self.set_timer()
- for node in self.Env["nodes"]:
- if not self.start(node):
- failed.append(node)
-
- if len(failed) > 0:
- return self.failure("Some node failed to start: " + repr(failed))
-
- return self.success()
-
-# Register StartOnebyOne as a good test to run
-AllTestClasses.append(StartOnebyOne)
-
-
-class SimulStart(CTSTest):
- '''Start all the nodes ~ simultaneously'''
- def __init__(self, cm):
- CTSTest.__init__(self,cm)
- self.name = "SimulStart"
- self.stopall = SimulStopLite(cm)
- self.startall = SimulStartLite(cm)
-
- def __call__(self, dummy):
- '''Perform the 'SimulStart' test. '''
- self.incr("calls")
-
- # We ignore the "node" parameter...
-
- # Shut down all the nodes...
- ret = self.stopall(None)
- if not ret:
- return self.failure("Setup failed")
-
- if not self.startall(None):
- return self.failure("Startall failed")
-
- return self.success()
-
-# Register SimulStart as a good test to run
-AllTestClasses.append(SimulStart)
-
-
-class SimulStop(CTSTest):
- '''Stop all the nodes ~ simultaneously'''
- def __init__(self, cm):
- CTSTest.__init__(self,cm)
- self.name = "SimulStop"
- self.startall = SimulStartLite(cm)
- self.stopall = SimulStopLite(cm)
-
- def __call__(self, dummy):
- '''Perform the 'SimulStop' test. '''
- self.incr("calls")
-
- # We ignore the "node" parameter...
-
- # Start up all the nodes...
- ret = self.startall(None)
- if not ret:
- return self.failure("Setup failed")
-
- if not self.stopall(None):
- return self.failure("Stopall failed")
-
- return self.success()
-
-# Register SimulStop as a good test to run
-AllTestClasses.append(SimulStop)
-
-
-class StopOnebyOne(CTSTest):
- '''Stop all the nodes in order'''
- def __init__(self, cm):
- CTSTest.__init__(self,cm)
- self.name = "StopOnebyOne"
- self.startall = SimulStartLite(cm)
- self.stop = StopTest(cm)
-
- def __call__(self, dummy):
- '''Perform the 'StopOnebyOne' test. '''
- self.incr("calls")
-
- # We ignore the "node" parameter...
-
- # Start up all the nodes...
- ret = self.startall(None)
- if not ret:
- return self.failure("Setup failed")
-
- failed = []
- self.set_timer()
- for node in self.Env["nodes"]:
- if not self.stop(node):
- failed.append(node)
-
- if len(failed) > 0:
- return self.failure("Some node failed to stop: " + repr(failed))
-
- return self.success()
-
-# Register StopOnebyOne as a good test to run
-AllTestClasses.append(StopOnebyOne)
-
-
-class RestartOnebyOne(CTSTest):
- '''Restart all the nodes in order'''
- def __init__(self, cm):
- CTSTest.__init__(self,cm)
- self.name = "RestartOnebyOne"
- self.startall = SimulStartLite(cm)
-
- def __call__(self, dummy):
- '''Perform the 'RestartOnebyOne' test. '''
- self.incr("calls")
-
- # We ignore the "node" parameter...
-
- # Start up all the nodes...
- ret = self.startall(None)
- if not ret:
- return self.failure("Setup failed")
-
- did_fail = []
- self.set_timer()
- self.restart = RestartTest(self.CM)
- for node in self.Env["nodes"]:
- if not self.restart(node):
- did_fail.append(node)
-
- if did_fail:
- return self.failure("Could not restart %d nodes: %s"
- % (len(did_fail), repr(did_fail)))
- return self.success()
-
-# Register RestartOnebyOne as a good test to run
-AllTestClasses.append(RestartOnebyOne)
-
-
-class PartialStart(CTSTest):
- '''Start a node - but tell it to stop before it finishes starting up'''
- def __init__(self, cm):
- CTSTest.__init__(self,cm)
- self.name = "PartialStart"
- self.startall = SimulStartLite(cm)
- self.stopall = SimulStopLite(cm)
- self.stop = StopTest(cm)
- #self.is_unsafe = 1
-
- def __call__(self, node):
- '''Perform the 'PartialStart' test. '''
- self.incr("calls")
-
- ret = self.stopall(None)
- if not ret:
- return self.failure("Setup failed")
-
- watchpats = []
- watchpats.append("pacemaker-controld.*Connecting to .* cluster infrastructure")
- watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
- watch.set_watch()
-
- self.CM.StartaCMnoBlock(node)
- ret = watch.look_for_all()
- if not ret:
- self.logger.log("Patterns not found: " + repr(watch.unmatched))
- return self.failure("Setup of %s failed" % node)
-
- ret = self.stop(node)
- if not ret:
- return self.failure("%s did not stop in time" % node)
-
- return self.success()
-
- def errorstoignore(self):
- '''Return list of errors which should be ignored'''
-
- # We might do some fencing in the 2-node case if we make it up far enough
- return [
- r"Executing reboot fencing operation",
- r"Requesting fencing \([^)]+\) targeting node ",
- ]
-
-# Register PartialStart as a good test to run
-AllTestClasses.append(PartialStart)
-
-
-class StandbyTest(CTSTest):
- def __init__(self, cm):
- CTSTest.__init__(self,cm)
- self.name = "Standby"
- self.benchmark = 1
-
- self.start = StartTest(cm)
- self.startall = SimulStartLite(cm)
-
- # make sure the node is active
- # set the node to standby mode
-        # check resources, no resources should be running on the node
- # set the node to active mode
-        # check resources, resources should have been migrated back (SHOULD THEY?)
-
- def __call__(self, node):
-
- self.incr("calls")
- ret = self.startall(None)
- if not ret:
- return self.failure("Start all nodes failed")
-
- self.debug("Make sure node %s is active" % node)
- if self.CM.StandbyStatus(node) != "off":
- if not self.CM.SetStandbyMode(node, "off"):
- return self.failure("can't set node %s to active mode" % node)
-
- self.CM.cluster_stable()
-
- status = self.CM.StandbyStatus(node)
- if status != "off":
- return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status))
-
- self.debug("Getting resources running on node %s" % node)
- rsc_on_node = self.CM.active_resources(node)
-
- watchpats = []
- watchpats.append(r"State transition .* -> S_POLICY_ENGINE")
- watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
- watch.set_watch()
-
- self.debug("Setting node %s to standby mode" % node)
- if not self.CM.SetStandbyMode(node, "on"):
- return self.failure("can't set node %s to standby mode" % node)
-
- self.set_timer("on")
-
- ret = watch.look_for_all()
- if not ret:
- self.logger.log("Patterns not found: " + repr(watch.unmatched))
- self.CM.SetStandbyMode(node, "off")
- return self.failure("cluster didn't react to standby change on %s" % node)
-
- self.CM.cluster_stable()
-
- status = self.CM.StandbyStatus(node)
- if status != "on":
- return self.failure("standby status of %s is [%s] but we expect [on]" % (node, status))
- self.log_timer("on")
-
- self.debug("Checking resources")
- bad_run = self.CM.active_resources(node)
- if len(bad_run) > 0:
- rc = self.failure("%s set to standby, %s is still running on it" % (node, repr(bad_run)))
- self.debug("Setting node %s to active mode" % node)
- self.CM.SetStandbyMode(node, "off")
- return rc
-
- self.debug("Setting node %s to active mode" % node)
- if not self.CM.SetStandbyMode(node, "off"):
- return self.failure("can't set node %s to active mode" % node)
-
- self.set_timer("off")
- self.CM.cluster_stable()
-
- status = self.CM.StandbyStatus(node)
- if status != "off":
- return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status))
- self.log_timer("off")
-
- return self.success()
-
-AllTestClasses.append(StandbyTest)
-
-
-class ValgrindTest(CTSTest):
- '''Check for memory leaks'''
- def __init__(self, cm):
- CTSTest.__init__(self,cm)
- self.name = "Valgrind"
- self.stopall = SimulStopLite(cm)
- self.startall = SimulStartLite(cm)
- self.is_valgrind = 1
- self.is_loop = 1
-
- def setup(self, node):
- self.incr("calls")
-
- ret = self.stopall(None)
- if not ret:
- return self.failure("Stop all nodes failed")
-
- # @TODO Edit /etc/sysconfig/pacemaker on all nodes to enable valgrind,
- # and clear any valgrind logs from previous runs. For now, we rely on
- # the user to do this manually.
-
- ret = self.startall(None)
- if not ret:
- return self.failure("Start all nodes failed")
-
- return self.success()
-
- def teardown(self, node):
- # Return all nodes to normal
- # @TODO Edit /etc/sysconfig/pacemaker on all nodes to disable valgrind
- ret = self.stopall(None)
- if not ret:
- return self.failure("Stop all nodes failed")
-
- return self.success()
-
- def find_leaks(self):
- # Check for leaks
- # (no longer used but kept in case feature is restored)
- leaked = []
- self.stop = StopTest(self.CM)
-
- for node in self.Env["nodes"]:
- rc = self.stop(node)
- if not rc:
- self.failure("Couldn't shut down %s" % node)
-
- (rc, _) = self.rsh(node, "grep -e indirectly.*lost:.*[1-9] -e definitely.*lost:.*[1-9] -e (ERROR|error).*SUMMARY:.*[1-9].*errors %s" % self.logger.logPat)
- if rc != 1:
- leaked.append(node)
- self.failure("Valgrind errors detected on %s" % node)
- (_, output) = self.rsh(node, "grep -e lost: -e SUMMARY: %s" % self.logger.logPat, verbose=1)
- for line in output:
- self.logger.log(line)
- (_, output) = self.rsh(node, "cat %s" % self.logger.logPat, verbose=1)
- for line in output:
- self.debug(line)
-
- self.rsh(node, "rm -f %s" % self.logger.logPat, verbose=1)
- return leaked
-
- def __call__(self, node):
- #leaked = self.find_leaks()
- #if len(leaked) > 0:
- # return self.failure("Nodes %s leaked" % repr(leaked))
-
- return self.success()
-
- def errorstoignore(self):
- '''Return list of errors which should be ignored'''
- return [
- r"pacemaker-based.*: \*\*\*\*\*\*\*\*\*\*\*\*\*",
- r"pacemaker-based.*: .* avoid confusing Valgrind",
- r"HA_VALGRIND_ENABLED",
- ]
-
-
-class StandbyLoopTest(ValgrindTest):
- '''Check for memory leaks by putting a node in and out of standby for an hour'''
- # @TODO This is not a useful test for memory leaks
- def __init__(self, cm):
- ValgrindTest.__init__(self,cm)
- self.name = "StandbyLoop"
-
- def __call__(self, node):
-
- lpc = 0
- delay = 2
- failed = 0
- done = time.time() + self.Env["loop-minutes"] * 60
- while time.time() <= done and not failed:
- lpc = lpc + 1
-
- time.sleep(delay)
- if not self.CM.SetStandbyMode(node, "on"):
- self.failure("can't set node %s to standby mode" % node)
- failed = lpc
-
- time.sleep(delay)
- if not self.CM.SetStandbyMode(node, "off"):
- self.failure("can't set node %s to active mode" % node)
- failed = lpc
-
- leaked = self.find_leaks()
- if failed:
- return self.failure("Iteration %d failed" % failed)
- elif len(leaked) > 0:
- return self.failure("Nodes %s leaked" % repr(leaked))
-
- return self.success()
-
-#AllTestClasses.append(StandbyLoopTest)
-
-
-class BandwidthTest(CTSTest):
-# Tests should not be cluster-manager-specific
-# If you need to find out cluster manager configuration to do this, then
-# it should be added to the generic cluster manager API.
- '''Test the bandwidth which the cluster uses'''
- def __init__(self, cm):
- CTSTest.__init__(self, cm)
- self.name = "Bandwidth"
- self.start = StartTest(cm)
- self.__setitem__("min",0)
- self.__setitem__("max",0)
- self.__setitem__("totalbandwidth",0)
- (handle, self.tempfile) = tempfile.mkstemp(".cts")
- os.close(handle)
- self.startall = SimulStartLite(cm)
-
- def __call__(self, node):
- '''Perform the Bandwidth test'''
- self.incr("calls")
-
- if self.CM.upcount() < 1:
- return self.skipped()
-
- Path = self.CM.InternalCommConfig()
- if "ip" not in Path["mediatype"]:
- return self.skipped()
-
- port = Path["port"][0]
- port = int(port)
-
- ret = self.startall(None)
- if not ret:
- return self.failure("Test setup failed")
- time.sleep(5) # We get extra messages right after startup.
-
- fstmpfile = "/var/run/band_estimate"
- dumpcmd = "tcpdump -p -n -c 102 -i any udp port %d > %s 2>&1" \
- % (port, fstmpfile)
-
- (rc, _) = self.rsh(node, dumpcmd)
- if rc == 0:
- farfile = "root@%s:%s" % (node, fstmpfile)
- self.rsh.copy(farfile, self.tempfile)
- Bandwidth = self.countbandwidth(self.tempfile)
- if not Bandwidth:
- self.logger.log("Could not compute bandwidth.")
- return self.success()
- intband = int(Bandwidth + 0.5)
- self.logger.log("...bandwidth: %d bits/sec" % intband)
- self.Stats["totalbandwidth"] = self.Stats["totalbandwidth"] + Bandwidth
- if self.Stats["min"] == 0:
- self.Stats["min"] = Bandwidth
- if Bandwidth > self.Stats["max"]:
- self.Stats["max"] = Bandwidth
- if Bandwidth < self.Stats["min"]:
- self.Stats["min"] = Bandwidth
- self.rsh(node, "rm -f %s" % fstmpfile)
- os.unlink(self.tempfile)
- return self.success()
- else:
- return self.failure("no response from tcpdump command [%d]!" % rc)
-
- def countbandwidth(self, file):
- fp = open(file, "r")
- fp.seek(0)
- count = 0
- sum = 0
- while 1:
- line = fp.readline()
- if not line:
- return None
- if re.search("udp",line) or re.search("UDP,", line):
- count = count + 1
- linesplit = line.split(" ")
- for j in range(len(linesplit)-1):
- if linesplit[j] == "udp": break
- if linesplit[j] == "length:": break
-
- try:
- sum = sum + int(linesplit[j+1])
- except ValueError:
- self.logger.log("Invalid tcpdump line: %s" % line)
- return None
- T1 = linesplit[0]
- timesplit = T1.split(":")
- time2split = timesplit[2].split(".")
- time1 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001
- break
-
- while count < 100:
- line = fp.readline()
- if not line:
- return None
- if re.search("udp",line) or re.search("UDP,", line):
- count = count+1
- linessplit = line.split(" ")
- for j in range(len(linessplit)-1):
- if linessplit[j] == "udp": break
- if linessplit[j] == "length:": break
- try:
- sum = int(linessplit[j+1]) + sum
- except ValueError:
- self.logger.log("Invalid tcpdump line: %s" % line)
- return None
-
- T2 = linessplit[0]
- timesplit = T2.split(":")
- time2split = timesplit[2].split(".")
- time2 = (int(timesplit[0])*60+int(timesplit[1]))*60+int(time2split[0])+int(time2split[1])*0.000001
- time = time2-time1
- if (time <= 0):
- return 0
- return int((sum*8)/time)
-
- def is_applicable(self):
-        '''BandwidthTest is never applicable'''
- return False
-
-AllTestClasses.append(BandwidthTest)
-
-
-###################################################################
-class MaintenanceMode(CTSTest):
-###################################################################
- def __init__(self, cm):
- CTSTest.__init__(self,cm)
- self.name = "MaintenanceMode"
- self.start = StartTest(cm)
- self.startall = SimulStartLite(cm)
- self.max = 30
- #self.is_unsafe = 1
- self.benchmark = 1
- self.action = "asyncmon"
- self.interval = 0
- self.rid = "maintenanceDummy"
-
- def toggleMaintenanceMode(self, node, action):
- pats = []
- pats.append(self.templates["Pat:DC_IDLE"])
-
- # fail the resource right after turning Maintenance mode on
- # verify it is not recovered until maintenance mode is turned off
- if action == "On":
- pats.append(self.templates["Pat:RscOpFail"] % (self.action, self.rid))
- else:
- pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid))
- pats.append(self.templates["Pat:RscOpOK"] % ("start", self.rid))
-
- watch = self.create_watch(pats, 60)
- watch.set_watch()
-
- self.debug("Turning maintenance mode %s" % action)
- self.rsh(node, self.templates["MaintenanceMode%s" % (action)])
- if (action == "On"):
- self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node))
-
- self.set_timer("recover%s" % (action))
- watch.look_for_all()
- self.log_timer("recover%s" % (action))
- if watch.unmatched:
- self.debug("Failed to find patterns when turning maintenance mode %s" % action)
- return repr(watch.unmatched)
-
- return ""
-
- def insertMaintenanceDummy(self, node):
- pats = []
- pats.append(("%s.*" % node) + (self.templates["Pat:RscOpOK"] % ("start", self.rid)))
-
- watch = self.create_watch(pats, 60)
- watch.set_watch()
-
- self.CM.AddDummyRsc(node, self.rid)
-
- self.set_timer("addDummy")
- watch.look_for_all()
- self.log_timer("addDummy")
-
- if watch.unmatched:
- self.debug("Failed to find patterns when adding maintenance dummy resource")
- return repr(watch.unmatched)
- return ""
-
- def removeMaintenanceDummy(self, node):
- pats = []
- pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid))
-
- watch = self.create_watch(pats, 60)
- watch.set_watch()
- self.CM.RemoveDummyRsc(node, self.rid)
-
- self.set_timer("removeDummy")
- watch.look_for_all()
- self.log_timer("removeDummy")
-
- if watch.unmatched:
- self.debug("Failed to find patterns when removing maintenance dummy resource")
- return repr(watch.unmatched)
- return ""
-
- def managedRscList(self, node):
- rscList = []
- (_, lines) = self.rsh(node, "crm_resource -c", verbose=1)
- for line in lines:
- if re.search("^Resource", line):
- tmp = AuditResource(self.CM, line)
- if tmp.managed():
- rscList.append(tmp.id)
-
- return rscList
-
- def verifyResources(self, node, rscList, managed):
- managedList = list(rscList)
- managed_str = "managed"
- if not managed:
- managed_str = "unmanaged"
-
- (_, lines) = self.rsh(node, "crm_resource -c", verbose=1)
- for line in lines:
- if re.search("^Resource", line):
- tmp = AuditResource(self.CM, line)
- if managed and not tmp.managed():
- continue
- elif not managed and tmp.managed():
- continue
- elif managedList.count(tmp.id):
- managedList.remove(tmp.id)
-
- if len(managedList) == 0:
- self.debug("Found all %s resources on %s" % (managed_str, node))
- return True
-
- self.logger.log("Could not find all %s resources on %s. %s" % (managed_str, node, managedList))
- return False
-
- def __call__(self, node):
- '''Perform the 'MaintenanceMode' test. '''
- self.incr("calls")
- verify_managed = False
- verify_unmanaged = False
- failPat = ""
-
- ret = self.startall(None)
- if not ret:
- return self.failure("Setup failed")
-
- # get a list of all the managed resources. We use this list
- # after enabling maintenance mode to verify all managed resources
- # become un-managed. After maintenance mode is turned off, we use
- # this list to verify all the resources become managed again.
- managedResources = self.managedRscList(node)
- if len(managedResources) == 0:
- self.logger.log("No managed resources on %s" % node)
- return self.skipped()
-
- # insert a fake resource we can fail during maintenance mode
- # so we can verify recovery does not take place until after maintenance
- # mode is disabled.
- failPat = failPat + self.insertMaintenanceDummy(node)
-
- # toggle maintenance mode ON, then fail dummy resource.
- failPat = failPat + self.toggleMaintenanceMode(node, "On")
-
- # verify all the resources are now unmanaged
- if self.verifyResources(node, managedResources, False):
- verify_unmanaged = True
-
- # Toggle maintenance mode OFF, verify dummy is recovered.
- failPat = failPat + self.toggleMaintenanceMode(node, "Off")
-
- # verify all the resources are now managed again
- if self.verifyResources(node, managedResources, True):
- verify_managed = True
-
- # Remove our maintenance dummy resource.
- failPat = failPat + self.removeMaintenanceDummy(node)
-
- self.CM.cluster_stable()
-
- if failPat != "":
- return self.failure("Unmatched patterns: %s" % (failPat))
- elif verify_unmanaged is False:
- return self.failure("Failed to verify resources became unmanaged during maintenance mode")
- elif verify_managed is False:
- return self.failure("Failed to verify resources switched back to managed after disabling maintenance mode")
-
- return self.success()
-
- def errorstoignore(self):
- '''Return list of errors which should be ignored'''
- return [
- r"Updating failcount for %s" % self.rid,
- r"schedulerd.*: Recover\s+%s\s+\(.*\)" % self.rid,
- r"Unknown operation: fail",
- self.templates["Pat:RscOpOK"] % (self.action, self.rid),
- r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval),
- ]
-
-AllTestClasses.append(MaintenanceMode)
-
-
-class ResourceRecover(CTSTest):
- def __init__(self, cm):
- CTSTest.__init__(self,cm)
- self.name = "ResourceRecover"
- self.start = StartTest(cm)
- self.startall = SimulStartLite(cm)
- self.max = 30
- self.rid = None
- self.rid_alt = None
- #self.is_unsafe = 1
- self.benchmark = 1
-
- # these are the values used for the new LRM API call
- self.action = "asyncmon"
- self.interval = 0
-
- def __call__(self, node):
- '''Perform the 'ResourceRecover' test. '''
- self.incr("calls")
-
- ret = self.startall(None)
- if not ret:
- return self.failure("Setup failed")
-
- # List all resources active on the node (skip test if none)
- resourcelist = self.CM.active_resources(node)
- if len(resourcelist) == 0:
- self.logger.log("No active resources on %s" % node)
- return self.skipped()
-
- # Choose one resource at random
- rsc = self.choose_resource(node, resourcelist)
- if rsc is None:
- return self.failure("Could not get details of resource '%s'" % self.rid)
- if rsc.id == rsc.clone_id:
- self.debug("Failing " + rsc.id)
- else:
- self.debug("Failing " + rsc.id + " (also known as " + rsc.clone_id + ")")
-
- # Log patterns to watch for (failure, plus restart if managed)
- pats = []
- pats.append(self.templates["Pat:CloneOpFail"] % (self.action, rsc.id, rsc.clone_id))
- if rsc.managed():
- pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.rid))
- if rsc.unique():
- pats.append(self.templates["Pat:RscOpOK"] % ("start", self.rid))
- else:
- # Anonymous clones may get restarted with a different clone number
- pats.append(self.templates["Pat:RscOpOK"] % ("start", ".*"))
-
- # Fail resource. (Ideally, we'd fail it twice, to ensure the fail count
- # is incrementing properly, but it might restart on a different node.
- # We'd have to temporarily ban it from all other nodes and ensure the
- # migration-threshold hasn't been reached.)
- if self.fail_resource(rsc, node, pats) is None:
- return None # self.failure() already called
-
- return self.success()
-
- def choose_resource(self, node, resourcelist):
- """ Choose a random resource to target """
-
- self.rid = self.Env.random_gen.choice(resourcelist)
- self.rid_alt = self.rid
- (_, lines) = self.rsh(node, "crm_resource -c", verbose=1)
- for line in lines:
- if line.startswith("Resource: "):
- rsc = AuditResource(self.CM, line)
- if rsc.id == self.rid:
- # Handle anonymous clones that get renamed
- self.rid = rsc.clone_id
- return rsc
- return None
-
- def get_failcount(self, node):
- """ Check the fail count of targeted resource on given node """
-
- (rc, lines) = self.rsh(node,
- "crm_failcount --quiet --query --resource %s "
- "--operation %s --interval %d "
- "--node %s" % (self.rid, self.action,
- self.interval, node), verbose=1)
- if rc != 0 or len(lines) != 1:
- self.logger.log("crm_failcount on %s failed (%d): %s" % (node, rc,
- " // ".join(map(str.strip, lines))))
- return -1
- try:
- failcount = int(lines[0])
- except (IndexError, ValueError):
- self.logger.log("crm_failcount output on %s unparseable: %s" % (node,
- ' '.join(lines)))
- return -1
- return failcount
-
- def fail_resource(self, rsc, node, pats):
- """ Fail the targeted resource, and verify as expected """
-
- orig_failcount = self.get_failcount(node)
-
- watch = self.create_watch(pats, 60)
- watch.set_watch()
-
- self.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node))
-
- self.set_timer("recover")
- watch.look_for_all()
- self.log_timer("recover")
-
- self.CM.cluster_stable()
- recovered = self.CM.ResourceLocation(self.rid)
-
- if watch.unmatched:
- return self.failure("Patterns not found: %s" % repr(watch.unmatched))
-
- elif rsc.unique() and len(recovered) > 1:
- return self.failure("%s is now active on more than one node: %s"%(self.rid, repr(recovered)))
-
- elif len(recovered) > 0:
- self.debug("%s is running on: %s" % (self.rid, repr(recovered)))
-
- elif rsc.managed():
- return self.failure("%s was not recovered and is inactive" % self.rid)
-
- new_failcount = self.get_failcount(node)
- if new_failcount != (orig_failcount + 1):
- return self.failure("%s fail count is %d not %d" % (self.rid,
- new_failcount, orig_failcount + 1))
-
- return 0 # Anything but None is success
-
- def errorstoignore(self):
- '''Return list of errors which should be ignored'''
- return [
- r"Updating failcount for %s" % self.rid,
- r"schedulerd.*: Recover\s+(%s|%s)\s+\(.*\)" % (self.rid, self.rid_alt),
- r"Unknown operation: fail",
- self.templates["Pat:RscOpOK"] % (self.action, self.rid),
- r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self.rid, self.action, self.interval),
- ]
-
-AllTestClasses.append(ResourceRecover)
-
-
-class ComponentFail(CTSTest):
- def __init__(self, cm):
- CTSTest.__init__(self,cm)
- self.name = "ComponentFail"
- self.startall = SimulStartLite(cm)
- self.complist = cm.Components()
- self.patterns = []
- self.okerrpatterns = []
- self.is_unsafe = 1
-
- def __call__(self, node):
- '''Perform the 'ComponentFail' test. '''
- self.incr("calls")
- self.patterns = []
- self.okerrpatterns = []
-
- # start all nodes
- ret = self.startall(None)
- if not ret:
- return self.failure("Setup failed")
-
- if not self.CM.cluster_stable(self.Env["StableTime"]):
- return self.failure("Setup failed - unstable")
-
- node_is_dc = self.CM.is_node_dc(node, None)
-
- # select a component to kill
- chosen = self.Env.random_gen.choice(self.complist)
- while chosen.dc_only and node_is_dc == 0:
- chosen = self.Env.random_gen.choice(self.complist)
-
- self.debug("...component %s (dc=%d)" % (chosen.name, node_is_dc))
- self.incr(chosen.name)
-
- if chosen.name != "corosync":
- self.patterns.append(self.templates["Pat:ChildKilled"] %(node, chosen.name))
- self.patterns.append(self.templates["Pat:ChildRespawn"] %(node, chosen.name))
-
- self.patterns.extend(chosen.pats)
- if node_is_dc:
- self.patterns.extend(chosen.dc_pats)
-
- # @TODO this should be a flag in the Component
- if chosen.name in [ "corosync", "pacemaker-based", "pacemaker-fenced" ]:
- # Ignore actions for fence devices if fencer will respawn
- # (their registration will be lost, and probes will fail)
- self.okerrpatterns = [ self.templates["Pat:Fencing_active"] ]
- (_, lines) = self.rsh(node, "crm_resource -c", verbose=1)
- for line in lines:
- if re.search("^Resource", line):
- r = AuditResource(self.CM, line)
- if r.rclass == "stonith":
- self.okerrpatterns.append(self.templates["Pat:Fencing_recover"] % r.id)
- self.okerrpatterns.append(self.templates["Pat:Fencing_probe"] % r.id)
-
- # supply a copy so self.patterns doesn't end up empty
- tmpPats = []
- tmpPats.extend(self.patterns)
- self.patterns.extend(chosen.badnews_ignore)
-
- # Look for STONITH ops, depending on Env["at-boot"] we might need to change the nodes status
- stonithPats = []
- stonithPats.append(self.templates["Pat:Fencing_ok"] % node)
- stonith = self.create_watch(stonithPats, 0)
- stonith.set_watch()
-
- # set the watch for stable
- watch = self.create_watch(
- tmpPats, self.Env["DeadTime"] + self.Env["StableTime"] + self.Env["StartTime"])
- watch.set_watch()
-
- # kill the component
- chosen.kill(node)
-
- self.debug("Waiting for the cluster to recover")
- self.CM.cluster_stable()
-
- self.debug("Waiting for any fenced node to come back up")
- self.CM.ns.wait_for_all_nodes(self.Env["nodes"], 600)
-
- self.debug("Waiting for the cluster to re-stabilize with all nodes")
- self.CM.cluster_stable(self.Env["StartTime"])
-
- self.debug("Checking if %s was shot" % node)
- shot = stonith.look(60)
- if shot:
- self.debug("Found: " + repr(shot))
- self.okerrpatterns.append(self.templates["Pat:Fencing_start"] % node)
-
- if not self.Env["at-boot"]:
- self.CM.ShouldBeStatus[node] = "down"
-
- # If fencing occurred, chances are many (if not all) the expected logs
- # will not be sent - or will be lost when the node reboots
- return self.success()
-
- # check for logs indicating a graceful recovery
- matched = watch.look_for_all(allow_multiple_matches=True)
- if watch.unmatched:
- self.logger.log("Patterns not found: " + repr(watch.unmatched))
-
- self.debug("Waiting for the cluster to re-stabilize with all nodes")
- is_stable = self.CM.cluster_stable(self.Env["StartTime"])
-
- if not matched:
- return self.failure("Didn't find all expected %s patterns" % chosen.name)
- elif not is_stable:
- return self.failure("Cluster did not become stable after killing %s" % chosen.name)
-
- return self.success()
-
- def errorstoignore(self):
- '''Return list of errors which should be ignored'''
- # Note that okerrpatterns refers to the last time we ran this test
- # The good news is that this works fine for us...
- self.okerrpatterns.extend(self.patterns)
- return self.okerrpatterns
-
-AllTestClasses.append(ComponentFail)
-
-
-class SplitBrainTest(CTSTest):
-    '''Test split-brain: when the path between the two nodes breaks,
-    check whether both nodes take over the resource'''
- def __init__(self,cm):
- CTSTest.__init__(self,cm)
- self.name = "SplitBrain"
- self.start = StartTest(cm)
- self.startall = SimulStartLite(cm)
- self.is_experimental = 1
-
- def isolate_partition(self, partition):
- other_nodes = []
- other_nodes.extend(self.Env["nodes"])
-
- for node in partition:
- try:
- other_nodes.remove(node)
- except ValueError:
- self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"]) + " from " +repr(partition))
-
- if len(other_nodes) == 0:
- return 1
-
- self.debug("Creating partition: " + repr(partition))
- self.debug("Everyone else: " + repr(other_nodes))
-
- for node in partition:
- if not self.CM.isolate_node(node, other_nodes):
- self.logger.log("Could not isolate %s" % node)
- return 0
-
- return 1
-
- def heal_partition(self, partition):
- other_nodes = []
- other_nodes.extend(self.Env["nodes"])
-
- for node in partition:
- try:
- other_nodes.remove(node)
- except ValueError:
- self.logger.log("Node "+node+" not in " + repr(self.Env["nodes"]))
-
- if len(other_nodes) == 0:
- return 1
-
- self.debug("Healing partition: " + repr(partition))
- self.debug("Everyone else: " + repr(other_nodes))
-
- for node in partition:
- self.CM.unisolate_node(node, other_nodes)
-
- def __call__(self, node):
- '''Perform split-brain test'''
- self.incr("calls")
- self.passed = 1
- partitions = {}
-
- ret = self.startall(None)
- if not ret:
- return self.failure("Setup failed")
-
- while 1:
- # Retry until we get multiple partitions
- partitions = {}
- p_max = len(self.Env["nodes"])
- for node in self.Env["nodes"]:
- p = self.Env.random_gen.randint(1, p_max)
- if not p in partitions:
- partitions[p] = []
- partitions[p].append(node)
- p_max = len(list(partitions.keys()))
- if p_max > 1:
- break
- # else, try again
-
- self.debug("Created %d partitions" % p_max)
- for key in list(partitions.keys()):
- self.debug("Partition["+str(key)+"]:\t"+repr(partitions[key]))
-
- # Disabling STONITH to reduce test complexity for now
- self.rsh(node, "crm_attribute -V -n stonith-enabled -v false")
-
- for key in list(partitions.keys()):
- self.isolate_partition(partitions[key])
-
- count = 30
- while count > 0:
-            if len(self.CM.find_partitions()) != p_max:
-                time.sleep(10)
-                count -= 1
- else:
- break
- else:
- self.failure("Expected partitions were not created")
-
- # Target number of partitions formed - wait for stability
- if not self.CM.cluster_stable():
- self.failure("Partitioned cluster not stable")
-
- # Now audit the cluster state
- self.CM.partitions_expected = p_max
- if not self.audit():
- self.failure("Audits failed")
- self.CM.partitions_expected = 1
-
- # And heal them again
- for key in list(partitions.keys()):
- self.heal_partition(partitions[key])
-
- # Wait for a single partition to form
- count = 30
- while count > 0:
- if len(self.CM.find_partitions()) != 1:
- time.sleep(10)
- count -= 1
- else:
- break
- else:
- self.failure("Cluster did not reform")
-
- # Wait for it to have the right number of members
- count = 30
- while count > 0:
- members = []
-
- partitions = self.CM.find_partitions()
- if len(partitions) > 0:
- members = partitions[0].split()
-
- if len(members) != len(self.Env["nodes"]):
- time.sleep(10)
- count -= 1
- else:
- break
- else:
- self.failure("Cluster did not completely reform")
-
-        # Wait up to 20 minutes - the delay is preferable to
-        # trying to continue in a messed-up state
- if not self.CM.cluster_stable(1200):
- self.failure("Reformed cluster not stable")
- if self.Env["continue"]:
- answer = "Y"
- else:
- try:
- answer = input('Continue? [nY]')
- except EOFError as e:
- answer = "n"
- if answer and answer == "n":
- raise ValueError("Reformed cluster not stable")
-
- # Turn fencing back on
- if self.Env["DoFencing"]:
- self.rsh(node, "crm_attribute -V -D -n stonith-enabled")
-
- self.CM.cluster_stable()
-
- if self.passed:
- return self.success()
- return self.failure("See previous errors")
-
- def errorstoignore(self):
- '''Return list of errors which are 'normal' and should be ignored'''
- return [
- r"Another DC detected:",
- r"(ERROR|error).*: .*Application of an update diff failed",
- r"pacemaker-controld.*:.*not in our membership list",
- r"CRIT:.*node.*returning after partition",
- ]
-
- def is_applicable(self):
- if not self.is_applicable_common():
- return False
- return len(self.Env["nodes"]) > 2
-
-AllTestClasses.append(SplitBrainTest)
-
-
-class Reattach(CTSTest):
- def __init__(self, cm):
- CTSTest.__init__(self,cm)
- self.name = "Reattach"
- self.startall = SimulStartLite(cm)
- self.restart1 = RestartTest(cm)
- self.stopall = SimulStopLite(cm)
- self.is_unsafe = 0 # Handled by canrunnow()
-
- def _is_managed(self, node):
- (_, is_managed) = self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -q -G -d true", verbose=1)
- is_managed = is_managed[0].strip()
- return is_managed == "true"
-
- def _set_unmanaged(self, node):
- self.debug("Disable resource management")
- self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -v false")
-
- def _set_managed(self, node):
- self.debug("Re-enable resource management")
- self.rsh(node, "crm_attribute -t rsc_defaults -n is-managed -D")
-
- def setup(self, node):
- attempt = 0
- if not self.startall(None):
- return None
-
- # Make sure we are really _really_ stable and that all
- # resources, including those that depend on transient node
- # attributes, are started
- while not self.CM.cluster_stable(double_check=True):
- if attempt < 5:
- attempt += 1
- self.debug("Not stable yet, re-testing")
- else:
- self.logger.log("Cluster is not stable")
- return None
-
- return 1
-
- def teardown(self, node):
-
- # Make sure 'node' is up
- start = StartTest(self.CM)
- start(node)
-
- if not self._is_managed(node):
- self.logger.log("Attempting to re-enable resource management on %s" % node)
- self._set_managed(node)
- self.CM.cluster_stable()
- if not self._is_managed(node):
- self.logger.log("Could not re-enable resource management")
- return 0
-
- return 1
-
- def canrunnow(self, node):
- '''Return TRUE if we can meaningfully run right now'''
- if self.find_ocfs2_resources(node):
- self.logger.log("Detach/Reattach scenarios are not possible with OCFS2 services present")
- return 0
- return 1
-
- def __call__(self, node):
- self.incr("calls")
-
- pats = []
- # Conveniently, the scheduler will display this message when disabling
- # management, even if fencing is not enabled, so we can rely on it.
- managed = self.create_watch(["No fencing will be done"], 60)
- managed.set_watch()
-
- self._set_unmanaged(node)
-
- if not managed.look_for_all():
- self.logger.log("Patterns not found: " + repr(managed.unmatched))
- return self.failure("Resource management not disabled")
-
- pats = []
- pats.append(self.templates["Pat:RscOpOK"] % ("start", ".*"))
- pats.append(self.templates["Pat:RscOpOK"] % ("stop", ".*"))
- pats.append(self.templates["Pat:RscOpOK"] % ("promote", ".*"))
- pats.append(self.templates["Pat:RscOpOK"] % ("demote", ".*"))
- pats.append(self.templates["Pat:RscOpOK"] % ("migrate", ".*"))
-
- watch = self.create_watch(pats, 60, "ShutdownActivity")
- watch.set_watch()
-
- self.debug("Shutting down the cluster")
- ret = self.stopall(None)
- if not ret:
- self._set_managed(node)
- return self.failure("Couldn't shut down the cluster")
-
- self.debug("Bringing the cluster back up")
- ret = self.startall(None)
- time.sleep(5) # allow ping to update the CIB
- if not ret:
- self._set_managed(node)
- return self.failure("Couldn't restart the cluster")
-
- if self.local_badnews("ResourceActivity:", watch):
- self._set_managed(node)
- return self.failure("Resources stopped or started during cluster restart")
-
- watch = self.create_watch(pats, 60, "StartupActivity")
- watch.set_watch()
-
- # Re-enable resource management (and verify it happened).
- self._set_managed(node)
- self.CM.cluster_stable()
- if not self._is_managed(node):
- return self.failure("Could not re-enable resource management")
-
- # Ignore actions for STONITH resources
- ignore = []
- (_, lines) = self.rsh(node, "crm_resource -c", verbose=1)
- for line in lines:
- if re.search("^Resource", line):
- r = AuditResource(self.CM, line)
- if r.rclass == "stonith":
-
- self.debug("Ignoring start actions for %s" % r.id)
- ignore.append(self.templates["Pat:RscOpOK"] % ("start", r.id))
-
- if self.local_badnews("ResourceActivity:", watch, ignore):
- return self.failure("Resources stopped or started after resource management was re-enabled")
-
- return ret
-
- def errorstoignore(self):
- '''Return list of errors which should be ignored'''
- return [
- r"resource( was|s were) active at shutdown",
- ]
-
- def is_applicable(self):
- return True
-
-AllTestClasses.append(Reattach)
-
-
-class SpecialTest1(CTSTest):
- '''Set up a custom test to cause quorum failure issues for Andrew'''
- def __init__(self, cm):
- CTSTest.__init__(self,cm)
- self.name = "SpecialTest1"
- self.startall = SimulStartLite(cm)
- self.restart1 = RestartTest(cm)
- self.stopall = SimulStopLite(cm)
-
- def __call__(self, node):
- '''Perform the 'SpecialTest1' test for Andrew. '''
- self.incr("calls")
-
- # Shut down all the nodes...
- ret = self.stopall(None)
- if not ret:
- return self.failure("Could not stop all nodes")
-
- # Test config recovery when the other nodes come up
- self.rsh(node, "rm -f " + BuildOptions.CIB_DIR + "/cib*")
-
- # Start the selected node
- ret = self.restart1(node)
- if not ret:
- return self.failure("Could not start "+node)
-
- # Start all remaining nodes
- ret = self.startall(None)
- if not ret:
- return self.failure("Could not start the remaining nodes")
-
- return self.success()
-
- def errorstoignore(self):
- '''Return list of errors which should be ignored'''
- # Errors that occur as a result of the CIB being wiped
- return [
- r"error.*: v1 patchset error, patch failed to apply: Application of an update diff failed",
- r"error.*: Resource start-up disabled since no STONITH resources have been defined",
- r"error.*: Either configure some or disable STONITH with the stonith-enabled option",
- r"error.*: NOTE: Clusters with shared data need STONITH to ensure data integrity",
- ]
-
-AllTestClasses.append(SpecialTest1)
-
-
-class HAETest(CTSTest):
-    '''Base class for HA Extension (HAE) tests of DLM/O2CB/OCFS2 resources'''
- def __init__(self, cm):
- CTSTest.__init__(self,cm)
- self.name = "HAETest"
- self.stopall = SimulStopLite(cm)
- self.startall = SimulStartLite(cm)
- self.is_loop = 1
-
- def setup(self, node):
- # Start all remaining nodes
- ret = self.startall(None)
- if not ret:
- return self.failure("Couldn't start all nodes")
- return self.success()
-
- def teardown(self, node):
- # Stop everything
- ret = self.stopall(None)
- if not ret:
- return self.failure("Couldn't stop all nodes")
- return self.success()
-
- def wait_on_state(self, node, resource, expected_clones, attempts=240):
- while attempts > 0:
- active = 0
- (rc, lines) = self.rsh(node, "crm_resource -r %s -W -Q" % resource, verbose=1)
-
- # Hack until crm_resource does the right thing
- if rc == 0 and lines:
- active = len(lines)
-
- if len(lines) == expected_clones:
- return 1
-
- elif rc == 1:
- self.debug("Resource %s is still inactive" % resource)
-
- elif rc == 234:
- self.logger.log("Unknown resource %s" % resource)
- return 0
-
- elif rc == 246:
- self.logger.log("Cluster is inactive")
- return 0
-
- elif rc != 0:
- self.logger.log("Call to crm_resource failed, rc=%d" % rc)
- return 0
-
- else:
-                self.debug("Resource %s is active %d times instead of %d" % (resource, active, expected_clones))
-
- attempts -= 1
- time.sleep(1)
-
- return 0
-
- def find_dlm(self, node):
- self.r_dlm = None
-
- (_, lines) = self.rsh(node, "crm_resource -c", verbose=1)
- for line in lines:
- if re.search("^Resource", line):
- r = AuditResource(self.CM, line)
- if r.rtype == "controld" and r.parent != "NA":
- self.debug("Found dlm: %s" % self.r_dlm)
- self.r_dlm = r.parent
- return 1
- return 0
-
- def find_hae_resources(self, node):
- self.r_dlm = None
- self.r_o2cb = None
- self.r_ocfs2 = []
-
- if self.find_dlm(node):
- self.find_ocfs2_resources(node)
-
- def is_applicable(self):
- if not self.is_applicable_common():
- return False
- if self.Env["Schema"] == "hae":
- return True
- return None
-
-
-class HAERoleTest(HAETest):
- def __init__(self, cm):
- '''Lars' mount/unmount test for the HA extension. '''
- HAETest.__init__(self,cm)
- self.name = "HAERoleTest"
-
- def change_state(self, node, resource, target):
- (rc, _) = self.rsh(node, "crm_resource -V -r %s -p target-role -v %s --meta" % (resource, target))
- return rc
-
- def __call__(self, node):
- self.incr("calls")
- lpc = 0
- failed = 0
- delay = 2
- done = time.time() + self.Env["loop-minutes"]*60
- self.find_hae_resources(node)
-
- clone_max = len(self.Env["nodes"])
- while time.time() <= done and not failed:
- lpc = lpc + 1
-
- self.change_state(node, self.r_dlm, "Stopped")
- if not self.wait_on_state(node, self.r_dlm, 0):
- self.failure("%s did not go down correctly" % self.r_dlm)
- failed = lpc
-
- self.change_state(node, self.r_dlm, "Started")
- if not self.wait_on_state(node, self.r_dlm, clone_max):
- self.failure("%s did not come up correctly" % self.r_dlm)
- failed = lpc
-
- if not self.wait_on_state(node, self.r_o2cb, clone_max):
- self.failure("%s did not come up correctly" % self.r_o2cb)
- failed = lpc
-
- for fs in self.r_ocfs2:
- if not self.wait_on_state(node, fs, clone_max):
- self.failure("%s did not come up correctly" % fs)
- failed = lpc
-
- if failed:
- return self.failure("iteration %d failed" % failed)
- return self.success()
-
-AllTestClasses.append(HAERoleTest)
-
-
-class HAEStandbyTest(HAETest):
- '''Toggle a node in and out of standby and verify the HAE clone resources recover'''
- def __init__(self, cm):
- HAETest.__init__(self,cm)
- self.name = "HAEStandbyTest"
-
- def change_state(self, node, resource, target):
- (rc, _) = self.rsh(node, "crm_standby -V -l reboot -v %s" % (target))
- return rc
-
- def __call__(self, node):
- self.incr("calls")
-
- lpc = 0
- failed = 0
- done = time.time() + self.Env["loop-minutes"]*60
- self.find_hae_resources(node)
-
- clone_max = len(self.Env["nodes"])
- while time.time() <= done and not failed:
- lpc = lpc + 1
-
- self.change_state(node, self.r_dlm, "true")
- if not self.wait_on_state(node, self.r_dlm, clone_max-1):
- self.failure("%s did not go down correctly" % self.r_dlm)
- failed = lpc
-
- self.change_state(node, self.r_dlm, "false")
- if not self.wait_on_state(node, self.r_dlm, clone_max):
- self.failure("%s did not come up correctly" % self.r_dlm)
- failed = lpc
-
- if not self.wait_on_state(node, self.r_o2cb, clone_max):
- self.failure("%s did not come up correctly" % self.r_o2cb)
- failed = lpc
-
- for fs in self.r_ocfs2:
- if not self.wait_on_state(node, fs, clone_max):
- self.failure("%s did not come up correctly" % fs)
- failed = lpc
-
- if failed:
- return self.failure("iteration %d failed" % failed)
- return self.success()
-
-AllTestClasses.append(HAEStandbyTest)
-
-
-class NearQuorumPointTest(CTSTest):
- '''
- This test brings larger clusters near the quorum point (50%).
- In addition, it will test doing starts and stops at the same time.
-
- Here is how I think it should work:
- - loop over the nodes and decide randomly which will be up and which
- will be down, using a 50% probability for each of up/down
- - figure out what to do to get into that state from the current state
- - in parallel, bring up those going up and bring those going down.
- '''
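-
- # Illustrative sketch (hypothetical names): the random 50/50 split described
- # above could be expressed as
- #
- #   startset = [n for n in nodes if rng.choice(["start", "stop"]) == "start"]
- #   stopset = [n for n in nodes if n not in startset]
- #
- # where "rng" is a random.Random instance and "nodes" is the cluster node list.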
-
- def __init__(self, cm):
- CTSTest.__init__(self,cm)
- self.name = "NearQuorumPoint"
-
- def __call__(self, dummy):
- '''Perform the 'NearQuorumPoint' test. '''
- self.incr("calls")
- startset = []
- stopset = []
-
- stonith = self.CM.prepare_fencing_watcher("NearQuorumPoint")
- #decide what to do with each node
- for node in self.Env["nodes"]:
- action = self.Env.random_gen.choice(["start","stop"])
- #action = self.Env.random_gen.choice(["start","stop","no change"])
- if action == "start" :
- startset.append(node)
- elif action == "stop" :
- stopset.append(node)
-
- self.debug("start nodes:" + repr(startset))
- self.debug("stop nodes:" + repr(stopset))
-
- #add search patterns
- watchpats = [ ]
- for node in stopset:
- if self.CM.ShouldBeStatus[node] == "up":
- watchpats.append(self.templates["Pat:We_stopped"] % node)
-
- for node in startset:
- if self.CM.ShouldBeStatus[node] == "down":
- #watchpats.append(self.templates["Pat:NonDC_started"] % node)
- watchpats.append(self.templates["Pat:Local_started"] % node)
- else:
- for stopping in stopset:
- if self.CM.ShouldBeStatus[stopping] == "up":
- watchpats.append(self.templates["Pat:They_stopped"] % (node, self.CM.key_for_node(stopping)))
-
- if len(watchpats) == 0:
- return self.skipped()
-
- if len(startset) != 0:
- watchpats.append(self.templates["Pat:DC_IDLE"])
-
- watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
-
- watch.set_watch()
-
- #begin actions
- for node in stopset:
- if self.CM.ShouldBeStatus[node] == "up":
- self.CM.StopaCMnoBlock(node)
-
- for node in startset:
- if self.CM.ShouldBeStatus[node] == "down":
- self.CM.StartaCMnoBlock(node)
-
- #get the result
- if watch.look_for_all():
- self.CM.cluster_stable()
- self.CM.fencing_cleanup("NearQuorumPoint", stonith)
- return self.success()
-
- self.logger.log("Warn: Patterns not found: " + repr(watch.unmatched))
-
- #get the "bad" nodes
- upnodes = []
- for node in stopset:
- if self.CM.StataCM(node) == 1:
- upnodes.append(node)
-
- downnodes = []
- for node in startset:
- if self.CM.StataCM(node) == 0:
- downnodes.append(node)
-
- self.CM.fencing_cleanup("NearQuorumPoint", stonith)
- if upnodes == [] and downnodes == []:
- self.CM.cluster_stable()
-
- # Make sure they're completely down with no residue
- for node in stopset:
- self.rsh(node, self.templates["StopCmd"])
-
- return self.success()
-
- if len(upnodes) > 0:
- self.logger.log("Warn: Unstoppable nodes: " + repr(upnodes))
-
- if len(downnodes) > 0:
- self.logger.log("Warn: Unstartable nodes: " + repr(downnodes))
-
- return self.failure()
-
- def is_applicable(self):
- return True
-
-AllTestClasses.append(NearQuorumPointTest)
-
-
-class RollingUpgradeTest(CTSTest):
- '''Perform a rolling upgrade of the cluster'''
- def __init__(self, cm):
- CTSTest.__init__(self,cm)
- self.name = "RollingUpgrade"
- self.start = StartTest(cm)
- self.stop = StopTest(cm)
- self.stopall = SimulStopLite(cm)
- self.startall = SimulStartLite(cm)
-
- def setup(self, node):
- # Stop all nodes, downgrade them, then start them all again
- ret = self.stopall(None)
- if not ret:
- return self.failure("Couldn't stop all nodes")
-
- for node in self.Env["nodes"]:
- if not self.downgrade(node, None):
- return self.failure("Couldn't downgrade %s" % node)
-
- ret = self.startall(None)
- if not ret:
- return self.failure("Couldn't start all nodes")
- return self.success()
-
- def teardown(self, node):
- # Stop everything and upgrade all nodes back to the current version
- ret = self.stopall(None)
- if not ret:
- return self.failure("Couldn't stop all nodes")
-
- for node in self.Env["nodes"]:
- if not self.upgrade(node, None):
- return self.failure("Couldn't upgrade %s" % node)
-
- return self.success()
-
- def install(self, node, version, start=1, flags="--force"):
-
- target_dir = "/tmp/rpm-%s" % version
- src_dir = "%s/%s" % (self.Env["rpm-dir"], version)
-
- self.logger.log("Installing %s on %s with %s" % (version, node, flags))
- if not self.stop(node):
- return self.failure("stop failure: "+node)
-
- self.rsh(node, "mkdir -p %s" % target_dir)
- self.rsh(node, "rm -f %s/*.rpm" % target_dir)
- (_, lines) = self.rsh(node, "ls -1 %s/*.rpm" % src_dir, verbose=1)
- for line in lines:
- line = line[:-1]
- rc = self.rsh.copy("%s" % (line), "%s:%s/" % (node, target_dir))
- self.rsh(node, "rpm -Uvh %s %s/*.rpm" % (flags, target_dir))
-
- if start and not self.start(node):
- return self.failure("start failure: "+node)
-
- return self.success()
-
- def upgrade(self, node, start=1):
- return self.install(node, self.Env["current-version"], start)
-
- def downgrade(self, node, start=1):
- return self.install(node, self.Env["previous-version"], start, "--force --nodeps")
-
- def __call__(self, node):
- '''Perform the 'Rolling Upgrade' test. '''
- self.incr("calls")
-
- for node in self.Env["nodes"]:
- if not self.upgrade(node):
- return self.failure("Couldn't upgrade %s" % node)
-
- self.CM.cluster_stable()
-
- return self.success()
-
- def is_applicable(self):
- if not self.is_applicable_common():
- return None
-
- if not "rpm-dir" in list(self.Env.keys()):
- return None
- if not "current-version" in list(self.Env.keys()):
- return None
- if not "previous-version" in list(self.Env.keys()):
- return None
-
- return 1
-
-# Register RollingUpgradeTest as a good test to run
-AllTestClasses.append(RollingUpgradeTest)
-
-
-class BSC_AddResource(CTSTest):
- '''Add a resource to the cluster'''
- def __init__(self, cm):
- CTSTest.__init__(self, cm)
- self.name = "AddResource"
- self.resource_offset = 0
- self.cib_cmd = """cibadmin -C -o %s -X '%s' """
-
- def __call__(self, node):
- self.incr("calls")
- self.resource_offset = self.resource_offset + 1
-
- r_id = "bsc-rsc-%s-%d" % (node, self.resource_offset)
- start_pat = "pacemaker-controld.*%s_start_0.*confirmed.*ok"
-
- patterns = []
- patterns.append(start_pat % r_id)
-
- watch = self.create_watch(patterns, self.Env["DeadTime"])
- watch.set_watch()
-
- ip = self.NextIP()
- if not self.make_ip_resource(node, r_id, "ocf", "IPaddr", ip):
- return self.failure("Make resource %s failed" % r_id)
-
- failed = 0
- watch_result = watch.look_for_all()
- if watch.unmatched:
- for regex in watch.unmatched:
- self.logger.log ("Warn: Pattern not found: %s" % (regex))
- failed = 1
-
- if failed:
- return self.failure("Resource pattern(s) not found")
-
- if not self.CM.cluster_stable(self.Env["DeadTime"]):
- return self.failure("Unstable cluster")
-
- return self.success()
-
- def NextIP(self):
- ip = self.Env["IPBase"]
- if ":" in ip:
- fields = ip.rpartition(":")
- fields[2] = str(hex(int(fields[2], 16)+1))
- print(str(hex(int(f[2], 16)+1)))
- else:
- fields = ip.rpartition('.')
- fields[2] = str(int(fields[2])+1)
-
- ip = fields[0] + fields[1] + fields[3];
- self.Env["IPBase"] = ip
- return ip.strip()
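-
- # Illustrative example (hypothetical base addresses): with IPBase set to
- # "10.0.0.10", successive NextIP() calls yield "10.0.0.11", "10.0.0.12", and
- # so on; with an IPv6 base such as "fe80::a", the last field is incremented
- # in hex ("fe80::b", "fe80::c", ...).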
-
- def make_ip_resource(self, node, id, rclass, type, ip):
- self.logger.log("Creating %s:%s:%s (%s) on %s" % (rclass,type,id,ip,node))
- rsc_xml="""
-<primitive id="%s" class="%s" type="%s" provider="heartbeat">
- <instance_attributes id="%s"><attributes>
- <nvpair id="%s" name="ip" value="%s"/>
- </attributes></instance_attributes>
-</primitive>""" % (id, rclass, type, id, id, ip)
-
- node_constraint = """
- <rsc_location id="run_%s" rsc="%s">
- <rule id="pref_run_%s" score="100">
- <expression id="%s_loc_expr" attribute="#uname" operation="eq" value="%s"/>
- </rule>
- </rsc_location>""" % (id, id, id, id, node)
-
- rc = 0
- (rc, _) = self.rsh(node, self.cib_cmd % ("constraints", node_constraint), verbose=1)
- if rc != 0:
- self.logger.log("Constraint creation failed: %d" % rc)
- return None
-
- (rc, _) = self.rsh(node, self.cib_cmd % ("resources", rsc_xml), verbose=1)
- if rc != 0:
- self.logger.log("Resource creation failed: %d" % rc)
- return None
-
- return 1
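-
- # Illustrative example (hypothetical resource id): for r_id "bsc-rsc-node1-1",
- # the primitive above is injected with a command of the form
- #
- #   cibadmin -C -o resources -X '<primitive id="bsc-rsc-node1-1" ... />'
- #
- # and the location constraint is injected the same way under "constraints".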
-
- def is_applicable(self):
- if self.Env["DoBSC"]:
- return True
- return None
-
-AllTestClasses.append(BSC_AddResource)
-
-
-class SimulStopLite(CTSTest):
- '''Stop any active nodes ~ simultaneously'''
- def __init__(self, cm):
- CTSTest.__init__(self,cm)
- self.name = "SimulStopLite"
-
- def __call__(self, dummy):
- '''Perform the 'SimulStopLite' setup work. '''
- self.incr("calls")
-
- self.debug("Setup: " + self.name)
-
- # We ignore the "node" parameter...
- watchpats = [ ]
-
- for node in self.Env["nodes"]:
- if self.CM.ShouldBeStatus[node] == "up":
- self.incr("WasStarted")
- watchpats.append(self.templates["Pat:We_stopped"] % node)
-
- if len(watchpats) == 0:
- return self.success()
-
- # Stop all the nodes - at about the same time...
- watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
-
- watch.set_watch()
- self.set_timer()
- for node in self.Env["nodes"]:
- if self.CM.ShouldBeStatus[node] == "up":
- self.CM.StopaCMnoBlock(node)
- if watch.look_for_all():
- # Make sure they're completely down with no residue
- for node in self.Env["nodes"]:
- self.rsh(node, self.templates["StopCmd"])
-
- return self.success()
-
- did_fail = 0
- up_nodes = []
- for node in self.Env["nodes"]:
- if self.CM.StataCM(node) == 1:
- did_fail = 1
- up_nodes.append(node)
-
- if did_fail:
- return self.failure("Active nodes exist: " + repr(up_nodes))
-
- self.logger.log("Warn: All nodes stopped but CTS didn't detect: "
- + repr(watch.unmatched))
-
- return self.failure("Missing log message: "+repr(watch.unmatched))
-
- def is_applicable(self):
- '''SimulStopLite is a setup test and never applicable'''
- return False
-
-
-class SimulStartLite(CTSTest):
- '''Start any stopped nodes ~ simultaneously'''
- def __init__(self, cm):
- CTSTest.__init__(self,cm)
- self.name = "SimulStartLite"
-
- def __call__(self, dummy):
- '''Perform the 'SimulStartLite' setup work. '''
- self.incr("calls")
- self.debug("Setup: " + self.name)
-
- # We ignore the "node" parameter...
- node_list = []
- for node in self.Env["nodes"]:
- if self.CM.ShouldBeStatus[node] == "down":
- self.incr("WasStopped")
- node_list.append(node)
-
- self.set_timer()
- while len(node_list) > 0:
- # Repeat until all nodes come up
- watchpats = [ ]
-
- uppat = self.templates["Pat:NonDC_started"]
- if self.CM.upcount() == 0:
- uppat = self.templates["Pat:Local_started"]
-
- watchpats.append(self.templates["Pat:DC_IDLE"])
- for node in node_list:
- watchpats.append(uppat % node)
- watchpats.append(self.templates["Pat:InfraUp"] % node)
- watchpats.append(self.templates["Pat:PacemakerUp"] % node)
-
- # Start all the nodes - at about the same time...
- watch = self.create_watch(watchpats, self.Env["DeadTime"]+10)
- watch.set_watch()
-
- stonith = self.CM.prepare_fencing_watcher(self.name)
-
- for node in node_list:
- self.CM.StartaCMnoBlock(node)
-
- watch.look_for_all()
-
- node_list = self.CM.fencing_cleanup(self.name, stonith)
-
- if node_list == None:
- return self.failure("Cluster did not stabilize")
-
- # Remove node_list messages from watch.unmatched
- for node in node_list:
- self.logger.debug("Dealing with stonith operations for %s" % repr(node_list))
- if watch.unmatched:
- try:
- watch.unmatched.remove(uppat % node)
- except:
- self.debug("Already matched: %s" % (uppat % node))
- try:
- watch.unmatched.remove(self.templates["Pat:InfraUp"] % node)
- except:
- self.debug("Already matched: %s" % (self.templates["Pat:InfraUp"] % node))
- try:
- watch.unmatched.remove(self.templates["Pat:PacemakerUp"] % node)
- except:
- self.debug("Already matched: %s" % (self.templates["Pat:PacemakerUp"] % node))
-
- if watch.unmatched:
- for regex in watch.unmatched:
- self.logger.log ("Warn: Startup pattern not found: %s" %(regex))
-
- if not self.CM.cluster_stable():
- return self.failure("Cluster did not stabilize")
-
- did_fail = 0
- unstable = []
- for node in self.Env["nodes"]:
- if self.CM.StataCM(node) == 0:
- did_fail = 1
- unstable.append(node)
-
- if did_fail:
- return self.failure("Unstarted nodes exist: " + repr(unstable))
-
- unstable = []
- for node in self.Env["nodes"]:
- if not self.CM.node_stable(node):
- did_fail = 1
- unstable.append(node)
-
- if did_fail:
- return self.failure("Unstable cluster nodes exist: " + repr(unstable))
-
- return self.success()
-
- def is_applicable(self):
- '''SimulStartLite is a setup test and never applicable'''
- return False
-
-
-def TestList(cm, audits):
- result = []
- for testclass in AllTestClasses:
- bound_test = testclass(cm)
- if bound_test.is_applicable():
- bound_test.Audits = audits
- result.append(bound_test)
- return result
-
-
-class RemoteLXC(CTSTest):
- def __init__(self, cm):
- CTSTest.__init__(self,cm)
- self.name = "RemoteLXC"
- self.start = StartTest(cm)
- self.startall = SimulStartLite(cm)
- self.num_containers = 2
- self.is_container = 1
- self.failed = 0
- self.fail_string = ""
-
- def start_lxc_simple(self, node):
-
- # clean up any artifacts left lying around by a previous test.
- self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null")
-
- # generate the containers, put them in the config, add some resources to them
- pats = [ ]
- watch = self.create_watch(pats, 120)
- watch.set_watch()
- pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc1"))
- pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc2"))
- pats.append(self.templates["Pat:RscOpOK"] % ("start", "lxc-ms"))
- pats.append(self.templates["Pat:RscOpOK"] % ("promote", "lxc-ms"))
-
- self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -g -a -m -s -c %d &>/dev/null" % self.num_containers)
- self.set_timer("remoteSimpleInit")
- watch.look_for_all()
- self.log_timer("remoteSimpleInit")
- if watch.unmatched:
- self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
- self.failed = 1
-
- def cleanup_lxc_simple(self, node):
-
- pats = [ ]
- # if the test failed, attempt to clean up the cib and libvirt environment
- # as best as possible
- if self.failed == 1:
- # restore libvirt and cib
- self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null")
- return
-
- watch = self.create_watch(pats, 120)
- watch.set_watch()
-
- pats.append(self.templates["Pat:RscOpOK"] % ("stop", "container1"))
- pats.append(self.templates["Pat:RscOpOK"] % ("stop", "container2"))
-
- self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -p &>/dev/null")
- self.set_timer("remoteSimpleCleanup")
- watch.look_for_all()
- self.log_timer("remoteSimpleCleanup")
-
- if watch.unmatched:
- self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
- self.failed = 1
-
- # cleanup libvirt
- self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -s -R &>/dev/null")
-
- def __call__(self, node):
- '''Perform the 'RemoteLXC' test. '''
- self.incr("calls")
-
- ret = self.startall(None)
- if not ret:
- return self.failure("Setup failed, start all nodes failed.")
-
- (rc, _) = self.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -v &>/dev/null")
- if rc == 1:
- self.log("Environment test for lxc support failed.")
- return self.skipped()
-
- self.start_lxc_simple(node)
- self.cleanup_lxc_simple(node)
-
- self.debug("Waiting for the cluster to recover")
- self.CM.cluster_stable()
-
- if self.failed == 1:
- return self.failure(self.fail_string)
-
- return self.success()
-
- def errorstoignore(self):
- '''Return list of errors which should be ignored'''
- return [
- r"Updating failcount for ping",
- r"schedulerd.*: Recover\s+(ping|lxc-ms|container)\s+\(.*\)",
- # The orphaned lxc-ms resource causes an expected transition error
- # that is a result of the scheduler not having knowledge that the
- # promotable resource used to be a clone. As a result, it looks like that
- # resource is running in multiple locations when it shouldn't... But in
- # this instance we know why this error is occurring and that it is expected.
- r"Calculated [Tt]ransition .*pe-error",
- r"Resource lxc-ms .* is active on 2 nodes attempting recovery",
- r"Unknown operation: fail",
- r"VirtualDomain.*ERROR: Unable to determine emulator",
- ]
-
-AllTestClasses.append(RemoteLXC)
-
-
-class RemoteDriver(CTSTest):
-
- def __init__(self, cm):
- CTSTest.__init__(self,cm)
- self.name = self.__class__.__name__
- self.start = StartTest(cm)
- self.startall = SimulStartLite(cm)
- self.stop = StopTest(cm)
- self.remote_rsc = "remote-rsc"
- self.cib_cmd = """cibadmin -C -o %s -X '%s' """
- self.reset()
-
- def reset(self):
- self.pcmk_started = 0
- self.failed = False
- self.fail_string = ""
- self.remote_node_added = 0
- self.remote_rsc_added = 0
- self.remote_use_reconnect_interval = self.Env.random_gen.choice([True,False])
-
- def fail(self, msg):
- """ Mark test as failed. """
-
- self.failed = True
-
- # Always log the failure.
- self.logger.log(msg)
-
- # Use first failure as test status, as it's likely to be most useful.
- if not self.fail_string:
- self.fail_string = msg
-
- def get_othernode(self, node):
- for othernode in self.Env["nodes"]:
- if othernode == node:
- # we don't want to try to use the cib on the node we just shut down;
- # find a cluster node that is not our soon-to-be remote node.
- continue
- else:
- return othernode
-
- def del_rsc(self, node, rsc):
- othernode = self.get_othernode(node)
- (rc, _) = self.rsh(othernode, "crm_resource -D -r %s -t primitive" % (rsc))
- if rc != 0:
- self.fail("Removal of resource '%s' failed" % rsc)
-
- def add_rsc(self, node, rsc_xml):
- othernode = self.get_othernode(node)
- (rc, _) = self.rsh(othernode, self.cib_cmd % ("resources", rsc_xml))
- if rc != 0:
- self.fail("resource creation failed")
-
- def add_primitive_rsc(self, node):
- rsc_xml = """
-<primitive class="ocf" id="%(node)s" provider="heartbeat" type="Dummy">
- <meta_attributes id="%(node)s-meta_attributes"/>
- <operations>
- <op id="%(node)s-monitor-interval-20s" interval="20s" name="monitor"/>
- </operations>
-</primitive>""" % { "node": self.remote_rsc }
- self.add_rsc(node, rsc_xml)
- if not self.failed:
- self.remote_rsc_added = 1
-
- def add_connection_rsc(self, node):
- rsc_xml = """
-<primitive class="ocf" id="%(node)s" provider="pacemaker" type="remote">
- <instance_attributes id="%(node)s-instance_attributes">
- <nvpair id="%(node)s-instance_attributes-server" name="server" value="%(server)s"/>
-""" % { "node": self.remote_node, "server": node }
-
- if self.remote_use_reconnect_interval:
- # Set reconnect interval on resource
- rsc_xml = rsc_xml + """
- <nvpair id="%s-instance_attributes-reconnect_interval" name="reconnect_interval" value="60s"/>
-""" % (self.remote_node)
-
- rsc_xml = rsc_xml + """
- </instance_attributes>
- <operations>
- <op id="%(node)s-start" name="start" interval="0" timeout="120s"/>
- <op id="%(node)s-monitor-20s" name="monitor" interval="20s" timeout="45s"/>
- </operations>
-</primitive>
-""" % { "node": self.remote_node }
-
- self.add_rsc(node, rsc_xml)
- if not self.failed:
- self.remote_node_added = 1
-
- def disable_services(self, node):
- self.corosync_enabled = self.Env.service_is_enabled(node, "corosync")
- if self.corosync_enabled:
- self.Env.disable_service(node, "corosync")
-
- self.pacemaker_enabled = self.Env.service_is_enabled(node, "pacemaker")
- if self.pacemaker_enabled:
- self.Env.disable_service(node, "pacemaker")
-
- def restore_services(self, node):
- if self.corosync_enabled:
- self.Env.enable_service(node, "corosync")
-
- if self.pacemaker_enabled:
- self.Env.enable_service(node, "pacemaker")
-
- def stop_pcmk_remote(self, node):
- # disable pcmk remote
- for i in range(10):
- (rc, _) = self.rsh(node, "service pacemaker_remote stop")
- if rc != 0:
- time.sleep(6)
- else:
- break
-
- def start_pcmk_remote(self, node):
- for i in range(10):
- (rc, _) = self.rsh(node, "service pacemaker_remote start")
- if rc != 0:
- time.sleep(6)
- else:
- self.pcmk_started = 1
- break
-
- def freeze_pcmk_remote(self, node):
- """ Simulate a Pacemaker Remote daemon failure. """
-
- # We freeze the process.
- self.rsh(node, "killall -STOP pacemaker-remoted")
-
- def resume_pcmk_remote(self, node):
- # We resume the process.
- self.rsh(node, "killall -CONT pacemaker-remoted")
-
- def start_metal(self, node):
- # Cluster nodes are reused as remote nodes in remote tests. If cluster
- # services were enabled at boot and the remote node got fenced, the
- # cluster stack would rejoin on reboot instead of the expected remote
- # node, and pacemaker_remote would not be able to start. Depending on
- # the timing, the test might then be unable to recover gracefully.
- #
- # Temporarily disable any enabled cluster services.
- self.disable_services(node)
-
- pcmk_started = 0
-
- # make sure the resource doesn't already exist for some reason
- self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_rsc))
- self.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_node))
-
- if not self.stop(node):
- self.fail("Failed to shutdown cluster node %s" % node)
- return
-
- self.start_pcmk_remote(node)
-
- if self.pcmk_started == 0:
- self.fail("Failed to start pacemaker_remote on node %s" % node)
- return
-
- # Convert node to baremetal now that it has shut down the cluster stack
- pats = [ ]
- watch = self.create_watch(pats, 120)
- watch.set_watch()
- pats.append(self.templates["Pat:RscOpOK"] % ("start", self.remote_node))
- pats.append(self.templates["Pat:DC_IDLE"])
-
- self.add_connection_rsc(node)
-
- self.set_timer("remoteMetalInit")
- watch.look_for_all()
- self.log_timer("remoteMetalInit")
- if watch.unmatched:
- self.fail("Unmatched patterns: %s" % watch.unmatched)
-
- def migrate_connection(self, node):
- if self.failed:
- return
-
- pats = [ ]
- pats.append(self.templates["Pat:RscOpOK"] % ("migrate_to", self.remote_node))
- pats.append(self.templates["Pat:RscOpOK"] % ("migrate_from", self.remote_node))
- pats.append(self.templates["Pat:DC_IDLE"])
- watch = self.create_watch(pats, 120)
- watch.set_watch()
-
- (rc, _) = self.rsh(node, "crm_resource -M -r %s" % (self.remote_node), verbose=1)
- if rc != 0:
- self.fail("failed to move remote node connection resource")
- return
-
- self.set_timer("remoteMetalMigrate")
- watch.look_for_all()
- self.log_timer("remoteMetalMigrate")
-
- if watch.unmatched:
- self.fail("Unmatched patterns: %s" % watch.unmatched)
- return
-
- def fail_rsc(self, node):
- if self.failed:
- return
-
- watchpats = [ ]
- watchpats.append(self.templates["Pat:RscRemoteOpOK"] % ("stop", self.remote_rsc, self.remote_node))
- watchpats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node))
- watchpats.append(self.templates["Pat:DC_IDLE"])
-
- watch = self.create_watch(watchpats, 120)
- watch.set_watch()
-
- self.debug("causing dummy rsc to fail.")
-
- self.rsh(node, "rm -f /var/run/resource-agents/Dummy*")
-
- self.set_timer("remoteRscFail")
- watch.look_for_all()
- self.log_timer("remoteRscFail")
- if watch.unmatched:
- self.fail("Unmatched patterns during rsc fail: %s" % watch.unmatched)
-
- def fail_connection(self, node):
- if self.failed:
- return
-
- watchpats = [ ]
- watchpats.append(self.templates["Pat:Fencing_ok"] % self.remote_node)
- watchpats.append(self.templates["Pat:NodeFenced"] % self.remote_node)
-
- watch = self.create_watch(watchpats, 120)
- watch.set_watch()
-
- # freeze the pcmk remote daemon. this will result in fencing
- self.debug("Force stopped active remote node")
- self.freeze_pcmk_remote(node)
-
- self.debug("Waiting for remote node to be fenced.")
- self.set_timer("remoteMetalFence")
- watch.look_for_all()
- self.log_timer("remoteMetalFence")
- if watch.unmatched:
- self.fail("Unmatched patterns: %s" % watch.unmatched)
- return
-
- self.debug("Waiting for the remote node to come back up")
- self.CM.ns.wait_for_node(node, 120)
-
- pats = [ ]
- watch = self.create_watch(pats, 240)
- watch.set_watch()
- pats.append(self.templates["Pat:RscOpOK"] % ("start", self.remote_node))
- if self.remote_rsc_added == 1:
- pats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node))
-
- # start the remote node again and watch it integrate back into the cluster.
- self.start_pcmk_remote(node)
- if self.pcmk_started == 0:
- self.fail("Failed to start pacemaker_remote on node %s" % node)
- return
-
- self.debug("Waiting for remote node to rejoin cluster after being fenced.")
- self.set_timer("remoteMetalRestart")
- watch.look_for_all()
- self.log_timer("remoteMetalRestart")
- if watch.unmatched:
- self.fail("Unmatched patterns: %s" % watch.unmatched)
- return
-
- def add_dummy_rsc(self, node):
- if self.failed:
- return
-
- # verify we can put a resource on the remote node
- pats = [ ]
- watch = self.create_watch(pats, 120)
- watch.set_watch()
- pats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self.remote_rsc, self.remote_node))
- pats.append(self.templates["Pat:DC_IDLE"])
-
- # Add a resource that must live on remote-node
- self.add_primitive_rsc(node)
-
- # force that rsc to prefer the remote node.
- (rc, _) = self.CM.rsh(node, "crm_resource -M -r %s -N %s -f" % (self.remote_rsc, self.remote_node), verbose=1)
- if rc != 0:
- self.fail("Failed to place remote resource on remote node.")
- return
-
- self.set_timer("remoteMetalRsc")
- watch.look_for_all()
- self.log_timer("remoteMetalRsc")
- if watch.unmatched:
- self.fail("Unmatched patterns: %s" % watch.unmatched)
-
- def test_attributes(self, node):
- if self.failed:
- return
-
- # This verifies permanent attributes can be set on a remote-node. It also
- # verifies the remote-node can edit its own cib node section remotely.
- (rc, line) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -v testval -N %s" % (self.remote_node), verbose=1)
- if rc != 0:
- self.fail("Failed to set remote-node attribute. rc:%s output:%s" % (rc, line))
- return
-
- (rc, _) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -q -N %s" % (self.remote_node), verbose=1)
- if rc != 0:
- self.fail("Failed to get remote-node attribute")
- return
-
- (rc, _) = self.CM.rsh(node, "crm_attribute -l forever -n testattr -D -N %s" % (self.remote_node), verbose=1)
- if rc != 0:
- self.fail("Failed to delete remote-node attribute")
- return
-
- def cleanup_metal(self, node):
- self.restore_services(node)
-
- if self.pcmk_started == 0:
- return
-
- pats = [ ]
-
- watch = self.create_watch(pats, 120)
- watch.set_watch()
-
- if self.remote_rsc_added == 1:
- pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.remote_rsc))
- if self.remote_node_added == 1:
- pats.append(self.templates["Pat:RscOpOK"] % ("stop", self.remote_node))
-
- self.set_timer("remoteMetalCleanup")
-
- self.resume_pcmk_remote(node)
-
- if self.remote_rsc_added == 1:
-
- # Remove dummy resource added for remote node tests
- self.debug("Cleaning up dummy rsc put on remote node")
- self.rsh(self.get_othernode(node), "crm_resource -U -r %s" % self.remote_rsc)
- self.del_rsc(node, self.remote_rsc)
-
- if self.remote_node_added == 1:
-
- # Remove remote node's connection resource
- self.debug("Cleaning up remote node connection resource")
- self.rsh(self.get_othernode(node), "crm_resource -U -r %s" % (self.remote_node))
- self.del_rsc(node, self.remote_node)
-
- watch.look_for_all()
- self.log_timer("remoteMetalCleanup")
-
- if watch.unmatched:
- self.fail("Unmatched patterns: %s" % watch.unmatched)
-
- self.stop_pcmk_remote(node)
-
- self.debug("Waiting for the cluster to recover")
- self.CM.cluster_stable()
-
- if self.remote_node_added == 1:
- # Remove remote node itself
- self.debug("Cleaning up node entry for remote node")
- self.rsh(self.get_othernode(node), "crm_node --force --remove %s" % self.remote_node)
-
- def setup_env(self, node):
-
- self.remote_node = "remote-%s" % (node)
-
- # we assume that if all nodes have a key, it is the right one.
- # If any node is missing the remote key, we regenerate it everywhere.
- if self.rsh.exists_on_all("/etc/pacemaker/authkey", self.Env["nodes"]):
- return
-
- # create key locally
- (handle, keyfile) = tempfile.mkstemp(".cts")
- os.close(handle)
- subprocess.check_call(["dd", "if=/dev/urandom", "of=%s" % keyfile, "bs=4096", "count=1"],
- stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
-
- # sync key throughout the cluster
- for node in self.Env["nodes"]:
- self.rsh(node, "mkdir -p --mode=0750 /etc/pacemaker")
- self.rsh.copy(keyfile, "root@%s:/etc/pacemaker/authkey" % node)
- self.rsh(node, "chgrp haclient /etc/pacemaker /etc/pacemaker/authkey")
- self.rsh(node, "chmod 0640 /etc/pacemaker/authkey")
- os.unlink(keyfile)
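-
- # Illustrative equivalent of the key distribution above, done by hand
- # (hypothetical host names): generate one key and copy it to every node,
- # since the key must be identical cluster-wide:
- #
- #   dd if=/dev/urandom of=/tmp/authkey bs=4096 count=1
- #   for n in node1 node2 node3; do
- #       scp /tmp/authkey root@$n:/etc/pacemaker/authkey
- #       ssh root@$n "chgrp haclient /etc/pacemaker/authkey; chmod 0640 /etc/pacemaker/authkey"
- #   done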
-
- def is_applicable(self):
- if not self.is_applicable_common():
- return False
-
- for node in self.Env["nodes"]:
- (rc, _) = self.rsh(node, "which pacemaker-remoted >/dev/null 2>&1")
- if rc != 0:
- return False
- return True
-
- def start_new_test(self, node):
- self.incr("calls")
- self.reset()
-
- ret = self.startall(None)
- if not ret:
- return self.failure("setup failed: could not start all nodes")
-
- self.setup_env(node)
- self.start_metal(node)
- self.add_dummy_rsc(node)
- return True
-
- def __call__(self, node):
- return self.failure("This base class is not meant to be called directly.")
-
- def errorstoignore(self):
- '''Return list of errors which should be ignored'''
- return [ r"""is running on remote.*which isn't allowed""",
- r"""Connection terminated""",
- r"""Could not send remote""",
- ]
-
-# RemoteDriver is just a base class for other tests, so it is not added to AllTestClasses
-
-
-class RemoteBasic(RemoteDriver):
-
- def __call__(self, node):
- '''Perform the 'RemoteBasic' test. '''
-
- if not self.start_new_test(node):
- return self.failure(self.fail_string)
-
- self.test_attributes(node)
- self.cleanup_metal(node)
-
- self.debug("Waiting for the cluster to recover")
- self.CM.cluster_stable()
- if self.failed:
- return self.failure(self.fail_string)
-
- return self.success()
-
-AllTestClasses.append(RemoteBasic)
-
-class RemoteStonithd(RemoteDriver):
-
- def __call__(self, node):
- '''Perform the 'RemoteStonithd' test. '''
-
- if not self.start_new_test(node):
- return self.failure(self.fail_string)
-
- self.fail_connection(node)
- self.cleanup_metal(node)
-
- self.debug("Waiting for the cluster to recover")
- self.CM.cluster_stable()
- if self.failed:
- return self.failure(self.fail_string)
-
- return self.success()
-
- def is_applicable(self):
- if not RemoteDriver.is_applicable(self):
- return False
-
- if "DoFencing" in list(self.Env.keys()):
- return self.Env["DoFencing"]
-
- return True
-
- def errorstoignore(self):
- ignore_pats = [
- r"Lost connection to Pacemaker Remote node",
- r"Software caused connection abort",
- r"pacemaker-controld.*:\s+error.*: Operation remote-.*_monitor",
- r"pacemaker-controld.*:\s+error.*: Result of monitor operation for remote-.*",
- r"schedulerd.*:\s+Recover\s+remote-.*\s+\(.*\)",
- r"error: Result of monitor operation for .* on remote-.*: Internal communication failure",
- ]
-
- ignore_pats.extend(RemoteDriver.errorstoignore(self))
- return ignore_pats
-
-AllTestClasses.append(RemoteStonithd)
-
-
-class RemoteMigrate(RemoteDriver):
-
- def __call__(self, node):
- '''Perform the 'RemoteMigrate' test. '''
-
- if not self.start_new_test(node):
- return self.failure(self.fail_string)
-
- self.migrate_connection(node)
- self.cleanup_metal(node)
-
- self.debug("Waiting for the cluster to recover")
- self.CM.cluster_stable()
- if self.failed:
- return self.failure(self.fail_string)
-
- return self.success()
-
- def is_applicable(self):
- if not RemoteDriver.is_applicable(self):
- return 0
- # This test requires at least three nodes: one to convert to a
- # remote node, one to host the connection originally, and one
- # to migrate the connection to.
- if len(self.Env["nodes"]) < 3:
- return 0
- return 1
-
-AllTestClasses.append(RemoteMigrate)
-
-
-class RemoteRscFailure(RemoteDriver):
-
- def __call__(self, node):
- '''Perform the 'RemoteRscFailure' test. '''
-
- if not self.start_new_test(node):
- return self.failure(self.fail_string)
-
- # This is an important step. We are migrating the connection
- # before failing the resource. This verifies that the migration
- # has properly maintained control over the remote-node.
- self.migrate_connection(node)
-
- self.fail_rsc(node)
- self.cleanup_metal(node)
-
- self.debug("Waiting for the cluster to recover")
- self.CM.cluster_stable()
- if self.failed:
- return self.failure(self.fail_string)
-
- return self.success()
-
- def errorstoignore(self):
- ignore_pats = [
- r"schedulerd.*: Recover\s+remote-rsc\s+\(.*\)",
- r"Dummy.*: No process state file found",
- ]
-
- ignore_pats.extend(RemoteDriver.errorstoignore(self))
- return ignore_pats
-
- def is_applicable(self):
- if not RemoteDriver.is_applicable(self):
- return 0
- # This test requires at least three nodes: one to convert to a
- # remote node, one to host the connection originally, and one
- # to migrate the connection to.
- if len(self.Env["nodes"]) < 3:
- return 0
- return 1
-
-AllTestClasses.append(RemoteRscFailure)
-
-# vim:ts=4:sw=4:et:
diff --git a/cts/lab/ClusterManager.py b/cts/lab/ClusterManager.py
deleted file mode 100644
index fda4cfb..0000000
--- a/cts/lab/ClusterManager.py
+++ /dev/null
@@ -1,940 +0,0 @@
-""" ClusterManager class for Pacemaker's Cluster Test Suite (CTS)
-"""
-
-__copyright__ = """Copyright 2000-2023 the Pacemaker project contributors.
-Certain portions by Huang Zhen <zhenhltc@cn.ibm.com> are copyright 2004
-International Business Machines. The version control history for this file
-may have further details."""
-__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
-
-import os
-import re
-import time
-
-from collections import UserDict
-
-from cts.CIB import ConfigFactory
-from cts.CTStests import AuditResource
-
-from pacemaker.buildoptions import BuildOptions
-from pacemaker._cts.CTS import NodeStatus, Process
-from pacemaker._cts.environment import EnvFactory
-from pacemaker._cts.logging import LogFactory
-from pacemaker._cts.patterns import PatternSelector
-from pacemaker._cts.remote import RemoteFactory
-from pacemaker._cts.watcher import LogWatcher
-
-class ClusterManager(UserDict):
- '''The Cluster Manager class.
- This is a subclass of the Python dictionary class
- (because it contains lots of {name, value} pairs,
- not because its behavior is all that similar to a
- dictionary in other ways).
-
- This is an abstract class which implements high-level
- operations on the cluster and/or its cluster managers.
- Actual cluster manager classes are subclassed from this type.
-
- One of the things we do is track the state we think every node should
- be in.
- '''
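-
- # Illustrative example (hypothetical node name): the state we expect each
- # node to be in is tracked in self.ShouldBeStatus, e.g.
- #
- #   self.ShouldBeStatus["node1"] = "up"    # set after StartaCM("node1")
- #   self.ShouldBeStatus["node1"] = "down"  # set after StopaCM("node1")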
-
- def __InitialConditions(self):
- #if os.geteuid() != 0:
- # raise ValueError("Must Be Root!")
- pass
-
- def _finalConditions(self):
- for key in list(self.keys()):
- if self[key] == None:
- raise ValueError("Improper derivation: self[" + key + "] must be overridden by subclass.")
-
- def __init__(self):
- self.Env = EnvFactory().getInstance()
- self.templates = PatternSelector(self.Env["Name"])
- self.__InitialConditions()
- self.logger = LogFactory()
- self.TestLoggingLevel=0
- self.data = {}
- self.name = self.Env["Name"]
-
- self.rsh = RemoteFactory().getInstance()
- self.ShouldBeStatus={}
- self.ns = NodeStatus(self.Env)
- self.OurNode = os.uname()[1].lower()
- self.__instance_errorstoignore = []
-
- self.cib_installed = 0
- self.config = None
- self.cluster_monitor = 0
- self.use_short_names = 1
-
- if self.Env["DoBSC"]:
- del self.templates["Pat:They_stopped"]
-
- self._finalConditions()
-
- self.check_transitions = 0
- self.check_elections = 0
- self.CIBsync = {}
- self.CibFactory = ConfigFactory(self)
- self.cib = self.CibFactory.createConfig(self.Env["Schema"])
-
- def __getitem__(self, key):
- if key == "Name":
- return self.name
-
- print("FIXME: Getting %s from %s" % (key, repr(self)))
- if key in self.data:
- return self.data[key]
-
- return self.templates.get_patterns(key)
-
- def __setitem__(self, key, value):
- print("FIXME: Setting %s=%s on %s" % (key, value, repr(self)))
- self.data[key] = value
-
- def key_for_node(self, node):
- return node
-
- def instance_errorstoignore_clear(self):
- '''Allows the test scenario to reset instance errors to ignore on each iteration.'''
- self.__instance_errorstoignore = []
-
- def instance_errorstoignore(self):
- '''Return list of errors which are 'normal' for a specific test instance'''
- return self.__instance_errorstoignore
-
- def log(self, args):
- self.logger.log(args)
-
- def debug(self, args):
- self.logger.debug(args)
-
- def upcount(self):
- '''How many nodes are up?'''
- count = 0
- for node in self.Env["nodes"]:
- if self.ShouldBeStatus[node] == "up":
- count = count + 1
- return count
-
- def install_support(self, command="install"):
- for node in self.Env["nodes"]:
- self.rsh(node, BuildOptions.DAEMON_DIR + "/cts-support " + command)
-
- def prepare_fencing_watcher(self, name):
- # If we don't have quorum now but get it as a result of starting this node,
- # then a bunch of nodes might get fenced
- upnode = None
- if self.HasQuorum(None):
- self.debug("Have quorum")
- return None
-
- if not self.templates["Pat:Fencing_start"]:
- print("No start pattern")
- return None
-
- if not self.templates["Pat:Fencing_ok"]:
- print("No ok pattern")
- return None
-
- stonith = None
- stonithPats = []
- for peer in self.Env["nodes"]:
- if self.ShouldBeStatus[peer] != "up":
- stonithPats.append(self.templates["Pat:Fencing_ok"] % peer)
- stonithPats.append(self.templates["Pat:Fencing_start"] % peer)
-
- stonith = LogWatcher(self.Env["LogFileName"], stonithPats, self.Env["nodes"], self.Env["LogWatcher"], "StartupFencing", 0)
- stonith.set_watch()
- return stonith
-
- def fencing_cleanup(self, node, stonith):
- peer_list = []
- peer_state = {}
-
- self.debug("Looking for nodes that were fenced as a result of %s starting" % node)
-
- # If we just started a node, we may now have quorum (and permission to fence)
- if not stonith:
- self.debug("Nothing to do")
- return peer_list
-
- q = self.HasQuorum(None)
- if not q and len(self.Env["nodes"]) > 2:
- # We didn't gain quorum - we shouldn't have shot anyone
- self.debug("Quorum: %d Len: %d" % (q, len(self.Env["nodes"])))
- return peer_list
-
- for n in self.Env["nodes"]:
- peer_state[n] = "unknown"
-
- # Now see if any states need to be updated
- self.debug("looking for: " + repr(stonith.regexes))
- shot = stonith.look(0)
- while shot:
- line = repr(shot)
- self.debug("Found: " + line)
- del stonith.regexes[stonith.whichmatch]
-
- # Extract node name
- peer = None
- for n in self.Env["nodes"]:
- if re.search(self.templates["Pat:Fencing_ok"] % n, shot):
- peer = n
- peer_state[peer] = "complete"
- self.__instance_errorstoignore.append(self.templates["Pat:Fencing_ok"] % peer)
-
- elif peer_state[n] != "complete" and re.search(self.templates["Pat:Fencing_start"] % n, shot):
- # TODO: Correctly detect multiple fencing operations for the same host
- peer = n
- peer_state[peer] = "in-progress"
- self.__instance_errorstoignore.append(self.templates["Pat:Fencing_start"] % peer)
-
- if not peer:
- self.logger.log("ERROR: Unknown stonith match: %s" % line)
-
- elif not peer in peer_list:
- self.debug("Found peer: " + peer)
- peer_list.append(peer)
-
- # Get the next one
- shot = stonith.look(60)
-
- for peer in peer_list:
-
- self.debug(" Peer %s was fenced as a result of %s starting: %s" % (peer, node, peer_state[peer]))
- if self.Env["at-boot"]:
- self.ShouldBeStatus[peer] = "up"
- else:
- self.ShouldBeStatus[peer] = "down"
-
- if peer_state[peer] == "in-progress":
- # Wait for any in-progress operations to complete
- shot = stonith.look(60)
- while len(stonith.regexes) and shot:
- line = repr(shot)
- self.debug("Found: " + line)
- del stonith.regexes[stonith.whichmatch]
- shot = stonith.look(60)
-
- # Now make sure the node is alive too
- self.ns.wait_for_node(peer, self.Env["DeadTime"])
-
- # Poll until it comes up
- if self.Env["at-boot"]:
- if not self.StataCM(peer):
- time.sleep(self.Env["StartTime"])
-
- if not self.StataCM(peer):
- self.logger.log("ERROR: Peer %s failed to restart after being fenced" % peer)
- return None
-
- return peer_list
-
- def StartaCM(self, node, verbose=False):
-
- '''Start up the cluster manager on a given node'''
- if verbose: self.logger.log("Starting %s on node %s" % (self.templates["Name"], node))
- else: self.debug("Starting %s on node %s" % (self.templates["Name"], node))
- ret = 1
-
- if not node in self.ShouldBeStatus:
- self.ShouldBeStatus[node] = "down"
-
- if self.ShouldBeStatus[node] != "down":
- return 1
-
- patterns = []
- # Technically we should always be able to notice ourselves starting
- patterns.append(self.templates["Pat:Local_started"] % node)
- if self.upcount() == 0:
- patterns.append(self.templates["Pat:DC_started"] % node)
- else:
- patterns.append(self.templates["Pat:NonDC_started"] % node)
-
- watch = LogWatcher(
- self.Env["LogFileName"], patterns, self.Env["nodes"], self.Env["LogWatcher"], "StartaCM", self.Env["StartTime"]+10)
-
- self.install_config(node)
-
- self.ShouldBeStatus[node] = "any"
- if self.StataCM(node) and self.cluster_stable(self.Env["DeadTime"]):
- self.logger.log ("%s was already started" % (node))
- return 1
-
- stonith = self.prepare_fencing_watcher(node)
- watch.set_watch()
-
- (rc, _) = self.rsh(node, self.templates["StartCmd"])
- if rc != 0:
- self.logger.log ("Warn: Start command failed on node %s" % (node))
- self.fencing_cleanup(node, stonith)
- return None
-
- self.ShouldBeStatus[node] = "up"
- watch_result = watch.look_for_all()
-
- if watch.unmatched:
- for regex in watch.unmatched:
- self.logger.log ("Warn: Startup pattern not found: %s" % (regex))
-
- if watch_result and self.cluster_stable(self.Env["DeadTime"]):
- #self.debug("Found match: "+ repr(watch_result))
- self.fencing_cleanup(node, stonith)
- return 1
-
- elif self.StataCM(node) and self.cluster_stable(self.Env["DeadTime"]):
- self.fencing_cleanup(node, stonith)
- return 1
-
- self.logger.log ("Warn: Start failed for node %s" % (node))
- return None
-
- def StartaCMnoBlock(self, node, verbose=False):
-
- '''Start the cluster manager on a given node in non-blocking mode'''
-
- if verbose: self.logger.log("Starting %s on node %s" % (self["Name"], node))
- else: self.debug("Starting %s on node %s" % (self["Name"], node))
-
- self.install_config(node)
- self.rsh(node, self.templates["StartCmd"], synchronous=False)
- self.ShouldBeStatus[node] = "up"
- return 1
-
- def StopaCM(self, node, verbose=False, force=False):
-
- '''Stop the cluster manager on a given node'''
-
- if verbose: self.logger.log("Stopping %s on node %s" % (self["Name"], node))
- else: self.debug("Stopping %s on node %s" % (self["Name"], node))
-
- if self.ShouldBeStatus[node] != "up" and force == False:
- return 1
-
- (rc, _) = self.rsh(node, self.templates["StopCmd"])
- if rc == 0:
- # Make sure we can continue even if corosync leaks
- # fdata-* is the old name
- #self.rsh(node, "rm -rf /dev/shm/qb-* /dev/shm/fdata-*")
- self.ShouldBeStatus[node] = "down"
- self.cluster_stable(self.Env["DeadTime"])
- return 1
- else:
- self.logger.log ("ERROR: Could not stop %s on node %s" % (self["Name"], node))
-
- return None
-
- def StopaCMnoBlock(self, node):
-
- '''Stop the cluster manager on a given node in non-blocking mode'''
-
- self.debug("Stopping %s on node %s" % (self["Name"], node))
-
- self.rsh(node, self.templates["StopCmd"], synchronous=False)
- self.ShouldBeStatus[node] = "down"
- return 1
-
- def RereadCM(self, node):
-
- '''Force the cluster manager on a given node to reread its config
- This may be a no-op on certain cluster managers.
- '''
- (rc, _) = self.rsh(node, self.templates["RereadCmd"])
- if rc == 0:
- return 1
- else:
- self.logger.log ("Could not force %s on node %s to reread its config"
- % (self["Name"], node))
- return None
-
- def startall(self, nodelist=None, verbose=False, quick=False):
-
- '''Start the cluster manager on every node in the cluster.
- We can do it on a subset of the cluster if nodelist is not None.
- '''
- map = {}
- if not nodelist:
- nodelist = self.Env["nodes"]
-
- for node in nodelist:
- if self.ShouldBeStatus[node] == "down":
- self.ns.wait_for_all_nodes(nodelist, 300)
-
- if not quick:
- # This is used for "basic sanity checks", so only start one node ...
- if not self.StartaCM(node, verbose=verbose):
- return 0
- return 1
-
- # Approximation of SimulStartLite for --boot
- watchpats = [ ]
- watchpats.append(self.templates["Pat:DC_IDLE"])
- for node in nodelist:
- watchpats.append(self.templates["Pat:InfraUp"] % node)
- watchpats.append(self.templates["Pat:PacemakerUp"] % node)
- watchpats.append(self.templates["Pat:Local_started"] % node)
- watchpats.append(self.templates["Pat:They_up"] % (nodelist[0], node))
-
- # Start all the nodes - at about the same time...
- watch = LogWatcher(self.Env["LogFileName"], watchpats, self.Env["nodes"], self.Env["LogWatcher"], "fast-start", self.Env["DeadTime"]+10)
- watch.set_watch()
-
- if not self.StartaCM(nodelist[0], verbose=verbose):
- return 0
- for node in nodelist:
- self.StartaCMnoBlock(node, verbose=verbose)
-
- watch.look_for_all()
- if watch.unmatched:
- for regex in watch.unmatched:
- self.logger.log ("Warn: Startup pattern not found: %s" % (regex))
-
- if not self.cluster_stable():
- self.logger.log("Cluster did not stabilize")
- return 0
-
- return 1
-
- def stopall(self, nodelist=None, verbose=False, force=False):
-
- '''Stop the cluster managers on every node in the cluster.
- We can do it on a subset of the cluster if nodelist is not None.
- '''
-
- ret = 1
- map = {}
- if not nodelist:
- nodelist = self.Env["nodes"]
- for node in nodelist:
- if self.ShouldBeStatus[node] == "up" or force == True:
- if not self.StopaCM(node, verbose=verbose, force=force):
- ret = 0
- return ret
-
- def rereadall(self, nodelist=None):
-
- '''Force the cluster managers on every node in the cluster
- to reread their config files. We can do it on a subset of the
- cluster if nodelist is not None.
- '''
-
- map = {}
- if not nodelist:
- nodelist = self.Env["nodes"]
- for node in nodelist:
- if self.ShouldBeStatus[node] == "up":
- self.RereadCM(node)
-
- def statall(self, nodelist=None):
-
- '''Return the status of the cluster managers in the cluster.
- We can do it on a subset of the cluster if nodelist is not None.
- '''
-
- result = {}
- if not nodelist:
- nodelist = self.Env["nodes"]
- for node in nodelist:
- if self.StataCM(node):
- result[node] = "up"
- else:
- result[node] = "down"
- return result
-
- def isolate_node(self, target, nodes=None):
- '''isolate the communication between the nodes'''
- if not nodes:
- nodes = self.Env["nodes"]
-
- for node in nodes:
- if node != target:
- (rc, _) = self.rsh(target, self.templates["BreakCommCmd"] % self.key_for_node(node))
- if rc != 0:
- self.logger.log("Could not break the communication between %s and %s: %d" % (target, node, rc))
- return None
- else:
- self.debug("Communication cut between %s and %s" % (target, node))
- return 1
-
- def unisolate_node(self, target, nodes=None):
- '''fix the communication between the nodes'''
- if not nodes:
- nodes = self.Env["nodes"]
-
- for node in nodes:
- if node != target:
- restored = 0
-
- # Limit the amount of time we have asynchronous connectivity for
- # Restore both sides as simultaneously as possible
- self.rsh(target, self.templates["FixCommCmd"] % self.key_for_node(node), synchronous=False)
- self.rsh(node, self.templates["FixCommCmd"] % self.key_for_node(target), synchronous=False)
- self.debug("Communication restored between %s and %s" % (target, node))
-
- def oprofileStart(self, node=None):
- if not node:
- for n in self.Env["oprofile"]:
- self.oprofileStart(n)
-
- elif node in self.Env["oprofile"]:
- self.debug("Enabling oprofile on %s" % node)
- self.rsh(node, "opcontrol --init")
- self.rsh(node, "opcontrol --setup --no-vmlinux --separate=lib --callgraph=20 --image=all")
- self.rsh(node, "opcontrol --start")
- self.rsh(node, "opcontrol --reset")
-
- def oprofileSave(self, test, node=None):
- if not node:
- for n in self.Env["oprofile"]:
- self.oprofileSave(test, n)
-
- elif node in self.Env["oprofile"]:
- self.rsh(node, "opcontrol --dump")
- self.rsh(node, "opcontrol --save=cts.%d" % test)
- # Read back with: opreport -l session:cts.0 image:<directory>/c*
- if None:
- self.rsh(node, "opcontrol --reset")
- else:
- self.oprofileStop(node)
- self.oprofileStart(node)
-
- def oprofileStop(self, node=None):
- if not node:
- for n in self.Env["oprofile"]:
- self.oprofileStop(n)
-
- elif node in self.Env["oprofile"]:
- self.debug("Stopping oprofile on %s" % node)
- self.rsh(node, "opcontrol --reset")
- self.rsh(node, "opcontrol --shutdown 2>&1 > /dev/null")
-
- def errorstoignore(self):
- # At some point implement a more elegant solution that
- # also produces a report at the end
- """ Return a list of known error messages that should be ignored """
- return self.templates.get_patterns("BadNewsIgnore")
-
- def install_config(self, node):
- if not self.ns.wait_for_node(node):
- self.log("Node %s is not up." % node)
- return None
-
- if not node in self.CIBsync and self.Env["ClobberCIB"]:
- self.CIBsync[node] = 1
- self.rsh(node, "rm -f " + BuildOptions.CIB_DIR + "/cib*")
-
- # Only install the CIB on the first node, all the other ones will pick it up from there
- if self.cib_installed == 1:
- return None
-
- self.cib_installed = 1
- if self.Env["CIBfilename"] == None:
- self.log("Installing Generated CIB on node %s" % (node))
- self.cib.install(node)
-
- else:
- self.log("Installing CIB (%s) on node %s" % (self.Env["CIBfilename"], node))
- if self.rsh.copy(self.Env["CIBfilename"], "root@" + (self.templates["CIBfile"] % node)) != 0:
- raise ValueError("Can not scp file to %s %d"%(node))
-
- self.rsh(node, "chown " + BuildOptions.DAEMON_USER + " " + BuildOptions.CIB_DIR + "/cib.xml")
-
- def prepare(self):
- '''Finish the Initialization process. Prepare to test...'''
-
- self.partitions_expected = 1
- for node in self.Env["nodes"]:
- self.ShouldBeStatus[node] = ""
- if self.Env["experimental-tests"]:
- self.unisolate_node(node)
- self.StataCM(node)
-
- def test_node_CM(self, node):
- '''Report the status of the cluster manager on a given node'''
-
- watchpats = [ ]
- watchpats.append("Current ping state: (S_IDLE|S_NOT_DC)")
- watchpats.append(self.templates["Pat:NonDC_started"] % node)
- watchpats.append(self.templates["Pat:DC_started"] % node)
- idle_watch = LogWatcher(self.Env["LogFileName"], watchpats, [node], self.Env["LogWatcher"], "ClusterIdle")
- idle_watch.set_watch()
-
- (_, out) = self.rsh(node, self.templates["StatusCmd"]%node, verbose=1)
-
- if not out:
- out = ""
- else:
- out = out[0].strip()
-
- self.debug("Node %s status: '%s'" %(node, out))
-
- if out.find('ok') < 0:
- if self.ShouldBeStatus[node] == "up":
- self.log(
- "Node status for %s is %s but we think it should be %s"
- % (node, "down", self.ShouldBeStatus[node]))
- self.ShouldBeStatus[node] = "down"
- return 0
-
- if self.ShouldBeStatus[node] == "down":
- self.log(
- "Node status for %s is %s but we think it should be %s: %s"
- % (node, "up", self.ShouldBeStatus[node], out))
-
- self.ShouldBeStatus[node] = "up"
-
- # check the output first - because syslog-ng loses messages
- if out.find('S_NOT_DC') != -1:
- # Up and stable
- return 2
- if out.find('S_IDLE') != -1:
- # Up and stable
- return 2
-
- # fall back to syslog-ng and wait
- if not idle_watch.look():
- # just up
- self.debug("Warn: Node %s is unstable: %s" % (node, out))
- return 1
-
- # Up and stable
- return 2
-
- # Is the node up or is the node down
- def StataCM(self, node):
- '''Report the status of the cluster manager on a given node'''
-
- if self.test_node_CM(node) > 0:
- return 1
- return None
-
- # Being up and being stable is not the same question...
- def node_stable(self, node):
- '''Report the status of the cluster manager on a given node'''
-
- if self.test_node_CM(node) == 2:
- return 1
- self.log("Warn: Node %s not stable" % (node))
- return None
-
- def partition_stable(self, nodes, timeout=None):
- watchpats = [ ]
- watchpats.append("Current ping state: S_IDLE")
- watchpats.append(self.templates["Pat:DC_IDLE"])
- self.debug("Waiting for cluster stability...")
-
- if timeout == None:
- timeout = self.Env["DeadTime"]
-
- if len(nodes) < 3:
- self.debug("Cluster is inactive")
- return 1
-
- idle_watch = LogWatcher(self.Env["LogFileName"], watchpats, nodes.split(), self.Env["LogWatcher"], "ClusterStable", timeout)
- idle_watch.set_watch()
-
- for node in nodes.split():
- # have each node dump its current state
- self.rsh(node, self.templates["StatusCmd"] % node, verbose=1)
-
- ret = idle_watch.look()
- while ret:
- self.debug(ret)
- for node in nodes.split():
- if re.search(node, ret):
- return 1
- ret = idle_watch.look()
-
- self.debug("Warn: Partition %s not IDLE after %ds" % (repr(nodes), timeout))
- return None
-
- def cluster_stable(self, timeout=None, double_check=False):
- partitions = self.find_partitions()
-
- for partition in partitions:
- if not self.partition_stable(partition, timeout):
- return None
-
- if double_check:
- # Make sure we are really stable and that all resources,
- # including those that depend on transient node attributes,
- # are started if they were going to be
- time.sleep(5)
- for partition in partitions:
- if not self.partition_stable(partition, timeout):
- return None
-
- return 1
-
- def is_node_dc(self, node, status_line=None):
- rc = 0
-
- if not status_line:
- (_, out) = self.rsh(node, self.templates["StatusCmd"]%node, verbose=1)
-
- if out:
- status_line = out[0].strip()
-
- if not status_line:
- rc = 0
- elif status_line.find('S_IDLE') != -1:
- rc = 1
- elif status_line.find('S_INTEGRATION') != -1:
- rc = 1
- elif status_line.find('S_FINALIZE_JOIN') != -1:
- rc = 1
- elif status_line.find('S_POLICY_ENGINE') != -1:
- rc = 1
- elif status_line.find('S_TRANSITION_ENGINE') != -1:
- rc = 1
-
- return rc
-
- def active_resources(self, node):
- (_, output) = self.rsh(node, "crm_resource -c", verbose=1)
- resources = []
- for line in output:
- if re.search("^Resource", line):
- tmp = AuditResource(self, line)
- if tmp.type == "primitive" and tmp.host == node:
- resources.append(tmp.id)
- return resources
-
- def ResourceLocation(self, rid):
- ResourceNodes = []
- for node in self.Env["nodes"]:
- if self.ShouldBeStatus[node] == "up":
-
- cmd = self.templates["RscRunning"] % (rid)
- (rc, lines) = self.rsh(node, cmd)
-
- if rc == 127:
- self.log("Command '%s' failed. Binary or pacemaker-cts package not installed?" % cmd)
- for line in lines:
- self.log("Output: "+line)
- elif rc == 0:
- ResourceNodes.append(node)
-
- return ResourceNodes
-
- def find_partitions(self):
- ccm_partitions = []
-
- for node in self.Env["nodes"]:
- if self.ShouldBeStatus[node] == "up":
- (_, out) = self.rsh(node, self.templates["PartitionCmd"], verbose=1)
-
- if not out:
- self.log("no partition details for %s" % node)
- continue
-
- partition = out[0].strip()
-
- if len(partition) > 2:
- nodes = partition.split()
- nodes.sort()
- partition = ' '.join(nodes)
-
- found = 0
- for a_partition in ccm_partitions:
- if partition == a_partition:
- found = 1
- if found == 0:
- self.debug("Adding partition from %s: %s" % (node, partition))
- ccm_partitions.append(partition)
- else:
- self.debug("Partition '%s' from %s is consistent with existing entries" % (partition, node))
-
- else:
- self.log("bad partition details for %s" % node)
- else:
- self.debug("Node %s is down... skipping" % node)
-
- self.debug("Found partitions: %s" % repr(ccm_partitions) )
- return ccm_partitions
-
- def HasQuorum(self, node_list):
- # If we are auditing a partition, then one side will
- # have quorum and the other not.
- # So the caller needs to tell us which we are checking
- # If no value for node_list is specified... assume all nodes
- if not node_list:
- node_list = self.Env["nodes"]
-
- for node in node_list:
- if self.ShouldBeStatus[node] == "up":
- (_, quorum) = self.rsh(node, self.templates["QuorumCmd"], verbose=1)
- quorum = quorum[0].strip()
-
- if quorum.find("1") != -1:
- return 1
- elif quorum.find("0") != -1:
- return 0
- else:
- self.debug("WARN: Unexpected quorum test result from " + node + ":" + quorum)
-
- return 0
-
- def Components(self):
- complist = []
- common_ignore = [
- "Pending action:",
- "(ERROR|error): crm_log_message_adv:",
- "(ERROR|error): MSG: No message to dump",
- "pending LRM operations at shutdown",
- "Lost connection to the CIB manager",
- "Connection to the CIB terminated...",
- "Sending message to the CIB manager FAILED",
- "Action A_RECOVER .* not supported",
- "(ERROR|error): stonithd_op_result_ready: not signed on",
- "pingd.*(ERROR|error): send_update: Could not send update",
- "send_ipc_message: IPC Channel to .* is not connected",
- "unconfirmed_actions: Waiting on .* unconfirmed actions",
- "cib_native_msgready: Message pending on command channel",
- r": Performing A_EXIT_1 - forcefully exiting ",
- r"Resource .* was active at shutdown. You may ignore this error if it is unmanaged.",
- ]
-
- stonith_ignore = [
- r"Updating failcount for child_DoFencing",
- r"error.*: Fencer connection failed \(will retry\)",
- "pacemaker-execd.*(ERROR|error): stonithd_receive_ops_result failed.",
- ]
-
- stonith_ignore.extend(common_ignore)
-
- ccm = Process(self, "ccm", pats = [
- "State transition .* S_RECOVERY",
- "pacemaker-controld.*Action A_RECOVER .* not supported",
- r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
- r"pacemaker-controld.*: Could not recover from internal error",
- "pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy",
- # these status numbers are likely wrong now
- r"pacemaker-controld.*exited with status 2",
- r"attrd.*exited with status 1",
- r"cib.*exited with status 2",
-
-# Not if it was fenced
-# "A new node joined the cluster",
-
-# "WARN: determine_online_status: Node .* is unclean",
-# "Scheduling node .* for fencing",
-# "Executing .* fencing operation",
-# "tengine_stonith_callback: .*result=0",
-# "Processing I_NODE_JOIN:.* cause=C_HA_MESSAGE",
-# "State transition S_.* -> S_INTEGRATION.*input=I_NODE_JOIN",
- "State transition S_STARTING -> S_PENDING",
- ], badnews_ignore = common_ignore)
-
- based = Process(self, "pacemaker-based", pats = [
- "State transition .* S_RECOVERY",
- "Lost connection to the CIB manager",
- "Connection to the CIB manager terminated",
- r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
- "pacemaker-controld.*I_ERROR.*crmd_cib_connection_destroy",
- r"pacemaker-controld.*: Could not recover from internal error",
- # these status numbers are likely wrong now
- r"pacemaker-controld.*exited with status 2",
- r"attrd.*exited with status 1",
- ], badnews_ignore = common_ignore)
-
- execd = Process(self, "pacemaker-execd", pats = [
- "State transition .* S_RECOVERY",
- "LRM Connection failed",
- "pacemaker-controld.*I_ERROR.*lrm_connection_destroy",
- "State transition S_STARTING -> S_PENDING",
- r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
- r"pacemaker-controld.*: Could not recover from internal error",
- # this status number is likely wrong now
- r"pacemaker-controld.*exited with status 2",
- ], badnews_ignore = common_ignore)
-
- controld = Process(self, "pacemaker-controld",
- pats = [
-# "WARN: determine_online_status: Node .* is unclean",
-# "Scheduling node .* for fencing",
-# "Executing .* fencing operation",
-# "tengine_stonith_callback: .*result=0",
- "State transition .* S_IDLE",
- "State transition S_STARTING -> S_PENDING",
- ], badnews_ignore = common_ignore)
-
- schedulerd = Process(self, "pacemaker-schedulerd", pats = [
- "State transition .* S_RECOVERY",
- r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
- r"pacemaker-controld.*: Could not recover from internal error",
- r"pacemaker-controld.*CRIT.*: Connection to the scheduler failed",
- "pacemaker-controld.*I_ERROR.*save_cib_contents",
- # this status number is likely wrong now
- r"pacemaker-controld.*exited with status 2",
- ], badnews_ignore = common_ignore, dc_only=True)
-
- if self.Env["DoFencing"]:
- complist.append(Process(self, "stoniths", dc_pats = [
- r"pacemaker-controld.*CRIT.*: Fencing daemon connection failed",
- "Attempting connection to fencing daemon",
- ], badnews_ignore = stonith_ignore))
-
- ccm.pats.extend([
- # these status numbers are likely wrong now
- r"attrd.*exited with status 1",
- r"pacemaker-(based|controld).*exited with status 2",
- ])
- based.pats.extend([
- # these status numbers are likely wrong now
- r"attrd.*exited with status 1",
- r"pacemaker-controld.*exited with status 2",
- ])
- execd.pats.extend([
- # these status numbers are likely wrong now
- r"pacemaker-controld.*exited with status 2",
- ])
-
- complist.append(ccm)
- complist.append(based)
- complist.append(execd)
- complist.append(controld)
- complist.append(schedulerd)
-
- return complist
-
- def StandbyStatus(self, node):
- (_, out) = self.rsh(node, self.templates["StandbyQueryCmd"] % node, verbose=1)
- if not out:
- return "off"
- out = out[0].strip()
- self.debug("Standby result: "+out)
- return out
-
- # status == "on" : Enter Standby mode
- # status == "off": Enter Active mode
- def SetStandbyMode(self, node, status):
- current_status = self.StandbyStatus(node)
- cmd = self.templates["StandbyCmd"] % (node, status)
- self.rsh(node, cmd)
- return True
-
- def AddDummyRsc(self, node, rid):
- rsc_xml = """ '<resources>
- <primitive class=\"ocf\" id=\"%s\" provider=\"pacemaker\" type=\"Dummy\">
- <operations>
-                 <op id=\"%s-interval-10s\" interval=\"10s\" name=\"monitor\"/>
- </operations>
- </primitive>
- </resources>'""" % (rid, rid)
- constraint_xml = """ '<constraints>
- <rsc_location id=\"location-%s-%s\" node=\"%s\" rsc=\"%s\" score=\"INFINITY\"/>
- </constraints>'
- """ % (rid, node, node, rid)
-
- self.rsh(node, self.templates['CibAddXml'] % (rsc_xml))
- self.rsh(node, self.templates['CibAddXml'] % (constraint_xml))
-
- def RemoveDummyRsc(self, node, rid):
- constraint = "\"//rsc_location[@rsc='%s']\"" % (rid)
- rsc = "\"//primitive[@id='%s']\"" % (rid)
-
- self.rsh(node, self.templates['CibDelXpath'] % constraint)
- self.rsh(node, self.templates['CibDelXpath'] % rsc)
diff --git a/cts/lab/Makefile.am b/cts/lab/Makefile.am
deleted file mode 100644
index 27e39b3..0000000
--- a/cts/lab/Makefile.am
+++ /dev/null
@@ -1,31 +0,0 @@
-#
-# Copyright 2001-2023 the Pacemaker project contributors
-#
-# The version control history for this file may have further details.
-#
-# This source code is licensed under the GNU General Public License version 2
-# or later (GPLv2+) WITHOUT ANY WARRANTY.
-#
-
-MAINTAINERCLEANFILES = Makefile.in
-
-noinst_SCRIPTS = cluster_test \
- OCFIPraTest.py
-
-# Commands intended to be run only via other commands
-halibdir = $(CRM_DAEMON_DIR)
-dist_halib_SCRIPTS = cts-log-watcher
-
-ctslibdir = $(pythondir)/cts
-ctslib_PYTHON = __init__.py \
- CIB.py \
- cib_xml.py \
- ClusterManager.py \
- CM_corosync.py \
- CTSaudits.py \
- CTSscenarios.py \
- CTStests.py
-
-ctsdir = $(datadir)/$(PACKAGE)/tests/cts
-cts_SCRIPTS = CTSlab.py \
- cts
diff --git a/cts/lab/OCFIPraTest.py.in b/cts/lab/OCFIPraTest.py.in
deleted file mode 100644
index 2cce304..0000000
--- a/cts/lab/OCFIPraTest.py.in
+++ /dev/null
@@ -1,173 +0,0 @@
-#!@PYTHON@
-
-'''OCF IPaddr/IPaddr2 Resource Agent Test'''
-
-__copyright__ = """Original Author: Huang Zhen <zhenhltc@cn.ibm.com>
-Copyright 2004 International Business Machines
-
-with later changes copyright 2005-2023 the Pacemaker project contributors.
-The version control history for this file may have further details.
-"""
-__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
-
-import os
-import sys
-import time
-import random
-import struct
-import syslog
-
-from pacemaker import BuildOptions
-
-
-def usage():
- print("usage: " + sys.argv[0] \
- + " [-2]"\
-          + " [--ip|-i first-test-ip]"\
- + " [--ipnum|-n test-ip-num]"\
- + " [--help|-h]"\
- + " [--perform|-p op]"\
- + " [number-of-iterations]")
- sys.exit(1)
-
-
-def perform_op(ra, ip, op):
- os.environ["OCF_RA_VERSION_MAJOR"] = "1"
- os.environ["OCF_RA_VERSION_MINOR"] = "0"
- os.environ["OCF_ROOT"] = BuildOptions.OCF_ROOT_DIR
- os.environ["OCF_RESOURCE_INSTANCE"] = ip
- os.environ["OCF_RESOURCE_TYPE"] = ra
- os.environ["OCF_RESKEY_ip"] = ip
- os.environ["HA_LOGFILE"] = "/dev/null"
- os.environ["HA_LOGFACILITY"] = "local7"
- path = BuildOptions.OCF_ROOT_DIR + "/resource.d/heartbeat/" + ra
- return os.spawnvpe(os.P_WAIT, path, [ra, op], os.environ)
-
-
-def audit(ra, iplist, ipstatus, summary):
- passed = 1
- for ip in iplist:
- ret = perform_op(ra, ip, "monitor")
- if ret != ipstatus[ip]:
- passed = 0
- log("audit: status of %s should be %d but it is %d\t [failure]" %
- (ip,ipstatus[ip],ret))
- ipstatus[ip] = ret
- summary["audit"]["called"] += 1;
- if passed :
- summary["audit"]["success"] += 1
- else :
- summary["audit"]["failure"] += 1
-
-
-def log(towrite):
- t = time.strftime("%Y/%m/%d_%H:%M:%S\t", time.localtime(time.time()))
- logstr = t + " "+str(towrite)
- syslog.syslog(logstr)
- print(logstr)
-
-if __name__ == '__main__':
- ra = "IPaddr"
- ipbase = "127.0.0.10"
- ipnum = 1
- itnum = 50
- perform = None
- summary = {
- "start":{"called":0,"success":0,"failure":0},
- "stop" :{"called":0,"success":0,"failure":0},
- "audit":{"called":0,"success":0,"failure":0}
- }
- syslog.openlog(sys.argv[0], 0, syslog.LOG_LOCAL7)
-
- # Process arguments...
- skipthis = None
- args = sys.argv[1:]
- for i in range(0, len(args)) :
- if skipthis :
- skipthis = None
- continue
- elif args[i] == "-2" :
- ra = "IPaddr2"
- elif args[i] == "--ip" or args[i] == "-i" :
- skipthis = 1
- ipbase = args[i+1]
- elif args[i] == "--ipnum" or args[i] == "-n" :
- skipthis = 1
- ipnum = int(args[i+1])
- elif args[i] == "--perform" or args[i] == "-p" :
- skipthis = 1
- perform = args[i+1]
- elif args[i] == "--help" or args[i] == "-h" :
- usage()
- else:
- itnum = int(args[i])
-
- log("Begin OCF IPaddr/IPaddr2 Test")
-
- # Generate the test ips
- iplist = []
- ipstatus = {}
- fields = ipbase.split('.')
- for i in range(0, ipnum) :
-        ip = '.'.join(fields)
- iplist.append(ip)
- ipstatus[ip] = perform_op(ra,ip,"monitor")
- fields[3] = str(int(fields[3])+1)
- log("Test ip:" + str(iplist))
-
-    # If the user asked to perform a single operation, do it and exit
-    if perform is not None:
-        log("Performing operation %s" % perform)
- for ip in iplist:
- perform_op(ra, ip, perform)
- log("Done")
- sys.exit()
-
- log("RA Type:" + ra)
- log("Test Count:" + str(itnum))
-
- # Prepare Random
-    f = open("/dev/urandom", "rb")
- seed = struct.unpack("BBB", f.read(3))
- f.close()
- #seed=(123,321,231)
- rand = random.Random()
- rand.seed(seed[0])
- log("Test Random Seed:" + str(seed))
-
- #
- # Begin Tests
-
- log(">>>>>>>>>>>>>>>>>>>>>>>>")
- for i in range(0, itnum):
- ip = rand.choice(iplist)
- if ipstatus[ip] == 0:
- op = "stop"
- elif ipstatus[ip] == 7:
- op = "start"
- else :
- op = rand.choice(["start","stop"])
-
- ret = perform_op(ra, ip, op)
- # update status
- if op == "start" and ret == 0:
- ipstatus[ip] = 0
- elif op == "stop" and ret == 0:
- ipstatus[ip] = 7
- else :
- ipstatus[ip] = 1
- result = ""
- if ret == 0:
- result = "success"
- else :
- result = "failure"
- summary[op]["called"] += 1
- summary[op][result] += 1
- log( "%d:%s %s \t[%s]"%(i, op, ip, result))
- audit(ra, iplist, ipstatus, summary)
-
- log("<<<<<<<<<<<<<<<<<<<<<<<<")
- log("start:\t" + str(summary["start"]))
- log("stop: \t" + str(summary["stop"]))
- log("audit:\t" + str(summary["audit"]))
-
diff --git a/cts/lab/__init__.py b/cts/lab/__init__.py
deleted file mode 100644
index abed502..0000000
--- a/cts/lab/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-"""Python modules for Pacemaker's Cluster Test Suite (CTS)
-
-This package provides the following modules:
-
-CIB
-cib_xml
-CM_common
-CM_corosync
-CTSaudits
-CTS
-CTSscenarios
-CTStests
-patterns
-watcher
-"""
diff --git a/cts/lab/cib_xml.py b/cts/lab/cib_xml.py
deleted file mode 100644
index 378dd29..0000000
--- a/cts/lab/cib_xml.py
+++ /dev/null
@@ -1,319 +0,0 @@
-""" CIB XML generator for Pacemaker's Cluster Test Suite (CTS)
-"""
-
-__copyright__ = "Copyright 2008-2023 the Pacemaker project contributors"
-__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
-
-import sys
-
-from cts.CIB import CibBase
-
-
-class XmlBase(CibBase):
- def __init__(self, Factory, tag, _id, **kwargs):
- CibBase.__init__(self, Factory, tag, _id, **kwargs)
-
- def show(self):
- text = '''<%s''' % self.tag
- if self.name:
- text += ''' id="%s"''' % (self.name)
- for k in list(self.kwargs.keys()):
- text += ''' %s="%s"''' % (k, self.kwargs[k])
-
- if not self.children:
- text += '''/>'''
- return text
-
- text += '''>'''
-
- for c in self.children:
- text += c.show()
-
- text += '''</%s>''' % self.tag
- return text
-
- def _run(self, operation, xml, section="all", options=""):
- if self.name:
- label = self.name
- else:
- label = "<%s>" % self.tag
- self.Factory.debug("Writing out %s" % label)
- fixed = "HOME=/root CIB_file="+self.Factory.tmpfile
- fixed += " cibadmin --%s --scope %s %s --xml-text '%s'" % (operation, section, options, xml)
- (rc, _) = self.Factory.rsh(self.Factory.target, fixed)
- if rc != 0:
- self.Factory.log("Configure call failed: "+fixed)
- sys.exit(1)
-
-
-class InstanceAttributes(XmlBase):
- """ Create an <instance_attributes> section with name-value pairs """
-
- def __init__(self, Factory, name, attrs):
- XmlBase.__init__(self, Factory, "instance_attributes", name)
-
- # Create an <nvpair> for each attribute
- for (attr, value) in list(attrs.items()):
- self.add_child(XmlBase(Factory, "nvpair", "%s-%s" % (name, attr),
- name=attr, value=value))
-
-
-class Node(XmlBase):
- """ Create a <node> section with node attributes for one node """
-
- def __init__(self, Factory, node_name, node_id, node_attrs):
- XmlBase.__init__(self, Factory, "node", node_id, uname=node_name)
- self.add_child(InstanceAttributes(Factory, "%s-1" % node_name, node_attrs))
-
-
-class Nodes(XmlBase):
- """ Create a <nodes> section """
-
- def __init__(self, Factory):
- XmlBase.__init__(self, Factory, "nodes", None)
-
- def add_node(self, node_name, node_id, node_attrs):
- self.add_child(Node(self.Factory, node_name, node_id, node_attrs))
-
- def commit(self):
- self._run("modify", self.show(), "configuration", "--allow-create")
-
-
-class FencingTopology(XmlBase):
- def __init__(self, Factory):
- XmlBase.__init__(self, Factory, "fencing-topology", None)
-
- def level(self, index, target, devices, target_attr=None, target_value=None):
- # Generate XML ID (sanitizing target-by-attribute levels)
-
- if target:
- xml_id = "cts-%s.%d" % (target, index)
- self.add_child(XmlBase(self.Factory, "fencing-level", xml_id, target=target, index=index, devices=devices))
-
- else:
- xml_id = "%s-%s.%d" % (target_attr, target_value, index)
- child = XmlBase(self.Factory, "fencing-level", xml_id, index=index, devices=devices)
- child["target-attribute"]=target_attr
- child["target-value"]=target_value
- self.add_child(child)
-
- def commit(self):
- self._run("create", self.show(), "configuration", "--allow-create")
-
-
-class Option(XmlBase):
- def __init__(self, Factory, section="cib-bootstrap-options"):
- XmlBase.__init__(self, Factory, "cluster_property_set", section)
-
- def __setitem__(self, key, value):
- self.add_child(XmlBase(self.Factory, "nvpair", "cts-%s" % key, name=key, value=value))
-
- def commit(self):
- self._run("modify", self.show(), "crm_config", "--allow-create")
-
-
-class OpDefaults(XmlBase):
- def __init__(self, Factory):
- XmlBase.__init__(self, Factory, "op_defaults", None)
- self.meta = XmlBase(self.Factory, "meta_attributes", "cts-op_defaults-meta")
- self.add_child(self.meta)
-
- def __setitem__(self, key, value):
- self.meta.add_child(XmlBase(self.Factory, "nvpair", "cts-op_defaults-%s" % key, name=key, value=value))
-
- def commit(self):
- self._run("modify", self.show(), "configuration", "--allow-create")
-
-
-class Alerts(XmlBase):
- def __init__(self, Factory):
- XmlBase.__init__(self, Factory, "alerts", None)
- self.alert_count = 0
-
- def add_alert(self, path, recipient):
- self.alert_count = self.alert_count + 1
- alert = XmlBase(self.Factory, "alert", "alert-%d" % self.alert_count,
- path=path)
- recipient1 = XmlBase(self.Factory, "recipient",
- "alert-%d-recipient-1" % self.alert_count,
- value=recipient)
- alert.add_child(recipient1)
- self.add_child(alert)
-
- def commit(self):
- self._run("modify", self.show(), "configuration", "--allow-create")
-
-
-class Expression(XmlBase):
- def __init__(self, Factory, name, attr, op, value=None):
- XmlBase.__init__(self, Factory, "expression", name, attribute=attr, operation=op)
- if value:
- self["value"] = value
-
-
-class Rule(XmlBase):
- def __init__(self, Factory, name, score, op="and", expr=None):
- XmlBase.__init__(self, Factory, "rule", "%s" % name)
- self["boolean-op"] = op
- self["score"] = score
- if expr:
- self.add_child(expr)
-
-
-class Resource(XmlBase):
- def __init__(self, Factory, name, rtype, standard, provider=None):
- XmlBase.__init__(self, Factory, "native", name)
-
- self.rtype = rtype
- self.standard = standard
- self.provider = provider
-
- self.op = []
- self.meta = {}
- self.param = {}
-
- self.scores = {}
- self.needs = {}
- self.coloc = {}
-
- if self.standard == "ocf" and not provider:
- self.provider = "heartbeat"
- elif self.standard == "lsb":
- self.provider = None
-
- def __setitem__(self, key, value):
- self.add_param(key, value)
-
- def add_op(self, name, interval, **kwargs):
- self.op.append(
- XmlBase(self.Factory, "op", "%s-%s" % (name, interval), name=name, interval=interval, **kwargs))
-
- def add_param(self, name, value):
- self.param[name] = value
-
- def add_meta(self, name, value):
- self.meta[name] = value
-
- def prefer(self, node, score="INFINITY", rule=None):
- if not rule:
- rule = Rule(self.Factory, "prefer-%s-r" % node, score,
- expr=Expression(self.Factory, "prefer-%s-e" % node, "#uname", "eq", node))
- self.scores[node] = rule
-
- def after(self, resource, kind="Mandatory", first="start", then="start", **kwargs):
- kargs = kwargs.copy()
- kargs["kind"] = kind
- if then:
- kargs["first-action"] = "start"
- kargs["then-action"] = then
-
- if first:
- kargs["first-action"] = first
-
- self.needs[resource] = kargs
-
- def colocate(self, resource, score="INFINITY", role=None, withrole=None, **kwargs):
- kargs = kwargs.copy()
- kargs["score"] = score
- if role:
- kargs["rsc-role"] = role
- if withrole:
- kargs["with-rsc-role"] = withrole
-
- self.coloc[resource] = kargs
-
- def constraints(self):
- text = "<constraints>"
-
- for k in list(self.scores.keys()):
- text += '''<rsc_location id="prefer-%s" rsc="%s">''' % (k, self.name)
- text += self.scores[k].show()
- text += '''</rsc_location>'''
-
- for k in list(self.needs.keys()):
- text += '''<rsc_order id="%s-after-%s" first="%s" then="%s"''' % (self.name, k, k, self.name)
- kargs = self.needs[k]
- for kw in list(kargs.keys()):
- text += ''' %s="%s"''' % (kw, kargs[kw])
- text += '''/>'''
-
- for k in list(self.coloc.keys()):
- text += '''<rsc_colocation id="%s-with-%s" rsc="%s" with-rsc="%s"''' % (self.name, k, self.name, k)
- kargs = self.coloc[k]
- for kw in list(kargs.keys()):
- text += ''' %s="%s"''' % (kw, kargs[kw])
- text += '''/>'''
-
- text += "</constraints>"
- return text
-
- def show(self):
- text = '''<primitive id="%s" class="%s" type="%s"''' % (self.name, self.standard, self.rtype)
- if self.provider:
- text += ''' provider="%s"''' % (self.provider)
- text += '''>'''
-
- if len(self.meta) > 0:
- text += '''<meta_attributes id="%s-meta">''' % self.name
- for p in list(self.meta.keys()):
- text += '''<nvpair id="%s-%s" name="%s" value="%s"/>''' % (self.name, p, p, self.meta[p])
- text += '''</meta_attributes>'''
-
- if len(self.param) > 0:
- text += '''<instance_attributes id="%s-params">''' % self.name
- for p in list(self.param.keys()):
- text += '''<nvpair id="%s-%s" name="%s" value="%s"/>''' % (self.name, p, p, self.param[p])
- text += '''</instance_attributes>'''
-
- if len(self.op) > 0:
- text += '''<operations>'''
- for o in self.op:
- key = o.name
- o.name = "%s-%s" % (self.name, key)
- text += o.show()
- o.name = key
- text += '''</operations>'''
-
- text += '''</primitive>'''
- return text
-
- def commit(self):
- self._run("create", self.show(), "resources")
- self._run("modify", self.constraints())
-
-
-class Group(Resource):
- def __init__(self, Factory, name):
- Resource.__init__(self, Factory, name, None, None)
- self.tag = "group"
-
- def __setitem__(self, key, value):
- self.add_meta(key, value)
-
- def show(self):
- text = '''<%s id="%s">''' % (self.tag, self.name)
-
- if len(self.meta) > 0:
- text += '''<meta_attributes id="%s-meta">''' % self.name
- for p in list(self.meta.keys()):
- text += '''<nvpair id="%s-%s" name="%s" value="%s"/>''' % (self.name, p, p, self.meta[p])
- text += '''</meta_attributes>'''
-
- for c in self.children:
- text += c.show()
- text += '''</%s>''' % self.tag
- return text
-
-
-class Clone(Group):
- def __init__(self, Factory, name, child=None):
- Group.__init__(self, Factory, name)
- self.tag = "clone"
- if child:
- self.add_child(child)
-
- def add_child(self, resource):
- if not self.children:
- self.children.append(resource)
- else:
- self.Factory.log("Clones can only have a single child. Ignoring %s" % resource.name)
diff --git a/cts/lab/cts.in b/cts/lab/cts.in
deleted file mode 100755
index 5b3aaab..0000000
--- a/cts/lab/cts.in
+++ /dev/null
@@ -1,262 +0,0 @@
-#!@BASH_PATH@
-#
-# Copyright 2012-2023 the Pacemaker project contributors
-#
-# The version control history for this file may have further details.
-#
-# This source code is licensed under the GNU General Public License version 2
-# or later (GPLv2+) WITHOUT ANY WARRANTY.
-#
-
-# e.g. /etc/sysconfig or /etc/default
-CONFIG_DIR=@CONFIGDIR@
-
-cts_root=`dirname $0`
-
-logfile=0
-summary=0
-verbose=0
-watch=0
-saved=0
-tests=""
-
-install=0
-clean=0
-kill=0
-run=0
-boot=0
-setup=0
-target=rhel-7
-cmd=""
-trace=""
-
-custom_log=""
-patterns="-e CTS:"
-
-
-helpmsg=$(cat <<EOF
-Usage: %s [options] {[setup [TARGET]] | [OTHER-CMDS]}
-
-[--]help, -h show help screen and exit
--x turn on debugging
--a show relevant screen sessions and exit
--c,-g CLUSTER_NAME set the cluster name
--S show summary from the last CTS run
--s show summary for the current log (see -l)
--v increase verbosity
--p (currently unused)
--e PATTERN grep pattern to apply when 'summary' or 'watch' requested
--l print the filename of the log that would be operated on
--w             continuous (filtered) monitoring of the log file
--f,-sf FILE show summary for the provided log
--t TEST, [0-9]* add a test to the working set
-[--]kill request termination of cluster software
-[--]run request CTS run (passing remaining arguments through)
-[--]boot, start request CTS run (with --boot option)
-[--]clean request cleaning up after CTS run
-[--]install, --inst request installing packages to get ready to run CTS
-[--]setup request initialization to get ready to run CTS
-trace-ls, tls list traced functions
-trace-add, tadd FUNC add a function to the list of traced ones
-trace-rm, trm FUNC remove a function from the list of traced ones
-trace-set, tset FUNC set function(s) as the only to be traced
-(f|fedora|r|rhel).* specify target distro
--- delimits tests that follow
-EOF
-)
-
-while true; do
- case $1 in
- -h|--help|help) printf "${helpmsg}\n" "$0"; exit;;
- -x) set -x; shift;;
- -a)
- screen -ls | grep cts
- exit 0;;
- -c|-g) cluster_name=$2; shift; shift;;
- -S) summary=1; saved=1; shift;;
- -s) summary=1; shift;;
- -v) verbose=`expr $verbose + 1`; shift;;
- -p) shift;;
- -e) patterns="$patterns -e `echo $2 | sed 's/ /\\\W/g'`"; shift; shift;;
- -l) logfile=1; shift;;
- -w) watch=1; shift;;
- -f|-sf) summary=1; custom_log=$2; shift; shift;;
- -t) tests="$tests $2"; shift; shift;;
- [0-9]*) tests="$tests $1"; shift;;
- --kill|kill) kill=1; shift; break;;
- --run|run) run=1; shift; break;;
- --boot|boot|start) boot=1; clean=1; shift; break;;
- --clean|clean) clean=1; shift;;
- --inst|--install|install) install=1; clean=1; shift;;
- --setup|setup) setup=1; shift;;
- trace-ls|tls) cmd=$1; shift;;
- trace-add|tadd|trace-rm|trm|trace-set|tset) cmd=$1; trace=$2; shift; shift;;
- f*)
- target="fedora-`echo $1 | sed -e s/fedora// -e s/-// -e s/f//`"
- shift;;
- r|rhel) target="rhel-7"; shift;;
- r*)
- target="rhel-`echo $1 | sed -e s/rhel// -e s/-// -e s/r//`"
- shift;;
- --) shift; tests="$tests $*"; break;;
- "") break;;
- *) echo "Unknown argument: $1"; exit 1;;
- esac
-done
-
-# Add the location of this script
-export PATH="$PATH:$cts_root"
-which cluster-helper &>/dev/null
-if [ $? != 0 ]; then
- echo $0 needs the cluster-helper script to be in your path
- exit 1
-fi
-
-which cluster-clean &>/dev/null
-if [ $? != 0 ]; then
- echo $0 needs the cluster-clean script to be in your path
- exit 1
-fi
-
-if [ "x$cluster_name" = x ] || [ "x$cluster_name" = xpick ]; then
- clusters=`ls -1 ~/.dsh/group/[a-z]+[0-9] | sed s/.*group.// | tr '\n' ' ' `
-
- echo "custom) interactively define a cluster"
- for i in $clusters; do
- echo "$i) `cluster-helper --list short -g $i`"
- done
-
- read -p "Choose a cluster [custom]: " cluster_name
- echo
-fi
-
-if [ -z $cluster_name ]; then
- cluster_name=custom
-fi
-
-
-case $cluster_name in
- custom)
- read -p "Cluster name: " cluster_name
- read -p "Cluster hosts: " cluster_hosts
- read -p "Cluster log file: " cluster_log
- cluster-helper add -g "$cluster_name" -w "$cluster_hosts"
- ;;
- *)
- cluster_hosts=`cluster-helper --list short -g $cluster_name`
- cluster_log=~/cluster-$cluster_name.log;
- ;;
-esac
-
-if [ x$cmd != x ]; then
- config="${CONFIG_DIR}/pacemaker"
- case $cmd in
- trace-ls|tls)
- cluster-helper -g $cluster_name -- grep PCMK_trace_functions $config
- ;;
- trace-add|tadd)
- echo "Adding $trace to PCMK_trace_functions"
- cluster-helper -g $cluster_name -- sed -i "s/.*PCMK_trace_functions=/PCMK_trace_functions=$trace,/" $config
- ;;
- trace-rm|trm)
- echo "Removing $trace from PCMK_trace_functions"
- cluster-helper -g $cluster_name -- sed -i "s/.*PCMK_trace_functions=\\\\\\(.*\\\\\\)$trace,\\\\\\(.*\\\\\\)/PCMK_trace_functions=\\\\\\1\\\\\\2/" $config
- ;;
- trace-set|tset)
- echo "Setting PCMK_trace_functions to '$trace'"
- cluster-helper -g $cluster_name -- sed -i "s/.*PCMK_trace_functions.*/PCMK_trace_functions=$trace/" $config
- ;;
- esac
- exit 0
-fi
-
-if [ $run = 1 ]; then
- install=1
- clean=1
-fi
-
-if [ $clean = 1 ]; then
- rm -f $cluster_log; cluster-clean -g $cluster_name --kill
-elif [ $kill = 1 ]; then
- cluster-clean -g $cluster_name --kill-only
- exit 0
-fi
-
-if [ $install = 1 ]; then
- cluster-helper -g $cluster_name -- yum install -y pacemaker pacemaker-debuginfo pacemaker-cts libqb libqb-debuginfo
-fi
-
-if [ $setup = 1 ]; then
- cluster-init -g $cluster_name $target -u --test
- exit 0
-
-elif [ $boot = 1 ]; then
- $cts_root/CTSlab.py -r -c -g $cluster_name --boot
- rc=$?
- if [ $rc = 0 ]; then
- echo "The cluster is ready..."
- fi
- exit $rc
-
-elif [ $run = 1 ]; then
- $cts_root/CTSlab.py -r -c -g $cluster_name 500 "$@"
- exit $?
-
-elif [ $clean = 1 ]; then
- exit 0
-fi
-
-screen -ls | grep cts-$cluster_name &>/dev/null
-active=$?
-
-if [ -n "$custom_log" ]; then
- cluster_log=$custom_log
-fi
-
-if [ "x$tests" != x ] && [ "x$tests" != "x " ]; then
- for t in $tests; do
- echo "crm_report --cts-log $cluster_log -d -T $t"
- crm_report --cts-log $cluster_log -d -T $t
- done
-
-elif [ $logfile = 1 ]; then
- echo $cluster_log
-
-elif [ $summary = 1 ]; then
- files=$cluster_log
- if [ $saved = 1 ]; then
- files=`ls -1tr ~/CTS-*/cluster-log.txt`
- fi
- for f in $files; do
- echo $f
- case $verbose in
- 0) cat -n $f | grep $patterns | grep -v "CTS: debug:"
- ;;
- 1) cat -n $f | grep $patterns | grep -v "CTS:.* cmd:"
- ;;
- *) cat -n $f | grep $patterns
- ;;
- esac
- echo ""
- done
-
-elif [ $watch = 1 ]; then
- case $verbose in
- 0) tail -F $cluster_log | grep $patterns | grep -v "CTS: debug:"
- ;;
- 1) tail -F $cluster_log | grep $patterns | grep -v "CTS:.* cmd:"
- ;;
- *) tail -F $cluster_log | grep $patterns
- ;;
- esac
-
-elif [ $active = 0 ]; then
- screen -x cts-$cluster_name
-
-else
- touch $cluster_log
-
-# . ~/.bashrc
- export cluster_name cluster_hosts cluster_log
- screen -S cts-$cluster_name bash
-fi
diff --git a/cts/lxc_autogen.sh.in b/cts/lxc_autogen.sh.in
deleted file mode 100644
index 195d3f9..0000000
--- a/cts/lxc_autogen.sh.in
+++ /dev/null
@@ -1,545 +0,0 @@
-#!@BASH_PATH@
-#
-# Copyright 2013-2022 the Pacemaker project contributors
-#
-# The version control history for this file may have further details.
-#
-# This source code is licensed under the GNU General Public License version 2
-# or later (GPLv2+) WITHOUT ANY WARRANTY.
-#
-
-containers="2"
-download=0
-share_configs=0
-# different from the default libvirt network, in case this is run nested in a KVM instance
-addr="192.168.123.1"
-restore=0
-restore_pcmk=0
-restore_all=0
-generate=0
-key_gen=0
-cib=0
-anywhere=0
-add_clone=0
-verify=0
-working_dir="@CRM_PACEMAKER_DIR@/cts/lxc"
-run_dirs="/run /var/run /usr/var/run"
-
-# must be on one line b/c used inside quotes
-SSH_RSYNC_OPTS="-o UserKnownHostsFile=/dev/null -o BatchMode=yes -o StrictHostKeyChecking=no"
-
-function helptext() {
- echo "lxc_autogen.sh - generate libvirt LXC containers for testing purposes"
- echo ""
- echo "Usage: lxc-autogen [options]"
- echo ""
- echo "Options:"
- echo "-g, --generate Generate libvirt LXC environment in directory this script is run from"
- echo "-k, --key-gen Generate Pacemaker Remote key only"
- echo "-r, --restore-libvirt Restore the default network and libvirt config to before this script ran"
- echo "-p, --restore-cib Remove CIB entries this script generated"
- echo "-R, --restore-all Restore both libvirt and CIB, and clean working directory"
- echo " (libvirt xml files are not removed, so resource can be stopped properly)"
- echo ""
- echo "-A, --allow-anywhere Allow the containers to live anywhere in the cluster"
- echo "-a, --add-cib Add CIB entries to create a guest node for each LXC instance"
- echo "-C, --add-clone Add promotable clone resource shared between LXC guest nodes"
- echo "-d, --download-agent Download and install latest VirtualDomain agent"
- echo "-s, --share-configs Synchronize on all known cluster nodes"
- echo "-c, --containers Specify number of containers to generate (default $containers; used with -g)"
- echo "-n, --network Network to override libvirt default (example: -n 192.168.123.1; used with -g)"
- echo "-v, --verify Verify environment is capable of running LXC"
- echo ""
- exit "$1"
-}
-
-while true ; do
- case "$1" in
- --help|-h|-\?) helptext 0;;
- -c|--containers) containers="$2"; shift; shift;;
- -d|--download-agent) download=1; shift;;
- -s|--share-configs) share_configs=1; shift;;
- -n|--network) addr="$2"; shift; shift;;
- -r|--restore-libvirt) restore=1; shift;;
- -p|--restore-cib) restore_pcmk=1; shift;;
- -R|--restore-all)
- restore_all=1
- restore=1
- restore_pcmk=1
- shift;;
- -g|--generate) generate=1; key_gen=1; shift;;
- -k|--key-gen) key_gen=1; shift;;
- -a|--add-cib) cib=1; shift;;
- -A|--allow-anywhere) anywhere=1; shift;;
- -C|--add-clone) add_clone=1; shift;;
- -m|--add-master)
- echo "$1 is deprecated (use -C/--add-clone instead)"
- echo
- add_clone=1
- shift
- ;;
- -v|--verify) verify=1; shift;;
- "") break;;
- *) helptext 1;;
- esac
-done
-
-if [ $verify -eq 1 ]; then
- # verify virsh tool is available and that
- # we can connect to lxc driver.
- virsh -c lxc:/// list --all > /dev/null 2>&1
- if [ $? -ne 0 ]; then
- echo "libvirt LXC driver must be installed (could not connect 'virsh -c lxc:///')"
- # yum install -y libvirt-daemon-driver-lxc libvirt-daemon-lxc libvirt-login-shell
- exit 1
- fi
-
- SELINUX=$(getenforce)
- if [ "$SELINUX" != "Enforcing" ] && [ "$SELINUX" != "Permissive" ]; then
- echo "SELINUX must be set to permissive or enforcing mode"
- exit 1
- fi
-
- ps ax | grep "[l]ibvirtd"
- if [ $? -ne 0 ]; then
- echo "libvirtd must be running"
- exit 1
- fi
-
- which rsync > /dev/null 2>&1
- if [ $? -ne 0 ]; then
- echo "rsync must be installed"
- fi
-
- which pacemaker-remoted > /dev/null 2>&1
- if [ $? -ne 0 ]; then
- echo "pacemaker-remoted must be installed"
- fi
-fi
-
-#strip last digits off addr
-addr="$(echo "$addr" | awk -F. '{print $1"."$2"."$3}')"
-
-node_exec() {
- ssh -o StrictHostKeyChecking=no \
- -o ConnectTimeout=30 \
- -o BatchMode=yes \
- -l root -T "$@"
-}
-
-this_node()
-{
- crm_node -n
-}
-
-other_nodes()
-{
- crm_node -l | awk "\$2 != \"$(this_node)\" {print \$2}"
-}
-
-make_directory()
-{
- # argument must be full path
- DIR="$1"
-
- mkdir -p "$DIR"
- if [ $share_configs -eq 1 ]; then
- for node in $(other_nodes); do
- node_exec "$node" mkdir -p "$DIR"
- done
- fi
-}
-
-sync_file()
-{
- TARGET="$1"
-
- if [ $share_configs -eq 1 ]; then
- for node in $(other_nodes); do
- rsync -ave "ssh $SSH_RSYNC_OPTS" "$TARGET" "${node}:${TARGET}"
- done
- fi
-}
-
-download_agent()
-{
- wget https://raw.github.com/ClusterLabs/resource-agents/main/heartbeat/VirtualDomain
- chmod 755 VirtualDomain
- mv -f VirtualDomain /usr/lib/ocf/resource.d/heartbeat/VirtualDomain
- sync_file /usr/lib/ocf/resource.d/heartbeat/VirtualDomain
-}
-
-set_network()
-{
- rm -f cur_network.xml
- cat << END >> cur_network.xml
-<network>
- <name>default</name>
- <uuid>41ebdb84-7134-1111-a136-91f0f1119225</uuid>
- <forward mode='nat'/>
- <bridge name='virbr0' stp='on' delay='0' />
- <mac address='52:54:00:A8:12:35'/>
- <ip address='$addr.1' netmask='255.255.255.0'>
- <dhcp>
- <range start='$addr.2' end='$addr.254' />
- </dhcp>
- </ip>
-</network>
-END
- sync_file "${working_dir}"/cur_network.xml
-}
-
-distribute_configs()
-{
- for node in $(other_nodes); do
- rsync -ave "ssh $SSH_RSYNC_OPTS" "${working_dir}"/lxc*.xml "${node}:${working_dir}"
- rsync -ave "ssh $SSH_RSYNC_OPTS" "${working_dir}"/lxc*-filesystem "${node}:${working_dir}"
- done
-}
-
-start_network()
-{
- NODE="$1"
-
- node_exec "$NODE" <<-EOF
- cd "$working_dir"
- virsh net-info default >/dev/null 2>&1
- if [ \$? -eq 0 ]; then
- if [ ! -f restore_default.xml ]; then
- virsh net-dumpxml default > restore_default.xml
- fi
- virsh net-destroy default
- virsh net-undefine default
- fi
- virsh net-define cur_network.xml
- virsh net-start default
- virsh net-autostart default
-EOF
-}
-
-start_network_all()
-{
- start_network "$(this_node)"
- if [ $share_configs -eq 1 ]; then
- for node in $(other_nodes); do
- start_network "$node"
- done
- fi
-}
-
-add_hosts_entry()
-{
- IP="$1"
- HNAME="$2"
-
- echo "$IP $HNAME" >>/etc/hosts
- if [ $share_configs -eq 1 ]; then
- for node in $(other_nodes); do
- node_exec "$node" "echo $IP $HNAME >>/etc/hosts"
- done
- fi
-}
-
-generate_key()
-{
- if [ ! -e /etc/pacemaker/authkey ]; then
- make_directory /etc/pacemaker
- dd if=/dev/urandom of=/etc/pacemaker/authkey bs=4096 count=1
- sync_file /etc/pacemaker/authkey
- fi
-}
-
-generate()
-{
- set_network
-
- # Generate libvirt domains in xml
- for (( c=1; c <= containers; c++ ))
- do
- # Clean any previous definition
- rm -rf "lxc$c.xml" "lxc$c-filesystem"
-
- # Create a basic filesystem with run directories
- for dir in $run_dirs; do
- mkdir -p "lxc$c-filesystem/$dir"
- done
-
- # Create libvirt definition
- suffix=$((10 + c))
- prefix="$(echo "$addr" | awk -F. '{print $1"."$2}')"
- subnet="$(echo "$addr" | awk -F. '{print $3}')"
- while [ $suffix -gt 255 ]; do
- subnet=$((subnet + 1))
- suffix=$((subnet - 255))
- done
- cip="$prefix.$subnet.$suffix"
-
- cat << END >> lxc$c.xml
-<domain type='lxc'>
- <name>lxc$c</name>
- <memory unit='KiB'>200704</memory>
- <os>
- <type>exe</type>
- <init>$working_dir/lxc$c-filesystem/launch-helper</init>
- </os>
- <devices>
- <console type='pty'/>
- <filesystem type='ram'>
- <source usage='150528'/>
- <target dir='/dev/shm'/>
- </filesystem>
-END
- for dir in $run_dirs; do
- cat << END >> lxc$c.xml
- <filesystem type='mount'>
- <source dir='$working_dir/lxc$c-filesystem${dir}'/>
- <target dir='$dir'/>
- </filesystem>
-END
- done
- cat << END >> lxc$c.xml
- <interface type='network'>
- <mac address='52:54:$((RANDOM % 9))$((RANDOM % 9)):$((RANDOM % 9))$((RANDOM % 9)):$((RANDOM % 9))$((RANDOM % 9)):$((RANDOM % 9))$((RANDOM % 9))'/>
- <source network='default'/>
- </interface>
- </devices>
-</domain>
-END
-
- # Create CIB definition
- rm -f "container$c.cib"
- cat << END >> "container$c.cib"
- <primitive class="ocf" id="container$c" provider="heartbeat" type="VirtualDomain">
- <instance_attributes id="container$c-instance_attributes">
- <nvpair id="container$c-instance_attributes-force_stop" name="force_stop" value="true"/>
- <nvpair id="container$c-instance_attributes-hypervisor" name="hypervisor" value="lxc:///"/>
- <nvpair id="container$c-instance_attributes-config" name="config" value="$working_dir/lxc$c.xml"/>
- </instance_attributes>
- <utilization id="container$c-utilization">
- <nvpair id="container$c-utilization-cpu" name="cpu" value="1"/>
- <nvpair id="container$c-utilization-hv_memory" name="hv_memory" value="100"/>
- </utilization>
- <meta_attributes id="container$c-meta_attributes">
- <nvpair id="container$c-meta_attributes-remote-node" name="remote-node" value="lxc$c"/>
- </meta_attributes>
- <operations>
- <op id="container$c-monitor-20s" interval="20s" name="monitor"/>
- </operations>
- </primitive>
-END
-
- # Create container init
- rm -f "lxc$c-filesystem/launch-helper"
- cat << END >> "lxc$c-filesystem/launch-helper"
-#!@BASH_PATH@
-ip -f inet addr add "$cip/24" dev eth0
-ip link set eth0 up
-ip route add default via "$addr.1"
-hostname "lxc$c"
-df > "$working_dir/lxc$c-filesystem/disk_usage.txt"
-export PCMK_debugfile="@CRM_LOG_DIR@/pacemaker_remote_lxc$c.log"
-/usr/sbin/pacemaker-remoted
-END
- chmod 711 "lxc$c-filesystem/launch-helper"
-
- add_hosts_entry "$cip" "lxc$c"
- done
-
- # Create CIB fragment for a promotable clone resource
- cat << END > lxc-clone.cib
- <clone id="lxc-clone">
- <primitive class="ocf" id="lxc-rsc" provider="pacemaker" type="Stateful">
- <instance_attributes id="lxc-rsc-instance_attributes"/>
- <operations>
- <op id="lxc-rsc-monitor-interval-10s" interval="10s" name="monitor" role="Promoted" timeout="20s"/>
- <op id="lxc-rsc-monitor-interval-11s" interval="11s" name="monitor" role="Unpromoted" timeout="20s"/>
- </operations>
- </primitive>
- <meta_attributes id="lxc-clone-meta_attributes">
- <nvpair id="lxc-clone-meta_attributes-promotable" name="promotable" value="true"/>
- <nvpair id="lxc-clone-meta_attributes-promoted-max" name="promoted-max" value="1"/>
- <nvpair id="lxc-clone-meta_attributes-clone-max" name="clone-max" value="$containers"/>
- </meta_attributes>
- </clone>
-END
-}
-
-container_names() {
- find . -maxdepth 1 -name "lxc*.xml" -exec basename -s .xml "{}" ";"
-}
-
-apply_cib_clone()
-{
- cibadmin -Q > cur.cib
- export CIB_file=cur.cib
-
- cibadmin -o resources -Mc -x lxc-clone.cib
- for tmp in $(container_names); do
- echo "<rsc_location id=\"lxc-clone-location-${tmp}\" node=\"${tmp}\" rsc=\"lxc-clone\" score=\"INFINITY\"/>" > tmp_constraint
- cibadmin -o constraints -Mc -x tmp_constraint
- done
- # Make sure the version changes even if the content doesn't
- cibadmin -B
- unset CIB_file
-
- cibadmin --replace -o configuration --xml-file cur.cib
- rm -f cur.cib
-}
-
-apply_cib_entries()
-{
- cibadmin -Q > cur.cib
- export CIB_file=cur.cib
- for tmp in container*.cib; do
- cibadmin -o resources -Mc -x "$tmp"
-
- remote_node="$(grep remote-node "${tmp}" | sed -n -e 's/^.*value=\"\(.*\)\".*/\1/p')"
- if [ $anywhere -eq 0 ]; then
- crm_resource -M -r "${tmp//\.cib/}" -H "$(this_node)"
- fi
- echo "<rsc_location id=\"lxc-ping-location-${remote_node}\" node=\"${remote_node}\" rsc=\"Connectivity\" score=\"-INFINITY\"/>" > tmp_constraint
- # Ignore any failure; this constraint is just to help with CTS when the
- # connectivity resources (which fail the guest nodes) are in use.
- cibadmin -o constraints -Mc -x tmp_constraint > /dev/null 2>&1
-
- for rsc in $(crm_resource -l | grep rsc_ ); do
- echo "<rsc_location id=\"lxc-${rsc}-location-${remote_node}\" node=\"${remote_node}\" rsc=\"${rsc}\" score=\"-INFINITY\"/>" > tmp_constraint
- cibadmin -o constraints -Mc -x tmp_constraint > /dev/null 2>&1
- done
-
- rm -f tmp_constraint
- done
-
- # Make sure the version changes even if the content doesn't
- cibadmin -B
-
- unset CIB_file
-
- cibadmin --replace -o configuration --xml-file cur.cib
- rm -f cur.cib
-}
-
-restore_cib()
-{
- cibadmin -Q > cur.cib
- export CIB_file=cur.cib
-
- for tmp in $(container_names); do
- echo "<rsc_location id=\"lxc-clone-location-${tmp}\" node=\"${tmp}\" rsc=\"lxc-clone\" score=\"INFINITY\"/>" > tmp_constraint
- cibadmin -o constraints -D -x tmp_constraint
- echo "<rsc_location id=\"lxc-ping-location-${tmp}\" node=\"${tmp}\" rsc=\"Connectivity\" score=\"-INFINITY\"/>" > tmp_constraint
- cibadmin -o constraints -D -x tmp_constraint
-
- for rsc in $(crm_resource -l | grep rsc_ ); do
- echo "<rsc_location id=\"lxc-${rsc}-location-${tmp}\" node=\"${tmp}\" rsc=\"${rsc}\" score=\"-INFINITY\"/>" > tmp_constraint
- cibadmin -o constraints -D -x tmp_constraint
- done
- rm -f tmp_constraint
- done
- cibadmin -o resources -D -x lxc-clone.cib
-
- for tmp in container*.cib; do
- tmp="${tmp//\.cib/}"
- crm_resource -U -r "$tmp" -H "$(this_node)"
- crm_resource -D -r "$tmp" -t primitive
- done
- # Make sure the version changes even if the content doesn't
- cibadmin -B
- unset CIB_file
-
- cibadmin --replace -o configuration --xml-file cur.cib
- rm -f cur.cib
-
- # Allow the cluster to stabilize before continuing
- crm_resource --wait
-
- # Purge nodes from caches and CIB status section
- for tmp in $(container_names); do
- crm_node --force --remove "$tmp"
- done
-}
-
-restore_network()
-{
- NODE="$1"
-
- node_exec "$NODE" <<-EOF
- cd "$working_dir"
- for tmp in \$(ls lxc*.xml | sed -e 's/\.xml//g'); do
- virsh -c lxc:/// destroy "\$tmp" >/dev/null 2>&1
- virsh -c lxc:/// undefine "\$tmp" >/dev/null 2>&1
- sed -i.bak "/...\....\....\..* \${tmp}/d" /etc/hosts
- done
- virsh net-destroy default >/dev/null 2>&1
- virsh net-undefine default >/dev/null 2>&1
- if [ -f restore_default.xml ]; then
- virsh net-define restore_default.xml
- virsh net-start default
- rm restore_default.xml
- fi
-EOF
- echo "Containers destroyed and default network restored on $NODE"
-}
-
-restore_libvirt()
-{
- restore_network "$(this_node)"
- if [ $share_configs -eq 1 ]; then
- for node in $(other_nodes); do
- restore_network "$node"
- done
- fi
-}
-
-restore_files()
-{
- find . -maxdepth 1 -not -name "lxc*.xml" -a -not -name . -exec rm -rf "{}" ";"
- if [ $share_configs -eq 1 ]; then
- for node in $(other_nodes); do
- node_exec "$node" rm -rf \
- "$working_dir"/lxc*-filesystem \
- "$working_dir"/cur_network.xml
- done
- fi
-}
-
-make_directory "$working_dir"
-cd "$working_dir" || exit 1
-
-# Generate files as requested
-if [ $download -eq 1 ]; then
- download_agent
-fi
-if [ $key_gen -eq 1 ]; then
- generate_key
-fi
-if [ $generate -eq 1 ]; then
- generate
-fi
-if [ $share_configs -eq 1 ]; then
- distribute_configs
-fi
-if [ $generate -eq 1 ]; then
- start_network_all
-fi
-
-# Update cluster as requested
-if [ $cib -eq 1 ]; then
- apply_cib_entries
-fi
-if [ $add_clone -eq 1 ]; then
- apply_cib_clone
-fi
-
-# Restore original state as requested
-if [ $restore_pcmk -eq 1 ]; then
- restore_cib
-fi
-if [ $restore -eq 1 ]; then
- restore_libvirt
-fi
-if [ $restore_all -eq 1 ]; then
- restore_files
-fi
-
-# vim: set expandtab tabstop=8 softtabstop=4 shiftwidth=4 textwidth=80:
diff --git a/cts/scheduler/Makefile.am b/cts/scheduler/Makefile.am
index 9074390..aed7714 100644
--- a/cts/scheduler/Makefile.am
+++ b/cts/scheduler/Makefile.am
@@ -10,9 +10,15 @@ MAINTAINERCLEANFILES = Makefile.in
pedir = $(datadir)/$(PACKAGE)/tests/scheduler
+.PHONY: list
list:
@for T in "$(srcdir)"/xml/*.xml; do \
echo $$(basename $$T .xml); \
done
-SUBDIRS = dot exp scores stderr summary xml
+SUBDIRS = dot \
+ exp \
+ scores \
+ stderr \
+ summary \
+ xml
diff --git a/cts/scheduler/dot/bug-lf-2422.dot b/cts/scheduler/dot/bug-lf-2422.dot
index 72ad12a..6fe2208 100644
--- a/cts/scheduler/dot/bug-lf-2422.dot
+++ b/cts/scheduler/dot/bug-lf-2422.dot
@@ -68,9 +68,12 @@
"ocfs:2_stop_0 qa-suse-3" -> "c-ocfs_stopped_0" [ style = bold]
"ocfs:2_stop_0 qa-suse-3" -> "o2stage:2_stop_0" [ style = bold]
"ocfs:2_stop_0 qa-suse-3" -> "ocfs:0_stop_0 qa-suse-4" [ style = bold]
+"ocfs:2_stop_0 qa-suse-3" -> "ocfs:1_stop_0 qa-suse-1" [ style = bold]
"ocfs:2_stop_0 qa-suse-3" [ style=bold color="green" fontcolor="black"]
"ocfs:3_stop_0 qa-suse-2" -> "c-ocfs_stopped_0" [ style = bold]
"ocfs:3_stop_0 qa-suse-2" -> "o2stage:3_stop_0" [ style = bold]
+"ocfs:3_stop_0 qa-suse-2" -> "ocfs:0_stop_0 qa-suse-4" [ style = bold]
+"ocfs:3_stop_0 qa-suse-2" -> "ocfs:1_stop_0 qa-suse-1" [ style = bold]
"ocfs:3_stop_0 qa-suse-2" -> "ocfs:2_stop_0 qa-suse-3" [ style = bold]
"ocfs:3_stop_0 qa-suse-2" [ style=bold color="green" fontcolor="black"]
"sbd_stonith_monitor_15000 qa-suse-2" [ style=bold color="green" fontcolor="black"]
diff --git a/cts/scheduler/dot/bundle-interleave-start.dot b/cts/scheduler/dot/bundle-interleave-start.dot
index bf6ed7f..109a6cb 100644
--- a/cts/scheduler/dot/bundle-interleave-start.dot
+++ b/cts/scheduler/dot/bundle-interleave-start.dot
@@ -41,9 +41,15 @@
"app-bundle-2_monitor_0 node5" [ style=bold color="green" fontcolor="black"]
"app-bundle-2_monitor_30000 node4" [ style=bold color="green" fontcolor="black"]
"app-bundle-2_start_0 node4" -> "app-bundle-2_monitor_30000 node4" [ style = bold]
-"app-bundle-2_start_0 node4" -> "app:2_monitor_16000 app-bundle-2" [ style = bold]
+"app-bundle-2_start_0 node4" -> "app:2_monitor_15000 app-bundle-2" [ style = bold]
+"app-bundle-2_start_0 node4" -> "app:2_promote_0 app-bundle-2" [ style = bold]
"app-bundle-2_start_0 node4" -> "app:2_start_0 app-bundle-2" [ style = bold]
"app-bundle-2_start_0 node4" [ style=bold color="green" fontcolor="black"]
+"app-bundle-clone_promote_0" -> "app:2_promote_0 app-bundle-2" [ style = bold]
+"app-bundle-clone_promote_0" [ style=bold color="green" fontcolor="orange"]
+"app-bundle-clone_promoted_0" -> "app-bundle_promoted_0" [ style = bold]
+"app-bundle-clone_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"app-bundle-clone_running_0" -> "app-bundle-clone_promote_0" [ style = bold]
"app-bundle-clone_running_0" -> "app-bundle_running_0" [ style = bold]
"app-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"]
"app-bundle-clone_start_0" -> "app-bundle-clone_running_0" [ style = bold]
@@ -133,8 +139,13 @@
"app-bundle-podman-2_start_0 node4" -> "app-bundle-2_start_0 node4" [ style = bold]
"app-bundle-podman-2_start_0 node4" -> "app-bundle-podman-2_monitor_60000 node4" [ style = bold]
"app-bundle-podman-2_start_0 node4" -> "app-bundle_running_0" [ style = bold]
+"app-bundle-podman-2_start_0 node4" -> "app:2_promote_0 app-bundle-2" [ style = bold]
"app-bundle-podman-2_start_0 node4" -> "app:2_start_0 app-bundle-2" [ style = bold]
"app-bundle-podman-2_start_0 node4" [ style=bold color="green" fontcolor="black"]
+"app-bundle_promote_0" -> "app-bundle-clone_promote_0" [ style = bold]
+"app-bundle_promote_0" [ style=bold color="green" fontcolor="orange"]
+"app-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"app-bundle_running_0" -> "app-bundle_promote_0" [ style = bold]
"app-bundle_running_0" [ style=bold color="green" fontcolor="orange"]
"app-bundle_start_0" -> "app-bundle-clone_start_0" [ style = bold]
"app-bundle_start_0" -> "app-bundle-podman-0_start_0 node2" [ style = bold]
@@ -145,15 +156,20 @@
"app:0_start_0 app-bundle-0" -> "app-bundle-clone_running_0" [ style = bold]
"app:0_start_0 app-bundle-0" -> "app:0_monitor_16000 app-bundle-0" [ style = bold]
"app:0_start_0 app-bundle-0" -> "app:1_start_0 app-bundle-1" [ style = bold]
+"app:0_start_0 app-bundle-0" -> "app:2_start_0 app-bundle-2" [ style = bold]
"app:0_start_0 app-bundle-0" [ style=bold color="green" fontcolor="black"]
"app:1_monitor_16000 app-bundle-1" [ style=bold color="green" fontcolor="black"]
"app:1_start_0 app-bundle-1" -> "app-bundle-clone_running_0" [ style = bold]
"app:1_start_0 app-bundle-1" -> "app:1_monitor_16000 app-bundle-1" [ style = bold]
"app:1_start_0 app-bundle-1" -> "app:2_start_0 app-bundle-2" [ style = bold]
"app:1_start_0 app-bundle-1" [ style=bold color="green" fontcolor="black"]
-"app:2_monitor_16000 app-bundle-2" [ style=bold color="green" fontcolor="black"]
+"app:2_monitor_15000 app-bundle-2" [ style=bold color="green" fontcolor="black"]
+"app:2_promote_0 app-bundle-2" -> "app-bundle-clone_promoted_0" [ style = bold]
+"app:2_promote_0 app-bundle-2" -> "app:2_monitor_15000 app-bundle-2" [ style = bold]
+"app:2_promote_0 app-bundle-2" [ style=bold color="green" fontcolor="black"]
"app:2_start_0 app-bundle-2" -> "app-bundle-clone_running_0" [ style = bold]
-"app:2_start_0 app-bundle-2" -> "app:2_monitor_16000 app-bundle-2" [ style = bold]
+"app:2_start_0 app-bundle-2" -> "app:2_monitor_15000 app-bundle-2" [ style = bold]
+"app:2_start_0 app-bundle-2" -> "app:2_promote_0 app-bundle-2" [ style = bold]
"app:2_start_0 app-bundle-2" [ style=bold color="green" fontcolor="black"]
"base-bundle-0_monitor_0 node1" -> "base-bundle-0_start_0 node2" [ style = bold]
"base-bundle-0_monitor_0 node1" [ style=bold color="green" fontcolor="black"]
@@ -197,9 +213,15 @@
"base-bundle-2_monitor_0 node5" [ style=bold color="green" fontcolor="black"]
"base-bundle-2_monitor_30000 node4" [ style=bold color="green" fontcolor="black"]
"base-bundle-2_start_0 node4" -> "base-bundle-2_monitor_30000 node4" [ style = bold]
-"base-bundle-2_start_0 node4" -> "base:2_monitor_16000 base-bundle-2" [ style = bold]
+"base-bundle-2_start_0 node4" -> "base:2_monitor_15000 base-bundle-2" [ style = bold]
+"base-bundle-2_start_0 node4" -> "base:2_promote_0 base-bundle-2" [ style = bold]
"base-bundle-2_start_0 node4" -> "base:2_start_0 base-bundle-2" [ style = bold]
"base-bundle-2_start_0 node4" [ style=bold color="green" fontcolor="black"]
+"base-bundle-clone_promote_0" -> "base:2_promote_0 base-bundle-2" [ style = bold]
+"base-bundle-clone_promote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_promoted_0" -> "base-bundle_promoted_0" [ style = bold]
+"base-bundle-clone_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_running_0" -> "base-bundle-clone_promote_0" [ style = bold]
"base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold]
"base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"]
"base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold]
@@ -289,9 +311,15 @@
"base-bundle-podman-2_start_0 node4" -> "base-bundle-2_start_0 node4" [ style = bold]
"base-bundle-podman-2_start_0 node4" -> "base-bundle-podman-2_monitor_60000 node4" [ style = bold]
"base-bundle-podman-2_start_0 node4" -> "base-bundle_running_0" [ style = bold]
+"base-bundle-podman-2_start_0 node4" -> "base:2_promote_0 base-bundle-2" [ style = bold]
"base-bundle-podman-2_start_0 node4" -> "base:2_start_0 base-bundle-2" [ style = bold]
"base-bundle-podman-2_start_0 node4" [ style=bold color="green" fontcolor="black"]
+"base-bundle_promote_0" -> "base-bundle-clone_promote_0" [ style = bold]
+"base-bundle_promote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_promoted_0" -> "app-bundle_promote_0" [ style = bold]
+"base-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"]
"base-bundle_running_0" -> "app-bundle_start_0" [ style = bold]
+"base-bundle_running_0" -> "base-bundle_promote_0" [ style = bold]
"base-bundle_running_0" [ style=bold color="green" fontcolor="orange"]
"base-bundle_start_0" -> "base-bundle-clone_start_0" [ style = bold]
"base-bundle_start_0" -> "base-bundle-podman-0_start_0 node2" [ style = bold]
@@ -303,6 +331,7 @@
"base:0_start_0 base-bundle-0" -> "base-bundle-clone_running_0" [ style = bold]
"base:0_start_0 base-bundle-0" -> "base:0_monitor_16000 base-bundle-0" [ style = bold]
"base:0_start_0 base-bundle-0" -> "base:1_start_0 base-bundle-1" [ style = bold]
+"base:0_start_0 base-bundle-0" -> "base:2_start_0 base-bundle-2" [ style = bold]
"base:0_start_0 base-bundle-0" [ style=bold color="green" fontcolor="black"]
"base:1_monitor_16000 base-bundle-1" [ style=bold color="green" fontcolor="black"]
"base:1_start_0 base-bundle-1" -> "app-bundle-podman-1_start_0 node3" [ style = bold]
@@ -310,9 +339,14 @@
"base:1_start_0 base-bundle-1" -> "base:1_monitor_16000 base-bundle-1" [ style = bold]
"base:1_start_0 base-bundle-1" -> "base:2_start_0 base-bundle-2" [ style = bold]
"base:1_start_0 base-bundle-1" [ style=bold color="green" fontcolor="black"]
-"base:2_monitor_16000 base-bundle-2" [ style=bold color="green" fontcolor="black"]
+"base:2_monitor_15000 base-bundle-2" [ style=bold color="green" fontcolor="black"]
+"base:2_promote_0 base-bundle-2" -> "app:2_promote_0 app-bundle-2" [ style = bold]
+"base:2_promote_0 base-bundle-2" -> "base-bundle-clone_promoted_0" [ style = bold]
+"base:2_promote_0 base-bundle-2" -> "base:2_monitor_15000 base-bundle-2" [ style = bold]
+"base:2_promote_0 base-bundle-2" [ style=bold color="green" fontcolor="black"]
"base:2_start_0 base-bundle-2" -> "app-bundle-podman-2_start_0 node4" [ style = bold]
"base:2_start_0 base-bundle-2" -> "base-bundle-clone_running_0" [ style = bold]
-"base:2_start_0 base-bundle-2" -> "base:2_monitor_16000 base-bundle-2" [ style = bold]
+"base:2_start_0 base-bundle-2" -> "base:2_monitor_15000 base-bundle-2" [ style = bold]
+"base:2_start_0 base-bundle-2" -> "base:2_promote_0 base-bundle-2" [ style = bold]
"base:2_start_0 base-bundle-2" [ style=bold color="green" fontcolor="black"]
}
diff --git a/cts/scheduler/dot/bundle-nested-colocation.dot b/cts/scheduler/dot/bundle-nested-colocation.dot
index 87b6c0d..c11447a 100644
--- a/cts/scheduler/dot/bundle-nested-colocation.dot
+++ b/cts/scheduler/dot/bundle-nested-colocation.dot
@@ -139,6 +139,7 @@
"rabbitmq:0_start_0 rabbitmq-bundle-0" -> "rabbitmq-bundle-clone_running_0" [ style = bold]
"rabbitmq:0_start_0 rabbitmq-bundle-0" -> "rabbitmq:0_monitor_10000 rabbitmq-bundle-0" [ style = bold]
"rabbitmq:0_start_0 rabbitmq-bundle-0" -> "rabbitmq:1_start_0 rabbitmq-bundle-1" [ style = bold]
+"rabbitmq:0_start_0 rabbitmq-bundle-0" -> "rabbitmq:2_start_0 rabbitmq-bundle-2" [ style = bold]
"rabbitmq:0_start_0 rabbitmq-bundle-0" [ style=bold color="green" fontcolor="black"]
"rabbitmq:1_monitor_0 rabbitmq-bundle-1" -> "rabbitmq-bundle-clone_start_0" [ style = bold]
"rabbitmq:1_monitor_0 rabbitmq-bundle-1" [ style=bold color="green" fontcolor="black"]
diff --git a/cts/scheduler/dot/bundle-order-startup-clone-2.dot b/cts/scheduler/dot/bundle-order-startup-clone-2.dot
index b04e9f7..93877f4 100644
--- a/cts/scheduler/dot/bundle-order-startup-clone-2.dot
+++ b/cts/scheduler/dot/bundle-order-startup-clone-2.dot
@@ -114,6 +114,7 @@
"galera:0_start_0 galera-bundle-0" -> "galera:0_monitor_20000 galera-bundle-0" [ style = bold]
"galera:0_start_0 galera-bundle-0" -> "galera:0_monitor_30000 galera-bundle-0" [ style = bold]
"galera:0_start_0 galera-bundle-0" -> "galera:1_start_0 galera-bundle-1" [ style = bold]
+"galera:0_start_0 galera-bundle-0" -> "galera:2_start_0 galera-bundle-2" [ style = bold]
"galera:0_start_0 galera-bundle-0" [ style=bold color="green" fontcolor="black"]
"galera:1_monitor_20000 galera-bundle-1" [ style=bold color="green" fontcolor="black"]
"galera:1_monitor_30000 galera-bundle-1" [ style=bold color="green" fontcolor="black"]
@@ -343,6 +344,7 @@
"redis:0_start_0 redis-bundle-0" -> "redis:0_monitor_20000 redis-bundle-0" [ style = bold]
"redis:0_start_0 redis-bundle-0" -> "redis:0_promote_0 redis-bundle-0" [ style = bold]
"redis:0_start_0 redis-bundle-0" -> "redis:1_start_0 redis-bundle-1" [ style = bold]
+"redis:0_start_0 redis-bundle-0" -> "redis:2_start_0 redis-bundle-2" [ style = bold]
"redis:0_start_0 redis-bundle-0" [ style=bold color="green" fontcolor="black"]
"redis:1_monitor_20000 redis-bundle-1" [ style=bold color="green" fontcolor="black"]
"redis:1_post_notify_promote_0 redis-bundle-1" -> "redis-bundle-master_confirmed-post_notify_promoted_0" [ style = bold]
diff --git a/cts/scheduler/dot/bundle-probe-remotes.dot b/cts/scheduler/dot/bundle-probe-remotes.dot
index 958cc90..260b0be 100644
--- a/cts/scheduler/dot/bundle-probe-remotes.dot
+++ b/cts/scheduler/dot/bundle-probe-remotes.dot
@@ -53,21 +53,31 @@
"dummy1:0_monitor_10000 scale1-bundle-0" [ style=bold color="green" fontcolor="black"]
"dummy1:0_start_0 scale1-bundle-0" -> "dummy1:0_monitor_10000 scale1-bundle-0" [ style = bold]
"dummy1:0_start_0 scale1-bundle-0" -> "dummy1:1_start_0 scale1-bundle-1" [ style = bold]
+"dummy1:0_start_0 scale1-bundle-0" -> "dummy1:2_start_0 scale1-bundle-2" [ style = bold]
+"dummy1:0_start_0 scale1-bundle-0" -> "dummy1:3_start_0 scale1-bundle-3" [ style = bold]
+"dummy1:0_start_0 scale1-bundle-0" -> "dummy1:4_start_0 scale1-bundle-4" [ style = bold]
+"dummy1:0_start_0 scale1-bundle-0" -> "dummy1:5_start_0 scale1-bundle-5" [ style = bold]
"dummy1:0_start_0 scale1-bundle-0" -> "scale1-bundle-clone_running_0" [ style = bold]
"dummy1:0_start_0 scale1-bundle-0" [ style=bold color="green" fontcolor="black"]
"dummy1:1_monitor_10000 scale1-bundle-1" [ style=bold color="green" fontcolor="black"]
"dummy1:1_start_0 scale1-bundle-1" -> "dummy1:1_monitor_10000 scale1-bundle-1" [ style = bold]
"dummy1:1_start_0 scale1-bundle-1" -> "dummy1:2_start_0 scale1-bundle-2" [ style = bold]
+"dummy1:1_start_0 scale1-bundle-1" -> "dummy1:3_start_0 scale1-bundle-3" [ style = bold]
+"dummy1:1_start_0 scale1-bundle-1" -> "dummy1:4_start_0 scale1-bundle-4" [ style = bold]
+"dummy1:1_start_0 scale1-bundle-1" -> "dummy1:5_start_0 scale1-bundle-5" [ style = bold]
"dummy1:1_start_0 scale1-bundle-1" -> "scale1-bundle-clone_running_0" [ style = bold]
"dummy1:1_start_0 scale1-bundle-1" [ style=bold color="green" fontcolor="black"]
"dummy1:2_monitor_10000 scale1-bundle-2" [ style=bold color="green" fontcolor="black"]
"dummy1:2_start_0 scale1-bundle-2" -> "dummy1:2_monitor_10000 scale1-bundle-2" [ style = bold]
"dummy1:2_start_0 scale1-bundle-2" -> "dummy1:3_start_0 scale1-bundle-3" [ style = bold]
+"dummy1:2_start_0 scale1-bundle-2" -> "dummy1:4_start_0 scale1-bundle-4" [ style = bold]
+"dummy1:2_start_0 scale1-bundle-2" -> "dummy1:5_start_0 scale1-bundle-5" [ style = bold]
"dummy1:2_start_0 scale1-bundle-2" -> "scale1-bundle-clone_running_0" [ style = bold]
"dummy1:2_start_0 scale1-bundle-2" [ style=bold color="green" fontcolor="black"]
"dummy1:3_monitor_10000 scale1-bundle-3" [ style=bold color="green" fontcolor="black"]
"dummy1:3_start_0 scale1-bundle-3" -> "dummy1:3_monitor_10000 scale1-bundle-3" [ style = bold]
"dummy1:3_start_0 scale1-bundle-3" -> "dummy1:4_start_0 scale1-bundle-4" [ style = bold]
+"dummy1:3_start_0 scale1-bundle-3" -> "dummy1:5_start_0 scale1-bundle-5" [ style = bold]
"dummy1:3_start_0 scale1-bundle-3" -> "scale1-bundle-clone_running_0" [ style = bold]
"dummy1:3_start_0 scale1-bundle-3" [ style=bold color="green" fontcolor="black"]
"dummy1:4_monitor_10000 scale1-bundle-4" [ style=bold color="green" fontcolor="black"]
diff --git a/cts/scheduler/dot/bundle-promoted-anticolocation-1.dot b/cts/scheduler/dot/bundle-promoted-anticolocation-1.dot
new file mode 100644
index 0000000..ce2cf6a
--- /dev/null
+++ b/cts/scheduler/dot/bundle-promoted-anticolocation-1.dot
@@ -0,0 +1,7 @@
+ digraph "g" {
+"vip_monitor_10000 node1" [ style=bold color="green" fontcolor="black"]
+"vip_start_0 node1" -> "vip_monitor_10000 node1" [ style = bold]
+"vip_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"vip_stop_0 node3" -> "vip_start_0 node1" [ style = bold]
+"vip_stop_0 node3" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/bundle-promoted-anticolocation-2.dot b/cts/scheduler/dot/bundle-promoted-anticolocation-2.dot
new file mode 100644
index 0000000..ce2cf6a
--- /dev/null
+++ b/cts/scheduler/dot/bundle-promoted-anticolocation-2.dot
@@ -0,0 +1,7 @@
+ digraph "g" {
+"vip_monitor_10000 node1" [ style=bold color="green" fontcolor="black"]
+"vip_start_0 node1" -> "vip_monitor_10000 node1" [ style = bold]
+"vip_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"vip_stop_0 node3" -> "vip_start_0 node1" [ style = bold]
+"vip_stop_0 node3" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/bundle-promoted-anticolocation-3.dot b/cts/scheduler/dot/bundle-promoted-anticolocation-3.dot
new file mode 100644
index 0000000..a71ec35
--- /dev/null
+++ b/cts/scheduler/dot/bundle-promoted-anticolocation-3.dot
@@ -0,0 +1,32 @@
+ digraph "g" {
+"Cancel base_monitor_15000 base-bundle-2" -> "base_demote_0 base-bundle-2" [ style = bold]
+"Cancel base_monitor_15000 base-bundle-2" [ style=bold color="green" fontcolor="black"]
+"Cancel base_monitor_16000 base-bundle-1" -> "base_promote_0 base-bundle-1" [ style = bold]
+"Cancel base_monitor_16000 base-bundle-1" [ style=bold color="green" fontcolor="black"]
+"base-bundle-clone_demote_0" -> "base-bundle-clone_demoted_0" [ style = bold]
+"base-bundle-clone_demote_0" -> "base_demote_0 base-bundle-2" [ style = bold]
+"base-bundle-clone_demote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_demoted_0" -> "base-bundle-clone_promote_0" [ style = bold]
+"base-bundle-clone_demoted_0" -> "base-bundle_demoted_0" [ style = bold]
+"base-bundle-clone_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_promote_0" -> "base_promote_0 base-bundle-1" [ style = bold]
+"base-bundle-clone_promote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_promoted_0" -> "base-bundle_promoted_0" [ style = bold]
+"base-bundle-clone_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_demote_0" -> "base-bundle-clone_demote_0" [ style = bold]
+"base-bundle_demote_0" -> "base-bundle_demoted_0" [ style = bold]
+"base-bundle_demote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_demoted_0" -> "base-bundle_promote_0" [ style = bold]
+"base-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_promote_0" -> "base-bundle-clone_promote_0" [ style = bold]
+"base-bundle_promote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"base_demote_0 base-bundle-2" -> "base-bundle-clone_demoted_0" [ style = bold]
+"base_demote_0 base-bundle-2" -> "base_monitor_16000 base-bundle-2" [ style = bold]
+"base_demote_0 base-bundle-2" [ style=bold color="green" fontcolor="black"]
+"base_monitor_15000 base-bundle-1" [ style=bold color="green" fontcolor="black"]
+"base_monitor_16000 base-bundle-2" [ style=bold color="green" fontcolor="black"]
+"base_promote_0 base-bundle-1" -> "base-bundle-clone_promoted_0" [ style = bold]
+"base_promote_0 base-bundle-1" -> "base_monitor_15000 base-bundle-1" [ style = bold]
+"base_promote_0 base-bundle-1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/bundle-promoted-anticolocation-4.dot b/cts/scheduler/dot/bundle-promoted-anticolocation-4.dot
new file mode 100644
index 0000000..a71ec35
--- /dev/null
+++ b/cts/scheduler/dot/bundle-promoted-anticolocation-4.dot
@@ -0,0 +1,32 @@
+ digraph "g" {
+"Cancel base_monitor_15000 base-bundle-2" -> "base_demote_0 base-bundle-2" [ style = bold]
+"Cancel base_monitor_15000 base-bundle-2" [ style=bold color="green" fontcolor="black"]
+"Cancel base_monitor_16000 base-bundle-1" -> "base_promote_0 base-bundle-1" [ style = bold]
+"Cancel base_monitor_16000 base-bundle-1" [ style=bold color="green" fontcolor="black"]
+"base-bundle-clone_demote_0" -> "base-bundle-clone_demoted_0" [ style = bold]
+"base-bundle-clone_demote_0" -> "base_demote_0 base-bundle-2" [ style = bold]
+"base-bundle-clone_demote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_demoted_0" -> "base-bundle-clone_promote_0" [ style = bold]
+"base-bundle-clone_demoted_0" -> "base-bundle_demoted_0" [ style = bold]
+"base-bundle-clone_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_promote_0" -> "base_promote_0 base-bundle-1" [ style = bold]
+"base-bundle-clone_promote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_promoted_0" -> "base-bundle_promoted_0" [ style = bold]
+"base-bundle-clone_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_demote_0" -> "base-bundle-clone_demote_0" [ style = bold]
+"base-bundle_demote_0" -> "base-bundle_demoted_0" [ style = bold]
+"base-bundle_demote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_demoted_0" -> "base-bundle_promote_0" [ style = bold]
+"base-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_promote_0" -> "base-bundle-clone_promote_0" [ style = bold]
+"base-bundle_promote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"base_demote_0 base-bundle-2" -> "base-bundle-clone_demoted_0" [ style = bold]
+"base_demote_0 base-bundle-2" -> "base_monitor_16000 base-bundle-2" [ style = bold]
+"base_demote_0 base-bundle-2" [ style=bold color="green" fontcolor="black"]
+"base_monitor_15000 base-bundle-1" [ style=bold color="green" fontcolor="black"]
+"base_monitor_16000 base-bundle-2" [ style=bold color="green" fontcolor="black"]
+"base_promote_0 base-bundle-1" -> "base-bundle-clone_promoted_0" [ style = bold]
+"base_promote_0 base-bundle-1" -> "base_monitor_15000 base-bundle-1" [ style = bold]
+"base_promote_0 base-bundle-1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/bundle-promoted-anticolocation-5.dot b/cts/scheduler/dot/bundle-promoted-anticolocation-5.dot
new file mode 100644
index 0000000..b3db02e
--- /dev/null
+++ b/cts/scheduler/dot/bundle-promoted-anticolocation-5.dot
@@ -0,0 +1,32 @@
+ digraph "g" {
+"Cancel bundle-a-rsc_monitor_15000 bundle-a-1" -> "bundle-a-rsc_demote_0 bundle-a-1" [ style = bold]
+"Cancel bundle-a-rsc_monitor_15000 bundle-a-1" [ style=bold color="green" fontcolor="black"]
+"Cancel bundle-a-rsc_monitor_16000 bundle-a-2" -> "bundle-a-rsc_promote_0 bundle-a-2" [ style = bold]
+"Cancel bundle-a-rsc_monitor_16000 bundle-a-2" [ style=bold color="green" fontcolor="black"]
+"bundle-a-clone_demote_0" -> "bundle-a-clone_demoted_0" [ style = bold]
+"bundle-a-clone_demote_0" -> "bundle-a-rsc_demote_0 bundle-a-1" [ style = bold]
+"bundle-a-clone_demote_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a-clone_demoted_0" -> "bundle-a-clone_promote_0" [ style = bold]
+"bundle-a-clone_demoted_0" -> "bundle-a_demoted_0" [ style = bold]
+"bundle-a-clone_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a-clone_promote_0" -> "bundle-a-rsc_promote_0 bundle-a-2" [ style = bold]
+"bundle-a-clone_promote_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a-clone_promoted_0" -> "bundle-a_promoted_0" [ style = bold]
+"bundle-a-clone_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a-rsc_demote_0 bundle-a-1" -> "bundle-a-clone_demoted_0" [ style = bold]
+"bundle-a-rsc_demote_0 bundle-a-1" -> "bundle-a-rsc_monitor_16000 bundle-a-1" [ style = bold]
+"bundle-a-rsc_demote_0 bundle-a-1" [ style=bold color="green" fontcolor="black"]
+"bundle-a-rsc_monitor_15000 bundle-a-2" [ style=bold color="green" fontcolor="black"]
+"bundle-a-rsc_monitor_16000 bundle-a-1" [ style=bold color="green" fontcolor="black"]
+"bundle-a-rsc_promote_0 bundle-a-2" -> "bundle-a-clone_promoted_0" [ style = bold]
+"bundle-a-rsc_promote_0 bundle-a-2" -> "bundle-a-rsc_monitor_15000 bundle-a-2" [ style = bold]
+"bundle-a-rsc_promote_0 bundle-a-2" [ style=bold color="green" fontcolor="black"]
+"bundle-a_demote_0" -> "bundle-a-clone_demote_0" [ style = bold]
+"bundle-a_demote_0" -> "bundle-a_demoted_0" [ style = bold]
+"bundle-a_demote_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a_demoted_0" -> "bundle-a_promote_0" [ style = bold]
+"bundle-a_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a_promote_0" -> "bundle-a-clone_promote_0" [ style = bold]
+"bundle-a_promote_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a_promoted_0" [ style=bold color="green" fontcolor="orange"]
+}
diff --git a/cts/scheduler/dot/bundle-promoted-anticolocation-6.dot b/cts/scheduler/dot/bundle-promoted-anticolocation-6.dot
new file mode 100644
index 0000000..b3db02e
--- /dev/null
+++ b/cts/scheduler/dot/bundle-promoted-anticolocation-6.dot
@@ -0,0 +1,32 @@
+ digraph "g" {
+"Cancel bundle-a-rsc_monitor_15000 bundle-a-1" -> "bundle-a-rsc_demote_0 bundle-a-1" [ style = bold]
+"Cancel bundle-a-rsc_monitor_15000 bundle-a-1" [ style=bold color="green" fontcolor="black"]
+"Cancel bundle-a-rsc_monitor_16000 bundle-a-2" -> "bundle-a-rsc_promote_0 bundle-a-2" [ style = bold]
+"Cancel bundle-a-rsc_monitor_16000 bundle-a-2" [ style=bold color="green" fontcolor="black"]
+"bundle-a-clone_demote_0" -> "bundle-a-clone_demoted_0" [ style = bold]
+"bundle-a-clone_demote_0" -> "bundle-a-rsc_demote_0 bundle-a-1" [ style = bold]
+"bundle-a-clone_demote_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a-clone_demoted_0" -> "bundle-a-clone_promote_0" [ style = bold]
+"bundle-a-clone_demoted_0" -> "bundle-a_demoted_0" [ style = bold]
+"bundle-a-clone_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a-clone_promote_0" -> "bundle-a-rsc_promote_0 bundle-a-2" [ style = bold]
+"bundle-a-clone_promote_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a-clone_promoted_0" -> "bundle-a_promoted_0" [ style = bold]
+"bundle-a-clone_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a-rsc_demote_0 bundle-a-1" -> "bundle-a-clone_demoted_0" [ style = bold]
+"bundle-a-rsc_demote_0 bundle-a-1" -> "bundle-a-rsc_monitor_16000 bundle-a-1" [ style = bold]
+"bundle-a-rsc_demote_0 bundle-a-1" [ style=bold color="green" fontcolor="black"]
+"bundle-a-rsc_monitor_15000 bundle-a-2" [ style=bold color="green" fontcolor="black"]
+"bundle-a-rsc_monitor_16000 bundle-a-1" [ style=bold color="green" fontcolor="black"]
+"bundle-a-rsc_promote_0 bundle-a-2" -> "bundle-a-clone_promoted_0" [ style = bold]
+"bundle-a-rsc_promote_0 bundle-a-2" -> "bundle-a-rsc_monitor_15000 bundle-a-2" [ style = bold]
+"bundle-a-rsc_promote_0 bundle-a-2" [ style=bold color="green" fontcolor="black"]
+"bundle-a_demote_0" -> "bundle-a-clone_demote_0" [ style = bold]
+"bundle-a_demote_0" -> "bundle-a_demoted_0" [ style = bold]
+"bundle-a_demote_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a_demoted_0" -> "bundle-a_promote_0" [ style = bold]
+"bundle-a_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a_promote_0" -> "bundle-a-clone_promote_0" [ style = bold]
+"bundle-a_promote_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a_promoted_0" [ style=bold color="green" fontcolor="orange"]
+}
diff --git a/cts/scheduler/dot/bundle-promoted-colocation-1.dot b/cts/scheduler/dot/bundle-promoted-colocation-1.dot
new file mode 100644
index 0000000..6b857e5
--- /dev/null
+++ b/cts/scheduler/dot/bundle-promoted-colocation-1.dot
@@ -0,0 +1,7 @@
+ digraph "g" {
+"vip_monitor_10000 node3" [ style=bold color="green" fontcolor="black"]
+"vip_start_0 node3" -> "vip_monitor_10000 node3" [ style = bold]
+"vip_start_0 node3" [ style=bold color="green" fontcolor="black"]
+"vip_stop_0 node1" -> "vip_start_0 node3" [ style = bold]
+"vip_stop_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/bundle-promoted-colocation-2.dot b/cts/scheduler/dot/bundle-promoted-colocation-2.dot
new file mode 100644
index 0000000..6b857e5
--- /dev/null
+++ b/cts/scheduler/dot/bundle-promoted-colocation-2.dot
@@ -0,0 +1,7 @@
+ digraph "g" {
+"vip_monitor_10000 node3" [ style=bold color="green" fontcolor="black"]
+"vip_start_0 node3" -> "vip_monitor_10000 node3" [ style = bold]
+"vip_start_0 node3" [ style=bold color="green" fontcolor="black"]
+"vip_stop_0 node1" -> "vip_start_0 node3" [ style = bold]
+"vip_stop_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/bundle-promoted-colocation-3.dot b/cts/scheduler/dot/bundle-promoted-colocation-3.dot
new file mode 100644
index 0000000..69f6cf5
--- /dev/null
+++ b/cts/scheduler/dot/bundle-promoted-colocation-3.dot
@@ -0,0 +1,32 @@
+ digraph "g" {
+"Cancel base_monitor_15000 base-bundle-2" -> "base_demote_0 base-bundle-2" [ style = bold]
+"Cancel base_monitor_15000 base-bundle-2" [ style=bold color="green" fontcolor="black"]
+"Cancel base_monitor_16000 base-bundle-0" -> "base_promote_0 base-bundle-0" [ style = bold]
+"Cancel base_monitor_16000 base-bundle-0" [ style=bold color="green" fontcolor="black"]
+"base-bundle-clone_demote_0" -> "base-bundle-clone_demoted_0" [ style = bold]
+"base-bundle-clone_demote_0" -> "base_demote_0 base-bundle-2" [ style = bold]
+"base-bundle-clone_demote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_demoted_0" -> "base-bundle-clone_promote_0" [ style = bold]
+"base-bundle-clone_demoted_0" -> "base-bundle_demoted_0" [ style = bold]
+"base-bundle-clone_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_promote_0" -> "base_promote_0 base-bundle-0" [ style = bold]
+"base-bundle-clone_promote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_promoted_0" -> "base-bundle_promoted_0" [ style = bold]
+"base-bundle-clone_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_demote_0" -> "base-bundle-clone_demote_0" [ style = bold]
+"base-bundle_demote_0" -> "base-bundle_demoted_0" [ style = bold]
+"base-bundle_demote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_demoted_0" -> "base-bundle_promote_0" [ style = bold]
+"base-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_promote_0" -> "base-bundle-clone_promote_0" [ style = bold]
+"base-bundle_promote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"base_demote_0 base-bundle-2" -> "base-bundle-clone_demoted_0" [ style = bold]
+"base_demote_0 base-bundle-2" -> "base_monitor_16000 base-bundle-2" [ style = bold]
+"base_demote_0 base-bundle-2" [ style=bold color="green" fontcolor="black"]
+"base_monitor_15000 base-bundle-0" [ style=bold color="green" fontcolor="black"]
+"base_monitor_16000 base-bundle-2" [ style=bold color="green" fontcolor="black"]
+"base_promote_0 base-bundle-0" -> "base-bundle-clone_promoted_0" [ style = bold]
+"base_promote_0 base-bundle-0" -> "base_monitor_15000 base-bundle-0" [ style = bold]
+"base_promote_0 base-bundle-0" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/bundle-promoted-colocation-4.dot b/cts/scheduler/dot/bundle-promoted-colocation-4.dot
new file mode 100644
index 0000000..69f6cf5
--- /dev/null
+++ b/cts/scheduler/dot/bundle-promoted-colocation-4.dot
@@ -0,0 +1,32 @@
+ digraph "g" {
+"Cancel base_monitor_15000 base-bundle-2" -> "base_demote_0 base-bundle-2" [ style = bold]
+"Cancel base_monitor_15000 base-bundle-2" [ style=bold color="green" fontcolor="black"]
+"Cancel base_monitor_16000 base-bundle-0" -> "base_promote_0 base-bundle-0" [ style = bold]
+"Cancel base_monitor_16000 base-bundle-0" [ style=bold color="green" fontcolor="black"]
+"base-bundle-clone_demote_0" -> "base-bundle-clone_demoted_0" [ style = bold]
+"base-bundle-clone_demote_0" -> "base_demote_0 base-bundle-2" [ style = bold]
+"base-bundle-clone_demote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_demoted_0" -> "base-bundle-clone_promote_0" [ style = bold]
+"base-bundle-clone_demoted_0" -> "base-bundle_demoted_0" [ style = bold]
+"base-bundle-clone_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_promote_0" -> "base_promote_0 base-bundle-0" [ style = bold]
+"base-bundle-clone_promote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_promoted_0" -> "base-bundle_promoted_0" [ style = bold]
+"base-bundle-clone_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_demote_0" -> "base-bundle-clone_demote_0" [ style = bold]
+"base-bundle_demote_0" -> "base-bundle_demoted_0" [ style = bold]
+"base-bundle_demote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_demoted_0" -> "base-bundle_promote_0" [ style = bold]
+"base-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_promote_0" -> "base-bundle-clone_promote_0" [ style = bold]
+"base-bundle_promote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"base_demote_0 base-bundle-2" -> "base-bundle-clone_demoted_0" [ style = bold]
+"base_demote_0 base-bundle-2" -> "base_monitor_16000 base-bundle-2" [ style = bold]
+"base_demote_0 base-bundle-2" [ style=bold color="green" fontcolor="black"]
+"base_monitor_15000 base-bundle-0" [ style=bold color="green" fontcolor="black"]
+"base_monitor_16000 base-bundle-2" [ style=bold color="green" fontcolor="black"]
+"base_promote_0 base-bundle-0" -> "base-bundle-clone_promoted_0" [ style = bold]
+"base_promote_0 base-bundle-0" -> "base_monitor_15000 base-bundle-0" [ style = bold]
+"base_promote_0 base-bundle-0" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/bundle-promoted-colocation-5.dot b/cts/scheduler/dot/bundle-promoted-colocation-5.dot
new file mode 100644
index 0000000..b3db02e
--- /dev/null
+++ b/cts/scheduler/dot/bundle-promoted-colocation-5.dot
@@ -0,0 +1,32 @@
+ digraph "g" {
+"Cancel bundle-a-rsc_monitor_15000 bundle-a-1" -> "bundle-a-rsc_demote_0 bundle-a-1" [ style = bold]
+"Cancel bundle-a-rsc_monitor_15000 bundle-a-1" [ style=bold color="green" fontcolor="black"]
+"Cancel bundle-a-rsc_monitor_16000 bundle-a-2" -> "bundle-a-rsc_promote_0 bundle-a-2" [ style = bold]
+"Cancel bundle-a-rsc_monitor_16000 bundle-a-2" [ style=bold color="green" fontcolor="black"]
+"bundle-a-clone_demote_0" -> "bundle-a-clone_demoted_0" [ style = bold]
+"bundle-a-clone_demote_0" -> "bundle-a-rsc_demote_0 bundle-a-1" [ style = bold]
+"bundle-a-clone_demote_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a-clone_demoted_0" -> "bundle-a-clone_promote_0" [ style = bold]
+"bundle-a-clone_demoted_0" -> "bundle-a_demoted_0" [ style = bold]
+"bundle-a-clone_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a-clone_promote_0" -> "bundle-a-rsc_promote_0 bundle-a-2" [ style = bold]
+"bundle-a-clone_promote_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a-clone_promoted_0" -> "bundle-a_promoted_0" [ style = bold]
+"bundle-a-clone_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a-rsc_demote_0 bundle-a-1" -> "bundle-a-clone_demoted_0" [ style = bold]
+"bundle-a-rsc_demote_0 bundle-a-1" -> "bundle-a-rsc_monitor_16000 bundle-a-1" [ style = bold]
+"bundle-a-rsc_demote_0 bundle-a-1" [ style=bold color="green" fontcolor="black"]
+"bundle-a-rsc_monitor_15000 bundle-a-2" [ style=bold color="green" fontcolor="black"]
+"bundle-a-rsc_monitor_16000 bundle-a-1" [ style=bold color="green" fontcolor="black"]
+"bundle-a-rsc_promote_0 bundle-a-2" -> "bundle-a-clone_promoted_0" [ style = bold]
+"bundle-a-rsc_promote_0 bundle-a-2" -> "bundle-a-rsc_monitor_15000 bundle-a-2" [ style = bold]
+"bundle-a-rsc_promote_0 bundle-a-2" [ style=bold color="green" fontcolor="black"]
+"bundle-a_demote_0" -> "bundle-a-clone_demote_0" [ style = bold]
+"bundle-a_demote_0" -> "bundle-a_demoted_0" [ style = bold]
+"bundle-a_demote_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a_demoted_0" -> "bundle-a_promote_0" [ style = bold]
+"bundle-a_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a_promote_0" -> "bundle-a-clone_promote_0" [ style = bold]
+"bundle-a_promote_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a_promoted_0" [ style=bold color="green" fontcolor="orange"]
+}
diff --git a/cts/scheduler/dot/bundle-promoted-colocation-6.dot b/cts/scheduler/dot/bundle-promoted-colocation-6.dot
new file mode 100644
index 0000000..b3db02e
--- /dev/null
+++ b/cts/scheduler/dot/bundle-promoted-colocation-6.dot
@@ -0,0 +1,32 @@
+ digraph "g" {
+"Cancel bundle-a-rsc_monitor_15000 bundle-a-1" -> "bundle-a-rsc_demote_0 bundle-a-1" [ style = bold]
+"Cancel bundle-a-rsc_monitor_15000 bundle-a-1" [ style=bold color="green" fontcolor="black"]
+"Cancel bundle-a-rsc_monitor_16000 bundle-a-2" -> "bundle-a-rsc_promote_0 bundle-a-2" [ style = bold]
+"Cancel bundle-a-rsc_monitor_16000 bundle-a-2" [ style=bold color="green" fontcolor="black"]
+"bundle-a-clone_demote_0" -> "bundle-a-clone_demoted_0" [ style = bold]
+"bundle-a-clone_demote_0" -> "bundle-a-rsc_demote_0 bundle-a-1" [ style = bold]
+"bundle-a-clone_demote_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a-clone_demoted_0" -> "bundle-a-clone_promote_0" [ style = bold]
+"bundle-a-clone_demoted_0" -> "bundle-a_demoted_0" [ style = bold]
+"bundle-a-clone_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a-clone_promote_0" -> "bundle-a-rsc_promote_0 bundle-a-2" [ style = bold]
+"bundle-a-clone_promote_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a-clone_promoted_0" -> "bundle-a_promoted_0" [ style = bold]
+"bundle-a-clone_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a-rsc_demote_0 bundle-a-1" -> "bundle-a-clone_demoted_0" [ style = bold]
+"bundle-a-rsc_demote_0 bundle-a-1" -> "bundle-a-rsc_monitor_16000 bundle-a-1" [ style = bold]
+"bundle-a-rsc_demote_0 bundle-a-1" [ style=bold color="green" fontcolor="black"]
+"bundle-a-rsc_monitor_15000 bundle-a-2" [ style=bold color="green" fontcolor="black"]
+"bundle-a-rsc_monitor_16000 bundle-a-1" [ style=bold color="green" fontcolor="black"]
+"bundle-a-rsc_promote_0 bundle-a-2" -> "bundle-a-clone_promoted_0" [ style = bold]
+"bundle-a-rsc_promote_0 bundle-a-2" -> "bundle-a-rsc_monitor_15000 bundle-a-2" [ style = bold]
+"bundle-a-rsc_promote_0 bundle-a-2" [ style=bold color="green" fontcolor="black"]
+"bundle-a_demote_0" -> "bundle-a-clone_demote_0" [ style = bold]
+"bundle-a_demote_0" -> "bundle-a_demoted_0" [ style = bold]
+"bundle-a_demote_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a_demoted_0" -> "bundle-a_promote_0" [ style = bold]
+"bundle-a_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a_promote_0" -> "bundle-a-clone_promote_0" [ style = bold]
+"bundle-a_promote_0" [ style=bold color="green" fontcolor="orange"]
+"bundle-a_promoted_0" [ style=bold color="green" fontcolor="orange"]
+}
diff --git a/cts/scheduler/dot/bundle-promoted-location-1.dot b/cts/scheduler/dot/bundle-promoted-location-1.dot
new file mode 100644
index 0000000..d8f1c9f
--- /dev/null
+++ b/cts/scheduler/dot/bundle-promoted-location-1.dot
@@ -0,0 +1,2 @@
+ digraph "g" {
+}
diff --git a/cts/scheduler/dot/bundle-promoted-location-2.dot b/cts/scheduler/dot/bundle-promoted-location-2.dot
new file mode 100644
index 0000000..72d1487
--- /dev/null
+++ b/cts/scheduler/dot/bundle-promoted-location-2.dot
@@ -0,0 +1,75 @@
+ digraph "g" {
+"Cancel base_monitor_15000 base-bundle-0" -> "base_demote_0 base-bundle-0" [ style = bold]
+"Cancel base_monitor_15000 base-bundle-0" [ style=bold color="green" fontcolor="black"]
+"Cancel base_monitor_16000 base-bundle-1" -> "base_promote_0 base-bundle-1" [ style = bold]
+"Cancel base_monitor_16000 base-bundle-1" [ style=bold color="green" fontcolor="black"]
+"base-bundle-0_monitor_30000 node3" [ style=dashed color="red" fontcolor="black"]
+"base-bundle-0_start_0 node3" -> "base-bundle-0_monitor_30000 node3" [ style = dashed]
+"base-bundle-0_start_0 node3" -> "base_monitor_16000 base-bundle-0" [ style = dashed]
+"base-bundle-0_start_0 node3" -> "base_start_0 base-bundle-0" [ style = dashed]
+"base-bundle-0_start_0 node3" [ style=dashed color="red" fontcolor="black"]
+"base-bundle-0_stop_0 node3" -> "base-bundle-0_start_0 node3" [ style = dashed]
+"base-bundle-0_stop_0 node3" -> "base-bundle-podman-0_stop_0 node3" [ style = bold]
+"base-bundle-0_stop_0 node3" [ style=bold color="green" fontcolor="black"]
+"base-bundle-clone_demote_0" -> "base-bundle-clone_demoted_0" [ style = bold]
+"base-bundle-clone_demote_0" -> "base_demote_0 base-bundle-0" [ style = bold]
+"base-bundle-clone_demote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_demoted_0" -> "base-bundle-clone_promote_0" [ style = bold]
+"base-bundle-clone_demoted_0" -> "base-bundle-clone_start_0" [ style = bold]
+"base-bundle-clone_demoted_0" -> "base-bundle-clone_stop_0" [ style = bold]
+"base-bundle-clone_demoted_0" -> "base-bundle_demoted_0" [ style = bold]
+"base-bundle-clone_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_promote_0" -> "base_promote_0 base-bundle-1" [ style = bold]
+"base-bundle-clone_promote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_promoted_0" -> "base-bundle_promoted_0" [ style = bold]
+"base-bundle-clone_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_running_0" -> "base-bundle-clone_promote_0" [ style = bold]
+"base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold]
+"base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold]
+"base-bundle-clone_start_0" -> "base_start_0 base-bundle-0" [ style = dashed]
+"base-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_stop_0" -> "base-bundle-clone_stopped_0" [ style = bold]
+"base-bundle-clone_stop_0" -> "base_stop_0 base-bundle-0" [ style = bold]
+"base-bundle-clone_stop_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_stopped_0" -> "base-bundle-clone_promote_0" [ style = bold]
+"base-bundle-clone_stopped_0" -> "base-bundle-clone_start_0" [ style = bold]
+"base-bundle-clone_stopped_0" -> "base-bundle_stopped_0" [ style = bold]
+"base-bundle-clone_stopped_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-podman-0_stop_0 node3" -> "base-bundle_stopped_0" [ style = bold]
+"base-bundle-podman-0_stop_0 node3" [ style=bold color="green" fontcolor="black"]
+"base-bundle_demote_0" -> "base-bundle-clone_demote_0" [ style = bold]
+"base-bundle_demote_0" -> "base-bundle_demoted_0" [ style = bold]
+"base-bundle_demote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_demoted_0" -> "base-bundle_promote_0" [ style = bold]
+"base-bundle_demoted_0" -> "base-bundle_stop_0" [ style = bold]
+"base-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_promote_0" -> "base-bundle-clone_promote_0" [ style = bold]
+"base-bundle_promote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_running_0" -> "base-bundle_promote_0" [ style = bold]
+"base-bundle_running_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_stop_0" -> "base-bundle-clone_stop_0" [ style = bold]
+"base-bundle_stop_0" -> "base-bundle-podman-0_stop_0 node3" [ style = bold]
+"base-bundle_stop_0" -> "base_stop_0 base-bundle-0" [ style = bold]
+"base-bundle_stop_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_stopped_0" -> "base-bundle_promote_0" [ style = bold]
+"base-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"]
+"base_demote_0 base-bundle-0" -> "base-bundle-0_stop_0 node3" [ style = bold]
+"base_demote_0 base-bundle-0" -> "base-bundle-clone_demoted_0" [ style = bold]
+"base_demote_0 base-bundle-0" -> "base_monitor_16000 base-bundle-0" [ style = dashed]
+"base_demote_0 base-bundle-0" -> "base_stop_0 base-bundle-0" [ style = bold]
+"base_demote_0 base-bundle-0" [ style=bold color="green" fontcolor="black"]
+"base_monitor_15000 base-bundle-1" [ style=bold color="green" fontcolor="black"]
+"base_monitor_16000 base-bundle-0" [ style=dashed color="red" fontcolor="black"]
+"base_promote_0 base-bundle-1" -> "base-bundle-clone_promoted_0" [ style = bold]
+"base_promote_0 base-bundle-1" -> "base_monitor_15000 base-bundle-1" [ style = bold]
+"base_promote_0 base-bundle-1" [ style=bold color="green" fontcolor="black"]
+"base_start_0 base-bundle-0" -> "base-bundle-clone_running_0" [ style = dashed]
+"base_start_0 base-bundle-0" -> "base_monitor_16000 base-bundle-0" [ style = dashed]
+"base_start_0 base-bundle-0" [ style=dashed color="red" fontcolor="black"]
+"base_stop_0 base-bundle-0" -> "base-bundle-0_stop_0 node3" [ style = bold]
+"base_stop_0 base-bundle-0" -> "base-bundle-clone_stopped_0" [ style = bold]
+"base_stop_0 base-bundle-0" -> "base_start_0 base-bundle-0" [ style = dashed]
+"base_stop_0 base-bundle-0" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/bundle-promoted-location-3.dot b/cts/scheduler/dot/bundle-promoted-location-3.dot
new file mode 100644
index 0000000..d8f1c9f
--- /dev/null
+++ b/cts/scheduler/dot/bundle-promoted-location-3.dot
@@ -0,0 +1,2 @@
+ digraph "g" {
+}
diff --git a/cts/scheduler/dot/bundle-promoted-location-4.dot b/cts/scheduler/dot/bundle-promoted-location-4.dot
new file mode 100644
index 0000000..d8f1c9f
--- /dev/null
+++ b/cts/scheduler/dot/bundle-promoted-location-4.dot
@@ -0,0 +1,2 @@
+ digraph "g" {
+}
diff --git a/cts/scheduler/dot/bundle-promoted-location-5.dot b/cts/scheduler/dot/bundle-promoted-location-5.dot
new file mode 100644
index 0000000..d8f1c9f
--- /dev/null
+++ b/cts/scheduler/dot/bundle-promoted-location-5.dot
@@ -0,0 +1,2 @@
+ digraph "g" {
+}
diff --git a/cts/scheduler/dot/bundle-promoted-location-6.dot b/cts/scheduler/dot/bundle-promoted-location-6.dot
new file mode 100644
index 0000000..9627375
--- /dev/null
+++ b/cts/scheduler/dot/bundle-promoted-location-6.dot
@@ -0,0 +1,37 @@
+ digraph "g" {
+"base-bundle-1_monitor_30000 node2" [ style=dashed color="red" fontcolor="black"]
+"base-bundle-1_start_0 node2" -> "base-bundle-1_monitor_30000 node2" [ style = dashed]
+"base-bundle-1_start_0 node2" -> "base_monitor_16000 base-bundle-1" [ style = dashed]
+"base-bundle-1_start_0 node2" -> "base_start_0 base-bundle-1" [ style = dashed]
+"base-bundle-1_start_0 node2" [ style=dashed color="red" fontcolor="black"]
+"base-bundle-1_stop_0 node2" -> "base-bundle-1_start_0 node2" [ style = dashed]
+"base-bundle-1_stop_0 node2" -> "base-bundle-podman-1_stop_0 node2" [ style = bold]
+"base-bundle-1_stop_0 node2" [ style=bold color="green" fontcolor="black"]
+"base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold]
+"base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold]
+"base-bundle-clone_start_0" -> "base_start_0 base-bundle-1" [ style = dashed]
+"base-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_stop_0" -> "base-bundle-clone_stopped_0" [ style = bold]
+"base-bundle-clone_stop_0" -> "base_stop_0 base-bundle-1" [ style = bold]
+"base-bundle-clone_stop_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_stopped_0" -> "base-bundle-clone_start_0" [ style = bold]
+"base-bundle-clone_stopped_0" -> "base-bundle_stopped_0" [ style = bold]
+"base-bundle-clone_stopped_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-podman-1_stop_0 node2" -> "base-bundle_stopped_0" [ style = bold]
+"base-bundle-podman-1_stop_0 node2" [ style=bold color="green" fontcolor="black"]
+"base-bundle_running_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_stop_0" -> "base-bundle-clone_stop_0" [ style = bold]
+"base-bundle_stop_0" -> "base-bundle-podman-1_stop_0 node2" [ style = bold]
+"base-bundle_stop_0" -> "base_stop_0 base-bundle-1" [ style = bold]
+"base-bundle_stop_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"]
+"base_monitor_16000 base-bundle-1" [ style=dashed color="red" fontcolor="black"]
+"base_start_0 base-bundle-1" -> "base-bundle-clone_running_0" [ style = dashed]
+"base_start_0 base-bundle-1" -> "base_monitor_16000 base-bundle-1" [ style = dashed]
+"base_start_0 base-bundle-1" [ style=dashed color="red" fontcolor="black"]
+"base_stop_0 base-bundle-1" -> "base-bundle-1_stop_0 node2" [ style = bold]
+"base_stop_0 base-bundle-1" -> "base-bundle-clone_stopped_0" [ style = bold]
+"base_stop_0 base-bundle-1" -> "base_start_0 base-bundle-1" [ style = dashed]
+"base_stop_0 base-bundle-1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/bundle-replicas-change.dot b/cts/scheduler/dot/bundle-replicas-change.dot
index 08c20b4..4d98275 100644
--- a/cts/scheduler/dot/bundle-replicas-change.dot
+++ b/cts/scheduler/dot/bundle-replicas-change.dot
@@ -89,6 +89,7 @@
"httpd:0_start_0 httpd-bundle-0" -> "httpd-bundle-clone_running_0" [ style = bold]
"httpd:0_start_0 httpd-bundle-0" -> "httpd:0_monitor_10000 httpd-bundle-0" [ style = bold]
"httpd:0_start_0 httpd-bundle-0" -> "httpd:1_start_0 httpd-bundle-1" [ style = bold]
+"httpd:0_start_0 httpd-bundle-0" -> "httpd:2_start_0 httpd-bundle-2" [ style = bold]
"httpd:0_start_0 httpd-bundle-0" [ style=bold color="green" fontcolor="black"]
"httpd:1_monitor_10000 httpd-bundle-1" [ style=bold color="green" fontcolor="black"]
"httpd:1_start_0 httpd-bundle-1" -> "httpd-bundle-clone_running_0" [ style = bold]
diff --git a/cts/scheduler/dot/cancel-behind-moving-remote.dot b/cts/scheduler/dot/cancel-behind-moving-remote.dot
index 1a0dfc8..0eddcce 100644
--- a/cts/scheduler/dot/cancel-behind-moving-remote.dot
+++ b/cts/scheduler/dot/cancel-behind-moving-remote.dot
@@ -1,50 +1,30 @@
digraph "g" {
-"Cancel ovndb_servers_monitor_30000 ovn-dbs-bundle-1" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold]
-"Cancel ovndb_servers_monitor_30000 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"]
-"ip-172.17.1.87_monitor_10000 controller-0" [ style=bold color="green" fontcolor="black"]
-"ip-172.17.1.87_start_0 controller-0" -> "ip-172.17.1.87_monitor_10000 controller-0" [ style = bold]
-"ip-172.17.1.87_start_0 controller-0" [ style=bold color="green" fontcolor="black"]
+"Cancel ovndb_servers_monitor_30000 ovn-dbs-bundle-2" -> "ovndb_servers_promote_0 ovn-dbs-bundle-2" [ style = bold]
+"Cancel ovndb_servers_monitor_30000 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"]
+"ip-172.17.1.87_monitor_10000 controller-1" [ style=bold color="green" fontcolor="black"]
+"ip-172.17.1.87_start_0 controller-1" -> "ip-172.17.1.87_monitor_10000 controller-1" [ style = bold]
+"ip-172.17.1.87_start_0 controller-1" [ style=bold color="green" fontcolor="black"]
"nova-evacuate_clear_failcount_0 messaging-0" [ style=bold color="green" fontcolor="black"]
-"ovn-dbs-bundle-0_clear_failcount_0 controller-0" -> "ovn-dbs-bundle-0_start_0 controller-2" [ style = bold]
-"ovn-dbs-bundle-0_clear_failcount_0 controller-0" [ style=bold color="green" fontcolor="black"]
-"ovn-dbs-bundle-0_monitor_30000 controller-2" [ style=bold color="green" fontcolor="black"]
-"ovn-dbs-bundle-0_start_0 controller-2" -> "ovn-dbs-bundle-0_monitor_30000 controller-2" [ style = bold]
-"ovn-dbs-bundle-0_start_0 controller-2" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold]
-"ovn-dbs-bundle-0_start_0 controller-2" -> "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style = bold]
-"ovn-dbs-bundle-0_start_0 controller-2" [ style=bold color="green" fontcolor="black"]
-"ovn-dbs-bundle-1_clear_failcount_0 controller-2" -> "ovn-dbs-bundle-1_start_0 controller-0" [ style = bold]
-"ovn-dbs-bundle-1_clear_failcount_0 controller-2" [ style=bold color="green" fontcolor="black"]
-"ovn-dbs-bundle-1_monitor_30000 controller-0" [ style=bold color="green" fontcolor="black"]
-"ovn-dbs-bundle-1_start_0 controller-0" -> "ovn-dbs-bundle-1_monitor_30000 controller-0" [ style = bold]
-"ovn-dbs-bundle-1_start_0 controller-0" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold]
-"ovn-dbs-bundle-1_start_0 controller-0" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold]
-"ovn-dbs-bundle-1_start_0 controller-0" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold]
-"ovn-dbs-bundle-1_start_0 controller-0" [ style=bold color="green" fontcolor="black"]
-"ovn-dbs-bundle-1_stop_0 controller-2" -> "ovn-dbs-bundle-1_start_0 controller-0" [ style = bold]
-"ovn-dbs-bundle-1_stop_0 controller-2" -> "ovn-dbs-bundle-podman-1_stop_0 controller-2" [ style = bold]
-"ovn-dbs-bundle-1_stop_0 controller-2" [ style=bold color="green" fontcolor="black"]
+"ovn-dbs-bundle-0_monitor_30000 controller-0" [ style=bold color="green" fontcolor="black"]
+"ovn-dbs-bundle-0_start_0 controller-0" -> "ovn-dbs-bundle-0_monitor_30000 controller-0" [ style = bold]
+"ovn-dbs-bundle-0_start_0 controller-0" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold]
+"ovn-dbs-bundle-0_start_0 controller-0" -> "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style = bold]
+"ovn-dbs-bundle-0_start_0 controller-0" [ style=bold color="green" fontcolor="black"]
"ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" -> "ovn-dbs-bundle_promoted_0" [ style = bold]
"ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold]
-"ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold]
+"ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-2" [ style = bold]
"ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style=bold color="green" fontcolor="orange"]
"ovn-dbs-bundle-master_confirmed-post_notify_running_0" -> "ovn-dbs-bundle-master_pre_notify_promote_0" [ style = bold]
"ovn-dbs-bundle-master_confirmed-post_notify_running_0" -> "ovn-dbs-bundle_running_0" [ style = bold]
"ovn-dbs-bundle-master_confirmed-post_notify_running_0" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold]
-"ovn-dbs-bundle-master_confirmed-post_notify_running_0" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold]
+"ovn-dbs-bundle-master_confirmed-post_notify_running_0" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-2" [ style = bold]
"ovn-dbs-bundle-master_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange"]
-"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" -> "ovn-dbs-bundle-master_pre_notify_promote_0" [ style = bold]
-"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" -> "ovn-dbs-bundle-master_pre_notify_start_0" [ style = bold]
-"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" -> "ovn-dbs-bundle_stopped_0" [ style = bold]
-"ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"]
"ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" -> "ovn-dbs-bundle-master_post_notify_promoted_0" [ style = bold]
"ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" -> "ovn-dbs-bundle-master_promote_0" [ style = bold]
"ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style=bold color="green" fontcolor="orange"]
"ovn-dbs-bundle-master_confirmed-pre_notify_start_0" -> "ovn-dbs-bundle-master_post_notify_running_0" [ style = bold]
"ovn-dbs-bundle-master_confirmed-pre_notify_start_0" -> "ovn-dbs-bundle-master_start_0" [ style = bold]
"ovn-dbs-bundle-master_confirmed-pre_notify_start_0" [ style=bold color="green" fontcolor="orange"]
-"ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" -> "ovn-dbs-bundle-master_post_notify_stopped_0" [ style = bold]
-"ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" -> "ovn-dbs-bundle-master_stop_0" [ style = bold]
-"ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style=bold color="green" fontcolor="orange"]
"ovn-dbs-bundle-master_post_notify_promoted_0" -> "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style = bold]
"ovn-dbs-bundle-master_post_notify_promoted_0" -> "ovndb_servers:0_post_notify_promote_0 ovn-dbs-bundle-0" [ style = bold]
"ovn-dbs-bundle-master_post_notify_promoted_0" -> "ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-1" [ style = bold]
@@ -55,22 +35,16 @@
"ovn-dbs-bundle-master_post_notify_running_0" -> "ovndb_servers_post_notify_running_0 ovn-dbs-bundle-1" [ style = bold]
"ovn-dbs-bundle-master_post_notify_running_0" -> "ovndb_servers_post_notify_running_0 ovn-dbs-bundle-2" [ style = bold]
"ovn-dbs-bundle-master_post_notify_running_0" [ style=bold color="green" fontcolor="orange"]
-"ovn-dbs-bundle-master_post_notify_stopped_0" -> "ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" [ style = bold]
-"ovn-dbs-bundle-master_post_notify_stopped_0" -> "ovndb_servers_post_notify_stopped_0 ovn-dbs-bundle-2" [ style = bold]
-"ovn-dbs-bundle-master_post_notify_stopped_0" [ style=bold color="green" fontcolor="orange"]
"ovn-dbs-bundle-master_pre_notify_promote_0" -> "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style = bold]
"ovn-dbs-bundle-master_pre_notify_promote_0" -> "ovndb_servers:0_pre_notify_promote_0 ovn-dbs-bundle-0" [ style = bold]
"ovn-dbs-bundle-master_pre_notify_promote_0" -> "ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-1" [ style = bold]
"ovn-dbs-bundle-master_pre_notify_promote_0" -> "ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-2" [ style = bold]
"ovn-dbs-bundle-master_pre_notify_promote_0" [ style=bold color="green" fontcolor="orange"]
"ovn-dbs-bundle-master_pre_notify_start_0" -> "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" [ style = bold]
+"ovn-dbs-bundle-master_pre_notify_start_0" -> "ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-1" [ style = bold]
"ovn-dbs-bundle-master_pre_notify_start_0" -> "ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-2" [ style = bold]
"ovn-dbs-bundle-master_pre_notify_start_0" [ style=bold color="green" fontcolor="orange"]
-"ovn-dbs-bundle-master_pre_notify_stop_0" -> "ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style = bold]
-"ovn-dbs-bundle-master_pre_notify_stop_0" -> "ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-1" [ style = bold]
-"ovn-dbs-bundle-master_pre_notify_stop_0" -> "ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-2" [ style = bold]
-"ovn-dbs-bundle-master_pre_notify_stop_0" [ style=bold color="green" fontcolor="orange"]
-"ovn-dbs-bundle-master_promote_0" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold]
+"ovn-dbs-bundle-master_promote_0" -> "ovndb_servers_promote_0 ovn-dbs-bundle-2" [ style = bold]
"ovn-dbs-bundle-master_promote_0" [ style=bold color="green" fontcolor="orange"]
"ovn-dbs-bundle-master_promoted_0" -> "ovn-dbs-bundle-master_post_notify_promoted_0" [ style = bold]
"ovn-dbs-bundle-master_promoted_0" [ style=bold color="green" fontcolor="orange"]
@@ -79,48 +53,22 @@
"ovn-dbs-bundle-master_running_0" [ style=bold color="green" fontcolor="orange"]
"ovn-dbs-bundle-master_start_0" -> "ovn-dbs-bundle-master_running_0" [ style = bold]
"ovn-dbs-bundle-master_start_0" -> "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style = bold]
-"ovn-dbs-bundle-master_start_0" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold]
"ovn-dbs-bundle-master_start_0" [ style=bold color="green" fontcolor="orange"]
-"ovn-dbs-bundle-master_stop_0" -> "ovn-dbs-bundle-master_stopped_0" [ style = bold]
-"ovn-dbs-bundle-master_stop_0" -> "ovndb_servers_stop_0 ovn-dbs-bundle-1" [ style = bold]
-"ovn-dbs-bundle-master_stop_0" [ style=bold color="green" fontcolor="orange"]
-"ovn-dbs-bundle-master_stopped_0" -> "ovn-dbs-bundle-master_post_notify_stopped_0" [ style = bold]
-"ovn-dbs-bundle-master_stopped_0" -> "ovn-dbs-bundle-master_promote_0" [ style = bold]
-"ovn-dbs-bundle-master_stopped_0" -> "ovn-dbs-bundle-master_start_0" [ style = bold]
-"ovn-dbs-bundle-master_stopped_0" [ style=bold color="green" fontcolor="orange"]
-"ovn-dbs-bundle-podman-0_monitor_60000 controller-2" [ style=bold color="green" fontcolor="black"]
-"ovn-dbs-bundle-podman-0_start_0 controller-2" -> "ovn-dbs-bundle-0_start_0 controller-2" [ style = bold]
-"ovn-dbs-bundle-podman-0_start_0 controller-2" -> "ovn-dbs-bundle-podman-0_monitor_60000 controller-2" [ style = bold]
-"ovn-dbs-bundle-podman-0_start_0 controller-2" -> "ovn-dbs-bundle_running_0" [ style = bold]
-"ovn-dbs-bundle-podman-0_start_0 controller-2" -> "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style = bold]
-"ovn-dbs-bundle-podman-0_start_0 controller-2" [ style=bold color="green" fontcolor="black"]
-"ovn-dbs-bundle-podman-1_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"]
-"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovn-dbs-bundle-1_start_0 controller-0" [ style = bold]
-"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovn-dbs-bundle-podman-1_monitor_60000 controller-0" [ style = bold]
-"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovn-dbs-bundle_running_0" [ style = bold]
-"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold]
-"ovn-dbs-bundle-podman-1_start_0 controller-0" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold]
-"ovn-dbs-bundle-podman-1_start_0 controller-0" [ style=bold color="green" fontcolor="black"]
-"ovn-dbs-bundle-podman-1_stop_0 controller-2" -> "ovn-dbs-bundle-podman-1_start_0 controller-0" [ style = bold]
-"ovn-dbs-bundle-podman-1_stop_0 controller-2" -> "ovn-dbs-bundle_stopped_0" [ style = bold]
-"ovn-dbs-bundle-podman-1_stop_0 controller-2" [ style=bold color="green" fontcolor="black"]
-"ovn-dbs-bundle_promote_0" -> "ip-172.17.1.87_start_0 controller-0" [ style = bold]
+"ovn-dbs-bundle-podman-0_monitor_60000 controller-0" [ style=bold color="green" fontcolor="black"]
+"ovn-dbs-bundle-podman-0_start_0 controller-0" -> "ovn-dbs-bundle-0_start_0 controller-0" [ style = bold]
+"ovn-dbs-bundle-podman-0_start_0 controller-0" -> "ovn-dbs-bundle-podman-0_monitor_60000 controller-0" [ style = bold]
+"ovn-dbs-bundle-podman-0_start_0 controller-0" -> "ovn-dbs-bundle_running_0" [ style = bold]
+"ovn-dbs-bundle-podman-0_start_0 controller-0" -> "ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style = bold]
+"ovn-dbs-bundle-podman-0_start_0 controller-0" [ style=bold color="green" fontcolor="black"]
+"ovn-dbs-bundle_promote_0" -> "ip-172.17.1.87_start_0 controller-1" [ style = bold]
"ovn-dbs-bundle_promote_0" -> "ovn-dbs-bundle-master_promote_0" [ style = bold]
"ovn-dbs-bundle_promote_0" [ style=bold color="green" fontcolor="orange"]
"ovn-dbs-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"]
"ovn-dbs-bundle_running_0" -> "ovn-dbs-bundle_promote_0" [ style = bold]
"ovn-dbs-bundle_running_0" [ style=bold color="green" fontcolor="orange"]
"ovn-dbs-bundle_start_0" -> "ovn-dbs-bundle-master_start_0" [ style = bold]
-"ovn-dbs-bundle_start_0" -> "ovn-dbs-bundle-podman-0_start_0 controller-2" [ style = bold]
-"ovn-dbs-bundle_start_0" -> "ovn-dbs-bundle-podman-1_start_0 controller-0" [ style = bold]
+"ovn-dbs-bundle_start_0" -> "ovn-dbs-bundle-podman-0_start_0 controller-0" [ style = bold]
"ovn-dbs-bundle_start_0" [ style=bold color="green" fontcolor="orange"]
-"ovn-dbs-bundle_stop_0" -> "ovn-dbs-bundle-master_stop_0" [ style = bold]
-"ovn-dbs-bundle_stop_0" -> "ovn-dbs-bundle-podman-1_stop_0 controller-2" [ style = bold]
-"ovn-dbs-bundle_stop_0" -> "ovndb_servers_stop_0 ovn-dbs-bundle-1" [ style = bold]
-"ovn-dbs-bundle_stop_0" [ style=bold color="green" fontcolor="orange"]
-"ovn-dbs-bundle_stopped_0" -> "ovn-dbs-bundle_promote_0" [ style = bold]
-"ovn-dbs-bundle_stopped_0" -> "ovn-dbs-bundle_start_0" [ style = bold]
-"ovn-dbs-bundle_stopped_0" [ style=bold color="green" fontcolor="orange"]
"ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style=bold color="green" fontcolor="black"]
"ovndb_servers:0_post_notify_promote_0 ovn-dbs-bundle-0" -> "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style = bold]
"ovndb_servers:0_post_notify_promote_0 ovn-dbs-bundle-0" [ style=bold color="green" fontcolor="black"]
@@ -130,9 +78,8 @@
"ovndb_servers:0_pre_notify_promote_0 ovn-dbs-bundle-0" [ style=bold color="green" fontcolor="black"]
"ovndb_servers:0_start_0 ovn-dbs-bundle-0" -> "ovn-dbs-bundle-master_running_0" [ style = bold]
"ovndb_servers:0_start_0 ovn-dbs-bundle-0" -> "ovndb_servers:0_monitor_30000 ovn-dbs-bundle-0" [ style = bold]
-"ovndb_servers:0_start_0 ovn-dbs-bundle-0" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold]
"ovndb_servers:0_start_0 ovn-dbs-bundle-0" [ style=bold color="green" fontcolor="black"]
-"ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"]
+"ovndb_servers_monitor_10000 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"]
"ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style = bold]
"ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"]
"ovndb_servers_post_notify_promoted_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-post_notify_promoted_0" [ style = bold]
@@ -141,29 +88,17 @@
"ovndb_servers_post_notify_running_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"]
"ovndb_servers_post_notify_running_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-post_notify_running_0" [ style = bold]
"ovndb_servers_post_notify_running_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"]
-"ovndb_servers_post_notify_stopped_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-post_notify_stopped_0" [ style = bold]
-"ovndb_servers_post_notify_stopped_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"]
"ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style = bold]
"ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"]
"ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-pre_notify_promote_0" [ style = bold]
"ovndb_servers_pre_notify_promote_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"]
+"ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" [ style = bold]
+"ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"]
"ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-pre_notify_start_0" [ style = bold]
"ovndb_servers_pre_notify_start_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"]
-"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style = bold]
-"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"]
-"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_confirmed-pre_notify_stop_0" [ style = bold]
-"ovndb_servers_pre_notify_stop_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"]
-"ovndb_servers_promote_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_promoted_0" [ style = bold]
-"ovndb_servers_promote_0 ovn-dbs-bundle-1" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold]
-"ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"]
-"ovndb_servers_start_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_running_0" [ style = bold]
-"ovndb_servers_start_0 ovn-dbs-bundle-1" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-1" [ style = bold]
-"ovndb_servers_start_0 ovn-dbs-bundle-1" -> "ovndb_servers_promote_0 ovn-dbs-bundle-1" [ style = bold]
-"ovndb_servers_start_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"]
-"ovndb_servers_stop_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-1_stop_0 controller-2" [ style = bold]
-"ovndb_servers_stop_0 ovn-dbs-bundle-1" -> "ovn-dbs-bundle-master_stopped_0" [ style = bold]
-"ovndb_servers_stop_0 ovn-dbs-bundle-1" -> "ovndb_servers_start_0 ovn-dbs-bundle-1" [ style = bold]
-"ovndb_servers_stop_0 ovn-dbs-bundle-1" [ style=bold color="green" fontcolor="black"]
+"ovndb_servers_promote_0 ovn-dbs-bundle-2" -> "ovn-dbs-bundle-master_promoted_0" [ style = bold]
+"ovndb_servers_promote_0 ovn-dbs-bundle-2" -> "ovndb_servers_monitor_10000 ovn-dbs-bundle-2" [ style = bold]
+"ovndb_servers_promote_0 ovn-dbs-bundle-2" [ style=bold color="green" fontcolor="black"]
"rabbitmq-bundle-1_monitor_30000 controller-0" [ style=dashed color="red" fontcolor="black"]
"rabbitmq-bundle-1_start_0 controller-0" -> "rabbitmq-bundle-1_monitor_30000 controller-0" [ style = dashed]
"rabbitmq-bundle-1_start_0 controller-0" -> "rabbitmq:1_monitor_10000 rabbitmq-bundle-1" [ style = dashed]
diff --git a/cts/scheduler/dot/clone-order-16instances.dot b/cts/scheduler/dot/clone-order-16instances.dot
index cf87468..fbbb55b 100644
--- a/cts/scheduler/dot/clone-order-16instances.dot
+++ b/cts/scheduler/dot/clone-order-16instances.dot
@@ -3,51 +3,36 @@
"clvmd-clone_start_0" -> "clvmd-clone_running_0" [ style = dashed]
"clvmd-clone_start_0" [ style=dashed color="red" fontcolor="orange"]
"clvmd:10_start_0 <none>" -> "clvmd-clone_running_0" [ style = dashed]
-"clvmd:10_start_0 <none>" -> "clvmd:11_start_0 <none>" [ style = dashed]
"clvmd:10_start_0 <none>" [ style=dashed color="red" fontcolor="black"]
"clvmd:11_start_0 <none>" -> "clvmd-clone_running_0" [ style = dashed]
-"clvmd:11_start_0 <none>" -> "clvmd:12_start_0 <none>" [ style = dashed]
"clvmd:11_start_0 <none>" [ style=dashed color="red" fontcolor="black"]
"clvmd:12_start_0 <none>" -> "clvmd-clone_running_0" [ style = dashed]
-"clvmd:12_start_0 <none>" -> "clvmd:13_start_0 <none>" [ style = dashed]
"clvmd:12_start_0 <none>" [ style=dashed color="red" fontcolor="black"]
"clvmd:13_start_0 <none>" -> "clvmd-clone_running_0" [ style = dashed]
-"clvmd:13_start_0 <none>" -> "clvmd:14_start_0 <none>" [ style = dashed]
"clvmd:13_start_0 <none>" [ style=dashed color="red" fontcolor="black"]
"clvmd:14_start_0 <none>" -> "clvmd-clone_running_0" [ style = dashed]
-"clvmd:14_start_0 <none>" -> "clvmd:15_start_0 <none>" [ style = dashed]
"clvmd:14_start_0 <none>" [ style=dashed color="red" fontcolor="black"]
"clvmd:15_start_0 <none>" -> "clvmd-clone_running_0" [ style = dashed]
"clvmd:15_start_0 <none>" [ style=dashed color="red" fontcolor="black"]
"clvmd:1_start_0 <none>" -> "clvmd-clone_running_0" [ style = dashed]
-"clvmd:1_start_0 <none>" -> "clvmd:2_start_0 <none>" [ style = dashed]
"clvmd:1_start_0 <none>" [ style=dashed color="red" fontcolor="black"]
"clvmd:2_start_0 <none>" -> "clvmd-clone_running_0" [ style = dashed]
-"clvmd:2_start_0 <none>" -> "clvmd:3_start_0 <none>" [ style = dashed]
"clvmd:2_start_0 <none>" [ style=dashed color="red" fontcolor="black"]
"clvmd:3_start_0 <none>" -> "clvmd-clone_running_0" [ style = dashed]
-"clvmd:3_start_0 <none>" -> "clvmd:4_start_0 <none>" [ style = dashed]
"clvmd:3_start_0 <none>" [ style=dashed color="red" fontcolor="black"]
"clvmd:4_start_0 <none>" -> "clvmd-clone_running_0" [ style = dashed]
-"clvmd:4_start_0 <none>" -> "clvmd:5_start_0 <none>" [ style = dashed]
"clvmd:4_start_0 <none>" [ style=dashed color="red" fontcolor="black"]
"clvmd:5_start_0 <none>" -> "clvmd-clone_running_0" [ style = dashed]
-"clvmd:5_start_0 <none>" -> "clvmd:6_start_0 <none>" [ style = dashed]
"clvmd:5_start_0 <none>" [ style=dashed color="red" fontcolor="black"]
"clvmd:6_start_0 <none>" -> "clvmd-clone_running_0" [ style = dashed]
-"clvmd:6_start_0 <none>" -> "clvmd:7_start_0 <none>" [ style = dashed]
"clvmd:6_start_0 <none>" [ style=dashed color="red" fontcolor="black"]
"clvmd:7_start_0 <none>" -> "clvmd-clone_running_0" [ style = dashed]
-"clvmd:7_start_0 <none>" -> "clvmd:8_start_0 <none>" [ style = dashed]
"clvmd:7_start_0 <none>" [ style=dashed color="red" fontcolor="black"]
"clvmd:8_start_0 <none>" -> "clvmd-clone_running_0" [ style = dashed]
-"clvmd:8_start_0 <none>" -> "clvmd:9_start_0 <none>" [ style = dashed]
"clvmd:8_start_0 <none>" [ style=dashed color="red" fontcolor="black"]
"clvmd:9_start_0 <none>" -> "clvmd-clone_running_0" [ style = dashed]
-"clvmd:9_start_0 <none>" -> "clvmd:10_start_0 <none>" [ style = dashed]
"clvmd:9_start_0 <none>" [ style=dashed color="red" fontcolor="black"]
"clvmd_start_0 <none>" -> "clvmd-clone_running_0" [ style = dashed]
-"clvmd_start_0 <none>" -> "clvmd:1_start_0 <none>" [ style = dashed]
"clvmd_start_0 <none>" [ style=dashed color="red" fontcolor="black"]
"dlm-clone_running_0" -> "clvmd-clone_start_0" [ style = dashed]
"dlm-clone_running_0" [ style=bold color="green" fontcolor="orange"]
@@ -71,21 +56,31 @@
"dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold]
"dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:10_monitor_30000 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
"dlm:11_monitor_30000 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
"dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold]
"dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_monitor_30000 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
"dlm:12_monitor_30000 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
"dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold]
"dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_monitor_30000 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
"dlm:13_monitor_30000 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
"dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold]
"dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_monitor_30000 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
"dlm:14_monitor_30000 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
"dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold]
@@ -98,37 +93,93 @@
"dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
"dlm:3_monitor_30000 virt-013.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold]
+"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:3_monitor_30000 virt-013.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
"dlm:4_monitor_30000 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold]
+"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:4_monitor_30000 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
"dlm:5_monitor_30000 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold]
+"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:5_monitor_30000 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
"dlm:6_monitor_30000 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold]
+"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:6_monitor_30000 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
"dlm:7_monitor_30000 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
"dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold]
+"dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:7_monitor_30000 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
"dlm:8_monitor_30000 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
"dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold]
+"dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:8_monitor_30000 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
"dlm:9_monitor_30000 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
"dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold]
"dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:9_monitor_30000 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
"dlm_monitor_30000 virt-009.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
@@ -149,7 +200,19 @@
"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd:9_start_0 <none>" [ style = dashed]
"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "clvmd_start_0 <none>" [ style = dashed]
"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm-clone_running_0" [ style = bold]
+"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:10_start_0 virt-029.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:11_start_0 virt-030.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:12_start_0 virt-031.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:13_start_0 virt-032.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:14_start_0 virt-033.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:15_start_0 virt-034.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:3_start_0 virt-013.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:4_start_0 virt-014.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:5_start_0 virt-015.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:6_start_0 virt-016.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:7_start_0 virt-020.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:8_start_0 virt-027.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
+"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm:9_start_0 virt-028.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" -> "dlm_monitor_30000 virt-009.cluster-qe.lab.eng.brq.redhat.com" [ style = bold]
"dlm_start_0 virt-009.cluster-qe.lab.eng.brq.redhat.com" [ style=bold color="green" fontcolor="black"]
}
diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-1.dot b/cts/scheduler/dot/clone-recover-no-shuffle-1.dot
new file mode 100644
index 0000000..287d82d
--- /dev/null
+++ b/cts/scheduler/dot/clone-recover-no-shuffle-1.dot
@@ -0,0 +1,10 @@
+ digraph "g" {
+"dummy-clone_running_0" [ style=bold color="green" fontcolor="orange"]
+"dummy-clone_start_0" -> "dummy-clone_running_0" [ style = bold]
+"dummy-clone_start_0" -> "dummy:2_start_0 node1" [ style = bold]
+"dummy-clone_start_0" [ style=bold color="green" fontcolor="orange"]
+"dummy:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"]
+"dummy:2_start_0 node1" -> "dummy-clone_running_0" [ style = bold]
+"dummy:2_start_0 node1" -> "dummy:2_monitor_10000 node1" [ style = bold]
+"dummy:2_start_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-10.dot b/cts/scheduler/dot/clone-recover-no-shuffle-10.dot
new file mode 100644
index 0000000..1e18409
--- /dev/null
+++ b/cts/scheduler/dot/clone-recover-no-shuffle-10.dot
@@ -0,0 +1,10 @@
+ digraph "g" {
+"dummy-clone_running_0" [ style=bold color="green" fontcolor="orange"]
+"dummy-clone_start_0" -> "dummy-clone_running_0" [ style = bold]
+"dummy-clone_start_0" -> "dummy:2_start_0 node1" [ style = bold]
+"dummy-clone_start_0" [ style=bold color="green" fontcolor="orange"]
+"dummy:2_monitor_11000 node1" [ style=bold color="green" fontcolor="black"]
+"dummy:2_start_0 node1" -> "dummy-clone_running_0" [ style = bold]
+"dummy:2_start_0 node1" -> "dummy:2_monitor_11000 node1" [ style = bold]
+"dummy:2_start_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-11.dot b/cts/scheduler/dot/clone-recover-no-shuffle-11.dot
new file mode 100644
index 0000000..2b08a59
--- /dev/null
+++ b/cts/scheduler/dot/clone-recover-no-shuffle-11.dot
@@ -0,0 +1,21 @@
+ digraph "g" {
+"grp-clone_running_0" [ style=bold color="green" fontcolor="orange"]
+"grp-clone_start_0" -> "grp-clone_running_0" [ style = bold]
+"grp-clone_start_0" -> "grp:2_start_0" [ style = bold]
+"grp-clone_start_0" [ style=bold color="green" fontcolor="orange"]
+"grp:2_running_0" -> "grp-clone_running_0" [ style = bold]
+"grp:2_running_0" [ style=bold color="green" fontcolor="orange"]
+"grp:2_start_0" -> "grp:2_running_0" [ style = bold]
+"grp:2_start_0" -> "rsc1:2_start_0 node1" [ style = bold]
+"grp:2_start_0" -> "rsc2:2_start_0 node1" [ style = bold]
+"grp:2_start_0" [ style=bold color="green" fontcolor="orange"]
+"rsc1:2_monitor_11000 node1" [ style=bold color="green" fontcolor="black"]
+"rsc1:2_start_0 node1" -> "grp:2_running_0" [ style = bold]
+"rsc1:2_start_0 node1" -> "rsc1:2_monitor_11000 node1" [ style = bold]
+"rsc1:2_start_0 node1" -> "rsc2:2_start_0 node1" [ style = bold]
+"rsc1:2_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"rsc2:2_monitor_11000 node1" [ style=bold color="green" fontcolor="black"]
+"rsc2:2_start_0 node1" -> "grp:2_running_0" [ style = bold]
+"rsc2:2_start_0 node1" -> "rsc2:2_monitor_11000 node1" [ style = bold]
+"rsc2:2_start_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-12.dot b/cts/scheduler/dot/clone-recover-no-shuffle-12.dot
new file mode 100644
index 0000000..ebc1dc6
--- /dev/null
+++ b/cts/scheduler/dot/clone-recover-no-shuffle-12.dot
@@ -0,0 +1,35 @@
+ digraph "g" {
+"base-bundle-2_monitor_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold]
+"base-bundle-2_monitor_0 node1" [ style=bold color="green" fontcolor="black"]
+"base-bundle-2_monitor_0 node2" -> "base-bundle-2_start_0 node1" [ style = bold]
+"base-bundle-2_monitor_0 node2" [ style=bold color="green" fontcolor="black"]
+"base-bundle-2_monitor_0 node3" -> "base-bundle-2_start_0 node1" [ style = bold]
+"base-bundle-2_monitor_0 node3" [ style=bold color="green" fontcolor="black"]
+"base-bundle-2_monitor_30000 node1" [ style=bold color="green" fontcolor="black"]
+"base-bundle-2_start_0 node1" -> "base-bundle-2_monitor_30000 node1" [ style = bold]
+"base-bundle-2_start_0 node1" -> "base:2_monitor_16000 base-bundle-2" [ style = bold]
+"base-bundle-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold]
+"base-bundle-2_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold]
+"base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold]
+"base-bundle-clone_start_0" -> "base:2_start_0 base-bundle-2" [ style = bold]
+"base-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-podman-2_monitor_60000 node1" [ style=bold color="green" fontcolor="black"]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node1" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node2" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node3" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle-podman-2_monitor_60000 node1" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle_running_0" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold]
+"base-bundle-podman-2_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"base-bundle_running_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_start_0" -> "base-bundle-clone_start_0" [ style = bold]
+"base-bundle_start_0" -> "base-bundle-podman-2_start_0 node1" [ style = bold]
+"base-bundle_start_0" [ style=bold color="green" fontcolor="orange"]
+"base:2_monitor_16000 base-bundle-2" [ style=bold color="green" fontcolor="black"]
+"base:2_start_0 base-bundle-2" -> "base-bundle-clone_running_0" [ style = bold]
+"base:2_start_0 base-bundle-2" -> "base:2_monitor_16000 base-bundle-2" [ style = bold]
+"base:2_start_0 base-bundle-2" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-2.dot b/cts/scheduler/dot/clone-recover-no-shuffle-2.dot
new file mode 100644
index 0000000..d3bdf04
--- /dev/null
+++ b/cts/scheduler/dot/clone-recover-no-shuffle-2.dot
@@ -0,0 +1,21 @@
+ digraph "g" {
+"grp-clone_running_0" [ style=bold color="green" fontcolor="orange"]
+"grp-clone_start_0" -> "grp-clone_running_0" [ style = bold]
+"grp-clone_start_0" -> "grp:2_start_0" [ style = bold]
+"grp-clone_start_0" [ style=bold color="green" fontcolor="orange"]
+"grp:2_running_0" -> "grp-clone_running_0" [ style = bold]
+"grp:2_running_0" [ style=bold color="green" fontcolor="orange"]
+"grp:2_start_0" -> "grp:2_running_0" [ style = bold]
+"grp:2_start_0" -> "rsc1:2_start_0 node1" [ style = bold]
+"grp:2_start_0" -> "rsc2:2_start_0 node1" [ style = bold]
+"grp:2_start_0" [ style=bold color="green" fontcolor="orange"]
+"rsc1:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"]
+"rsc1:2_start_0 node1" -> "grp:2_running_0" [ style = bold]
+"rsc1:2_start_0 node1" -> "rsc1:2_monitor_10000 node1" [ style = bold]
+"rsc1:2_start_0 node1" -> "rsc2:2_start_0 node1" [ style = bold]
+"rsc1:2_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"rsc2:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"]
+"rsc2:2_start_0 node1" -> "grp:2_running_0" [ style = bold]
+"rsc2:2_start_0 node1" -> "rsc2:2_monitor_10000 node1" [ style = bold]
+"rsc2:2_start_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-3.dot b/cts/scheduler/dot/clone-recover-no-shuffle-3.dot
new file mode 100644
index 0000000..f60fd2c
--- /dev/null
+++ b/cts/scheduler/dot/clone-recover-no-shuffle-3.dot
@@ -0,0 +1,32 @@
+ digraph "g" {
+"base-bundle-2_monitor_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold]
+"base-bundle-2_monitor_0 node1" [ style=bold color="green" fontcolor="black"]
+"base-bundle-2_monitor_0 node2" -> "base-bundle-2_start_0 node1" [ style = bold]
+"base-bundle-2_monitor_0 node2" [ style=bold color="green" fontcolor="black"]
+"base-bundle-2_monitor_0 node3" -> "base-bundle-2_start_0 node1" [ style = bold]
+"base-bundle-2_monitor_0 node3" [ style=bold color="green" fontcolor="black"]
+"base-bundle-2_monitor_30000 node1" [ style=bold color="green" fontcolor="black"]
+"base-bundle-2_start_0 node1" -> "base-bundle-2_monitor_30000 node1" [ style = bold]
+"base-bundle-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold]
+"base-bundle-2_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold]
+"base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold]
+"base-bundle-clone_start_0" -> "base:2_start_0 base-bundle-2" [ style = bold]
+"base-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-podman-2_monitor_60000 node1" [ style=bold color="green" fontcolor="black"]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node1" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node2" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node3" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle-podman-2_monitor_60000 node1" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle_running_0" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold]
+"base-bundle-podman-2_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"base-bundle_running_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_start_0" -> "base-bundle-clone_start_0" [ style = bold]
+"base-bundle_start_0" -> "base-bundle-podman-2_start_0 node1" [ style = bold]
+"base-bundle_start_0" [ style=bold color="green" fontcolor="orange"]
+"base:2_start_0 base-bundle-2" -> "base-bundle-clone_running_0" [ style = bold]
+"base:2_start_0 base-bundle-2" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-4.dot b/cts/scheduler/dot/clone-recover-no-shuffle-4.dot
new file mode 100644
index 0000000..287d82d
--- /dev/null
+++ b/cts/scheduler/dot/clone-recover-no-shuffle-4.dot
@@ -0,0 +1,10 @@
+ digraph "g" {
+"dummy-clone_running_0" [ style=bold color="green" fontcolor="orange"]
+"dummy-clone_start_0" -> "dummy-clone_running_0" [ style = bold]
+"dummy-clone_start_0" -> "dummy:2_start_0 node1" [ style = bold]
+"dummy-clone_start_0" [ style=bold color="green" fontcolor="orange"]
+"dummy:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"]
+"dummy:2_start_0 node1" -> "dummy-clone_running_0" [ style = bold]
+"dummy:2_start_0 node1" -> "dummy:2_monitor_10000 node1" [ style = bold]
+"dummy:2_start_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-5.dot b/cts/scheduler/dot/clone-recover-no-shuffle-5.dot
new file mode 100644
index 0000000..d3bdf04
--- /dev/null
+++ b/cts/scheduler/dot/clone-recover-no-shuffle-5.dot
@@ -0,0 +1,21 @@
+ digraph "g" {
+"grp-clone_running_0" [ style=bold color="green" fontcolor="orange"]
+"grp-clone_start_0" -> "grp-clone_running_0" [ style = bold]
+"grp-clone_start_0" -> "grp:2_start_0" [ style = bold]
+"grp-clone_start_0" [ style=bold color="green" fontcolor="orange"]
+"grp:2_running_0" -> "grp-clone_running_0" [ style = bold]
+"grp:2_running_0" [ style=bold color="green" fontcolor="orange"]
+"grp:2_start_0" -> "grp:2_running_0" [ style = bold]
+"grp:2_start_0" -> "rsc1:2_start_0 node1" [ style = bold]
+"grp:2_start_0" -> "rsc2:2_start_0 node1" [ style = bold]
+"grp:2_start_0" [ style=bold color="green" fontcolor="orange"]
+"rsc1:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"]
+"rsc1:2_start_0 node1" -> "grp:2_running_0" [ style = bold]
+"rsc1:2_start_0 node1" -> "rsc1:2_monitor_10000 node1" [ style = bold]
+"rsc1:2_start_0 node1" -> "rsc2:2_start_0 node1" [ style = bold]
+"rsc1:2_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"rsc2:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"]
+"rsc2:2_start_0 node1" -> "grp:2_running_0" [ style = bold]
+"rsc2:2_start_0 node1" -> "rsc2:2_monitor_10000 node1" [ style = bold]
+"rsc2:2_start_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-6.dot b/cts/scheduler/dot/clone-recover-no-shuffle-6.dot
new file mode 100644
index 0000000..f60fd2c
--- /dev/null
+++ b/cts/scheduler/dot/clone-recover-no-shuffle-6.dot
@@ -0,0 +1,32 @@
+ digraph "g" {
+"base-bundle-2_monitor_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold]
+"base-bundle-2_monitor_0 node1" [ style=bold color="green" fontcolor="black"]
+"base-bundle-2_monitor_0 node2" -> "base-bundle-2_start_0 node1" [ style = bold]
+"base-bundle-2_monitor_0 node2" [ style=bold color="green" fontcolor="black"]
+"base-bundle-2_monitor_0 node3" -> "base-bundle-2_start_0 node1" [ style = bold]
+"base-bundle-2_monitor_0 node3" [ style=bold color="green" fontcolor="black"]
+"base-bundle-2_monitor_30000 node1" [ style=bold color="green" fontcolor="black"]
+"base-bundle-2_start_0 node1" -> "base-bundle-2_monitor_30000 node1" [ style = bold]
+"base-bundle-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold]
+"base-bundle-2_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold]
+"base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold]
+"base-bundle-clone_start_0" -> "base:2_start_0 base-bundle-2" [ style = bold]
+"base-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-podman-2_monitor_60000 node1" [ style=bold color="green" fontcolor="black"]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node1" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node2" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node3" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle-podman-2_monitor_60000 node1" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle_running_0" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold]
+"base-bundle-podman-2_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"base-bundle_running_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_start_0" -> "base-bundle-clone_start_0" [ style = bold]
+"base-bundle_start_0" -> "base-bundle-podman-2_start_0 node1" [ style = bold]
+"base-bundle_start_0" [ style=bold color="green" fontcolor="orange"]
+"base:2_start_0 base-bundle-2" -> "base-bundle-clone_running_0" [ style = bold]
+"base:2_start_0 base-bundle-2" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-7.dot b/cts/scheduler/dot/clone-recover-no-shuffle-7.dot
new file mode 100644
index 0000000..f61bf0d
--- /dev/null
+++ b/cts/scheduler/dot/clone-recover-no-shuffle-7.dot
@@ -0,0 +1,30 @@
+ digraph "g" {
+"Cancel dummy_monitor_10000 node2" -> "dummy_demote_0 node2" [ style = bold]
+"Cancel dummy_monitor_10000 node2" [ style=bold color="green" fontcolor="black"]
+"dummy-clone_demote_0" -> "dummy-clone_demoted_0" [ style = bold]
+"dummy-clone_demote_0" -> "dummy_demote_0 node2" [ style = bold]
+"dummy-clone_demote_0" [ style=bold color="green" fontcolor="orange"]
+"dummy-clone_demoted_0" -> "dummy-clone_promote_0" [ style = bold]
+"dummy-clone_demoted_0" -> "dummy-clone_start_0" [ style = bold]
+"dummy-clone_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"dummy-clone_promote_0" -> "dummy:2_promote_0 node1" [ style = bold]
+"dummy-clone_promote_0" [ style=bold color="green" fontcolor="orange"]
+"dummy-clone_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"dummy-clone_running_0" -> "dummy-clone_promote_0" [ style = bold]
+"dummy-clone_running_0" [ style=bold color="green" fontcolor="orange"]
+"dummy-clone_start_0" -> "dummy-clone_running_0" [ style = bold]
+"dummy-clone_start_0" -> "dummy:2_start_0 node1" [ style = bold]
+"dummy-clone_start_0" [ style=bold color="green" fontcolor="orange"]
+"dummy:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"]
+"dummy:2_promote_0 node1" -> "dummy-clone_promoted_0" [ style = bold]
+"dummy:2_promote_0 node1" -> "dummy:2_monitor_10000 node1" [ style = bold]
+"dummy:2_promote_0 node1" [ style=bold color="green" fontcolor="black"]
+"dummy:2_start_0 node1" -> "dummy-clone_running_0" [ style = bold]
+"dummy:2_start_0 node1" -> "dummy:2_monitor_10000 node1" [ style = bold]
+"dummy:2_start_0 node1" -> "dummy:2_promote_0 node1" [ style = bold]
+"dummy:2_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"dummy_demote_0 node2" -> "dummy-clone_demoted_0" [ style = bold]
+"dummy_demote_0 node2" -> "dummy_monitor_11000 node2" [ style = bold]
+"dummy_demote_0 node2" [ style=bold color="green" fontcolor="black"]
+"dummy_monitor_11000 node2" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-8.dot b/cts/scheduler/dot/clone-recover-no-shuffle-8.dot
new file mode 100644
index 0000000..d9c311a
--- /dev/null
+++ b/cts/scheduler/dot/clone-recover-no-shuffle-8.dot
@@ -0,0 +1,63 @@
+ digraph "g" {
+"Cancel rsc1_monitor_10000 node2" -> "rsc1_demote_0 node2" [ style = bold]
+"Cancel rsc1_monitor_10000 node2" [ style=bold color="green" fontcolor="black"]
+"Cancel rsc2_monitor_10000 node2" -> "rsc2_demote_0 node2" [ style = bold]
+"Cancel rsc2_monitor_10000 node2" [ style=bold color="green" fontcolor="black"]
+"grp-clone_demote_0" -> "grp-clone_demoted_0" [ style = bold]
+"grp-clone_demote_0" -> "grp:1_demote_0" [ style = bold]
+"grp-clone_demote_0" [ style=bold color="green" fontcolor="orange"]
+"grp-clone_demoted_0" -> "grp-clone_promote_0" [ style = bold]
+"grp-clone_demoted_0" -> "grp-clone_start_0" [ style = bold]
+"grp-clone_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"grp-clone_promote_0" -> "grp:2_promote_0" [ style = bold]
+"grp-clone_promote_0" [ style=bold color="green" fontcolor="orange"]
+"grp-clone_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"grp-clone_running_0" -> "grp-clone_promote_0" [ style = bold]
+"grp-clone_running_0" [ style=bold color="green" fontcolor="orange"]
+"grp-clone_start_0" -> "grp-clone_running_0" [ style = bold]
+"grp-clone_start_0" -> "grp:2_start_0" [ style = bold]
+"grp-clone_start_0" [ style=bold color="green" fontcolor="orange"]
+"grp:1_demote_0" -> "rsc1_demote_0 node2" [ style = bold]
+"grp:1_demote_0" -> "rsc2_demote_0 node2" [ style = bold]
+"grp:1_demote_0" [ style=bold color="green" fontcolor="orange"]
+"grp:1_demoted_0" -> "grp-clone_demoted_0" [ style = bold]
+"grp:1_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"grp:2_promote_0" -> "rsc1:2_promote_0 node1" [ style = bold]
+"grp:2_promote_0" -> "rsc2:2_promote_0 node1" [ style = bold]
+"grp:2_promote_0" [ style=bold color="green" fontcolor="orange"]
+"grp:2_promoted_0" -> "grp-clone_promoted_0" [ style = bold]
+"grp:2_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"grp:2_running_0" -> "grp-clone_running_0" [ style = bold]
+"grp:2_running_0" [ style=bold color="green" fontcolor="orange"]
+"grp:2_start_0" -> "grp:2_running_0" [ style = bold]
+"grp:2_start_0" -> "rsc1:2_start_0 node1" [ style = bold]
+"grp:2_start_0" -> "rsc2:2_start_0 node1" [ style = bold]
+"grp:2_start_0" [ style=bold color="green" fontcolor="orange"]
+"rsc1:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"]
+"rsc1:2_promote_0 node1" -> "grp:2_promoted_0" [ style = bold]
+"rsc1:2_promote_0 node1" -> "rsc1:2_monitor_10000 node1" [ style = bold]
+"rsc1:2_promote_0 node1" -> "rsc2:2_promote_0 node1" [ style = bold]
+"rsc1:2_promote_0 node1" [ style=bold color="green" fontcolor="black"]
+"rsc1:2_start_0 node1" -> "grp:2_running_0" [ style = bold]
+"rsc1:2_start_0 node1" -> "rsc1:2_monitor_10000 node1" [ style = bold]
+"rsc1:2_start_0 node1" -> "rsc1:2_promote_0 node1" [ style = bold]
+"rsc1:2_start_0 node1" -> "rsc2:2_start_0 node1" [ style = bold]
+"rsc1:2_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"rsc1_demote_0 node2" -> "grp:1_demoted_0" [ style = bold]
+"rsc1_demote_0 node2" -> "rsc1_monitor_11000 node2" [ style = bold]
+"rsc1_demote_0 node2" [ style=bold color="green" fontcolor="black"]
+"rsc1_monitor_11000 node2" [ style=bold color="green" fontcolor="black"]
+"rsc2:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"]
+"rsc2:2_promote_0 node1" -> "grp:2_promoted_0" [ style = bold]
+"rsc2:2_promote_0 node1" -> "rsc2:2_monitor_10000 node1" [ style = bold]
+"rsc2:2_promote_0 node1" [ style=bold color="green" fontcolor="black"]
+"rsc2:2_start_0 node1" -> "grp:2_running_0" [ style = bold]
+"rsc2:2_start_0 node1" -> "rsc2:2_monitor_10000 node1" [ style = bold]
+"rsc2:2_start_0 node1" -> "rsc2:2_promote_0 node1" [ style = bold]
+"rsc2:2_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"rsc2_demote_0 node2" -> "grp:1_demoted_0" [ style = bold]
+"rsc2_demote_0 node2" -> "rsc1_demote_0 node2" [ style = bold]
+"rsc2_demote_0 node2" -> "rsc2_monitor_11000 node2" [ style = bold]
+"rsc2_demote_0 node2" [ style=bold color="green" fontcolor="black"]
+"rsc2_monitor_11000 node2" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-9.dot b/cts/scheduler/dot/clone-recover-no-shuffle-9.dot
new file mode 100644
index 0000000..45dbac4
--- /dev/null
+++ b/cts/scheduler/dot/clone-recover-no-shuffle-9.dot
@@ -0,0 +1,69 @@
+ digraph "g" {
+"Cancel base_monitor_15000 base-bundle-1" -> "base_demote_0 base-bundle-1" [ style = bold]
+"Cancel base_monitor_15000 base-bundle-1" [ style=bold color="green" fontcolor="black"]
+"base-bundle-2_monitor_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold]
+"base-bundle-2_monitor_0 node1" [ style=bold color="green" fontcolor="black"]
+"base-bundle-2_monitor_0 node2" -> "base-bundle-2_start_0 node1" [ style = bold]
+"base-bundle-2_monitor_0 node2" [ style=bold color="green" fontcolor="black"]
+"base-bundle-2_monitor_0 node3" -> "base-bundle-2_start_0 node1" [ style = bold]
+"base-bundle-2_monitor_0 node3" [ style=bold color="green" fontcolor="black"]
+"base-bundle-2_monitor_30000 node1" [ style=bold color="green" fontcolor="black"]
+"base-bundle-2_start_0 node1" -> "base-bundle-2_monitor_30000 node1" [ style = bold]
+"base-bundle-2_start_0 node1" -> "base:2_monitor_15000 base-bundle-2" [ style = bold]
+"base-bundle-2_start_0 node1" -> "base:2_promote_0 base-bundle-2" [ style = bold]
+"base-bundle-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold]
+"base-bundle-2_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"base-bundle-clone_demote_0" -> "base-bundle-clone_demoted_0" [ style = bold]
+"base-bundle-clone_demote_0" -> "base_demote_0 base-bundle-1" [ style = bold]
+"base-bundle-clone_demote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_demoted_0" -> "base-bundle-clone_promote_0" [ style = bold]
+"base-bundle-clone_demoted_0" -> "base-bundle-clone_start_0" [ style = bold]
+"base-bundle-clone_demoted_0" -> "base-bundle_demoted_0" [ style = bold]
+"base-bundle-clone_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_promote_0" -> "base:2_promote_0 base-bundle-2" [ style = bold]
+"base-bundle-clone_promote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_promoted_0" -> "base-bundle_promoted_0" [ style = bold]
+"base-bundle-clone_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_running_0" -> "base-bundle-clone_promote_0" [ style = bold]
+"base-bundle-clone_running_0" -> "base-bundle_running_0" [ style = bold]
+"base-bundle-clone_running_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-clone_start_0" -> "base-bundle-clone_running_0" [ style = bold]
+"base-bundle-clone_start_0" -> "base:2_start_0 base-bundle-2" [ style = bold]
+"base-bundle-clone_start_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle-podman-2_monitor_60000 node1" [ style=bold color="green" fontcolor="black"]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node1" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node2" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_monitor_0 node3" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle-2_start_0 node1" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle-podman-2_monitor_60000 node1" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base-bundle_running_0" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base:2_promote_0 base-bundle-2" [ style = bold]
+"base-bundle-podman-2_start_0 node1" -> "base:2_start_0 base-bundle-2" [ style = bold]
+"base-bundle-podman-2_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"base-bundle_demote_0" -> "base-bundle-clone_demote_0" [ style = bold]
+"base-bundle_demote_0" -> "base-bundle_demoted_0" [ style = bold]
+"base-bundle_demote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_demoted_0" -> "base-bundle_promote_0" [ style = bold]
+"base-bundle_demoted_0" -> "base-bundle_start_0" [ style = bold]
+"base-bundle_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_promote_0" -> "base-bundle-clone_promote_0" [ style = bold]
+"base-bundle_promote_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_promoted_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_running_0" -> "base-bundle_promote_0" [ style = bold]
+"base-bundle_running_0" [ style=bold color="green" fontcolor="orange"]
+"base-bundle_start_0" -> "base-bundle-clone_start_0" [ style = bold]
+"base-bundle_start_0" -> "base-bundle-podman-2_start_0 node1" [ style = bold]
+"base-bundle_start_0" [ style=bold color="green" fontcolor="orange"]
+"base:2_monitor_15000 base-bundle-2" [ style=bold color="green" fontcolor="black"]
+"base:2_promote_0 base-bundle-2" -> "base-bundle-clone_promoted_0" [ style = bold]
+"base:2_promote_0 base-bundle-2" -> "base:2_monitor_15000 base-bundle-2" [ style = bold]
+"base:2_promote_0 base-bundle-2" [ style=bold color="green" fontcolor="black"]
+"base:2_start_0 base-bundle-2" -> "base-bundle-clone_running_0" [ style = bold]
+"base:2_start_0 base-bundle-2" -> "base:2_monitor_15000 base-bundle-2" [ style = bold]
+"base:2_start_0 base-bundle-2" -> "base:2_promote_0 base-bundle-2" [ style = bold]
+"base:2_start_0 base-bundle-2" [ style=bold color="green" fontcolor="black"]
+"base_demote_0 base-bundle-1" -> "base-bundle-clone_demoted_0" [ style = bold]
+"base_demote_0 base-bundle-1" -> "base_monitor_16000 base-bundle-1" [ style = bold]
+"base_demote_0 base-bundle-1" [ style=bold color="green" fontcolor="black"]
+"base_monitor_16000 base-bundle-1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/coloc-with-inner-group-member.dot b/cts/scheduler/dot/coloc-with-inner-group-member.dot
new file mode 100644
index 0000000..a3bad7a
--- /dev/null
+++ b/cts/scheduler/dot/coloc-with-inner-group-member.dot
@@ -0,0 +1,40 @@
+ digraph "g" {
+"bar_monitor_10000 rhel8-3" [ style=bold color="green" fontcolor="black"]
+"bar_start_0 rhel8-3" -> "bar_monitor_10000 rhel8-3" [ style = bold]
+"bar_start_0 rhel8-3" -> "grp_running_0" [ style = bold]
+"bar_start_0 rhel8-3" -> "vip_start_0 rhel8-3" [ style = bold]
+"bar_start_0 rhel8-3" [ style=bold color="green" fontcolor="black"]
+"bar_stop_0 rhel8-4" -> "bar_start_0 rhel8-3" [ style = bold]
+"bar_stop_0 rhel8-4" -> "foo_stop_0 rhel8-4" [ style = bold]
+"bar_stop_0 rhel8-4" -> "grp_stopped_0" [ style = bold]
+"bar_stop_0 rhel8-4" [ style=bold color="green" fontcolor="black"]
+"foo_monitor_10000 rhel8-3" [ style=bold color="green" fontcolor="black"]
+"foo_start_0 rhel8-3" -> "bar_start_0 rhel8-3" [ style = bold]
+"foo_start_0 rhel8-3" -> "foo_monitor_10000 rhel8-3" [ style = bold]
+"foo_start_0 rhel8-3" -> "grp_running_0" [ style = bold]
+"foo_start_0 rhel8-3" [ style=bold color="green" fontcolor="black"]
+"foo_stop_0 rhel8-4" -> "foo_start_0 rhel8-3" [ style = bold]
+"foo_stop_0 rhel8-4" -> "grp_stopped_0" [ style = bold]
+"foo_stop_0 rhel8-4" [ style=bold color="green" fontcolor="black"]
+"grp_running_0" [ style=bold color="green" fontcolor="orange"]
+"grp_start_0" -> "bar_start_0 rhel8-3" [ style = bold]
+"grp_start_0" -> "foo_start_0 rhel8-3" [ style = bold]
+"grp_start_0" -> "grp_running_0" [ style = bold]
+"grp_start_0" -> "vip_start_0 rhel8-3" [ style = bold]
+"grp_start_0" [ style=bold color="green" fontcolor="orange"]
+"grp_stop_0" -> "bar_stop_0 rhel8-4" [ style = bold]
+"grp_stop_0" -> "foo_stop_0 rhel8-4" [ style = bold]
+"grp_stop_0" -> "grp_stopped_0" [ style = bold]
+"grp_stop_0" -> "vip_stop_0 rhel8-3" [ style = bold]
+"grp_stop_0" [ style=bold color="green" fontcolor="orange"]
+"grp_stopped_0" -> "grp_start_0" [ style = bold]
+"grp_stopped_0" [ style=bold color="green" fontcolor="orange"]
+"vip_monitor_10000 rhel8-3" [ style=bold color="green" fontcolor="black"]
+"vip_start_0 rhel8-3" -> "grp_running_0" [ style = bold]
+"vip_start_0 rhel8-3" -> "vip_monitor_10000 rhel8-3" [ style = bold]
+"vip_start_0 rhel8-3" [ style=bold color="green" fontcolor="black"]
+"vip_stop_0 rhel8-3" -> "bar_stop_0 rhel8-4" [ style = bold]
+"vip_stop_0 rhel8-3" -> "grp_stopped_0" [ style = bold]
+"vip_stop_0 rhel8-3" -> "vip_start_0 rhel8-3" [ style = bold]
+"vip_stop_0 rhel8-3" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/group-anticolocation-2.dot b/cts/scheduler/dot/group-anticolocation-2.dot
new file mode 100644
index 0000000..def3b8b
--- /dev/null
+++ b/cts/scheduler/dot/group-anticolocation-2.dot
@@ -0,0 +1,29 @@
+ digraph "g" {
+"group2_running_0" [ style=bold color="green" fontcolor="orange"]
+"group2_start_0" -> "group2_running_0" [ style = bold]
+"group2_start_0" -> "member2a_start_0 node2" [ style = bold]
+"group2_start_0" -> "member2b_start_0 node2" [ style = bold]
+"group2_start_0" [ style=bold color="green" fontcolor="orange"]
+"group2_stop_0" -> "group2_stopped_0" [ style = bold]
+"group2_stop_0" -> "member2a_stop_0 node1" [ style = bold]
+"group2_stop_0" -> "member2b_stop_0 node1" [ style = bold]
+"group2_stop_0" [ style=bold color="green" fontcolor="orange"]
+"group2_stopped_0" -> "group2_start_0" [ style = bold]
+"group2_stopped_0" [ style=bold color="green" fontcolor="orange"]
+"member2a_monitor_10000 node2" [ style=bold color="green" fontcolor="black"]
+"member2a_start_0 node2" -> "group2_running_0" [ style = bold]
+"member2a_start_0 node2" -> "member2a_monitor_10000 node2" [ style = bold]
+"member2a_start_0 node2" -> "member2b_start_0 node2" [ style = bold]
+"member2a_start_0 node2" [ style=bold color="green" fontcolor="black"]
+"member2a_stop_0 node1" -> "group2_stopped_0" [ style = bold]
+"member2a_stop_0 node1" -> "member2a_start_0 node2" [ style = bold]
+"member2a_stop_0 node1" [ style=bold color="green" fontcolor="black"]
+"member2b_monitor_10000 node2" [ style=bold color="green" fontcolor="black"]
+"member2b_start_0 node2" -> "group2_running_0" [ style = bold]
+"member2b_start_0 node2" -> "member2b_monitor_10000 node2" [ style = bold]
+"member2b_start_0 node2" [ style=bold color="green" fontcolor="black"]
+"member2b_stop_0 node1" -> "group2_stopped_0" [ style = bold]
+"member2b_stop_0 node1" -> "member2a_stop_0 node1" [ style = bold]
+"member2b_stop_0 node1" -> "member2b_start_0 node2" [ style = bold]
+"member2b_stop_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/group-anticolocation-3.dot b/cts/scheduler/dot/group-anticolocation-3.dot
new file mode 100644
index 0000000..4886650
--- /dev/null
+++ b/cts/scheduler/dot/group-anticolocation-3.dot
@@ -0,0 +1,8 @@
+ digraph "g" {
+"group2_stop_0" -> "group2_stopped_0" [ style = bold]
+"group2_stop_0" -> "member2b_stop_0 node1" [ style = bold]
+"group2_stop_0" [ style=bold color="green" fontcolor="orange"]
+"group2_stopped_0" [ style=bold color="green" fontcolor="orange"]
+"member2b_stop_0 node1" -> "group2_stopped_0" [ style = bold]
+"member2b_stop_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/group-anticolocation-4.dot b/cts/scheduler/dot/group-anticolocation-4.dot
new file mode 100644
index 0000000..def3b8b
--- /dev/null
+++ b/cts/scheduler/dot/group-anticolocation-4.dot
@@ -0,0 +1,29 @@
+ digraph "g" {
+"group2_running_0" [ style=bold color="green" fontcolor="orange"]
+"group2_start_0" -> "group2_running_0" [ style = bold]
+"group2_start_0" -> "member2a_start_0 node2" [ style = bold]
+"group2_start_0" -> "member2b_start_0 node2" [ style = bold]
+"group2_start_0" [ style=bold color="green" fontcolor="orange"]
+"group2_stop_0" -> "group2_stopped_0" [ style = bold]
+"group2_stop_0" -> "member2a_stop_0 node1" [ style = bold]
+"group2_stop_0" -> "member2b_stop_0 node1" [ style = bold]
+"group2_stop_0" [ style=bold color="green" fontcolor="orange"]
+"group2_stopped_0" -> "group2_start_0" [ style = bold]
+"group2_stopped_0" [ style=bold color="green" fontcolor="orange"]
+"member2a_monitor_10000 node2" [ style=bold color="green" fontcolor="black"]
+"member2a_start_0 node2" -> "group2_running_0" [ style = bold]
+"member2a_start_0 node2" -> "member2a_monitor_10000 node2" [ style = bold]
+"member2a_start_0 node2" -> "member2b_start_0 node2" [ style = bold]
+"member2a_start_0 node2" [ style=bold color="green" fontcolor="black"]
+"member2a_stop_0 node1" -> "group2_stopped_0" [ style = bold]
+"member2a_stop_0 node1" -> "member2a_start_0 node2" [ style = bold]
+"member2a_stop_0 node1" [ style=bold color="green" fontcolor="black"]
+"member2b_monitor_10000 node2" [ style=bold color="green" fontcolor="black"]
+"member2b_start_0 node2" -> "group2_running_0" [ style = bold]
+"member2b_start_0 node2" -> "member2b_monitor_10000 node2" [ style = bold]
+"member2b_start_0 node2" [ style=bold color="green" fontcolor="black"]
+"member2b_stop_0 node1" -> "group2_stopped_0" [ style = bold]
+"member2b_stop_0 node1" -> "member2a_stop_0 node1" [ style = bold]
+"member2b_stop_0 node1" -> "member2b_start_0 node2" [ style = bold]
+"member2b_stop_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/group-anticolocation-5.dot b/cts/scheduler/dot/group-anticolocation-5.dot
new file mode 100644
index 0000000..c30fd94
--- /dev/null
+++ b/cts/scheduler/dot/group-anticolocation-5.dot
@@ -0,0 +1,29 @@
+ digraph "g" {
+"group2_running_0" [ style=bold color="green" fontcolor="orange"]
+"group2_start_0" -> "group2_running_0" [ style = bold]
+"group2_start_0" -> "member2a_start_0 node3" [ style = bold]
+"group2_start_0" -> "member2b_start_0 node3" [ style = bold]
+"group2_start_0" [ style=bold color="green" fontcolor="orange"]
+"group2_stop_0" -> "group2_stopped_0" [ style = bold]
+"group2_stop_0" -> "member2a_stop_0 node1" [ style = bold]
+"group2_stop_0" -> "member2b_stop_0 node1" [ style = bold]
+"group2_stop_0" [ style=bold color="green" fontcolor="orange"]
+"group2_stopped_0" -> "group2_start_0" [ style = bold]
+"group2_stopped_0" [ style=bold color="green" fontcolor="orange"]
+"member2a_monitor_10000 node3" [ style=bold color="green" fontcolor="black"]
+"member2a_start_0 node3" -> "group2_running_0" [ style = bold]
+"member2a_start_0 node3" -> "member2a_monitor_10000 node3" [ style = bold]
+"member2a_start_0 node3" -> "member2b_start_0 node3" [ style = bold]
+"member2a_start_0 node3" [ style=bold color="green" fontcolor="black"]
+"member2a_stop_0 node1" -> "group2_stopped_0" [ style = bold]
+"member2a_stop_0 node1" -> "member2a_start_0 node3" [ style = bold]
+"member2a_stop_0 node1" [ style=bold color="green" fontcolor="black"]
+"member2b_monitor_10000 node3" [ style=bold color="green" fontcolor="black"]
+"member2b_start_0 node3" -> "group2_running_0" [ style = bold]
+"member2b_start_0 node3" -> "member2b_monitor_10000 node3" [ style = bold]
+"member2b_start_0 node3" [ style=bold color="green" fontcolor="black"]
+"member2b_stop_0 node1" -> "group2_stopped_0" [ style = bold]
+"member2b_stop_0 node1" -> "member2a_stop_0 node1" [ style = bold]
+"member2b_stop_0 node1" -> "member2b_start_0 node3" [ style = bold]
+"member2b_stop_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/group-anticolocation.dot b/cts/scheduler/dot/group-anticolocation.dot
index def3b8b..6454c12 100644
--- a/cts/scheduler/dot/group-anticolocation.dot
+++ b/cts/scheduler/dot/group-anticolocation.dot
@@ -1,4 +1,15 @@
digraph "g" {
+"group1_running_0" [ style=bold color="green" fontcolor="orange"]
+"group1_start_0" -> "group1_running_0" [ style = bold]
+"group1_start_0" -> "member1a_start_0 node1" [ style = bold]
+"group1_start_0" -> "member1b_start_0 node1" [ style = bold]
+"group1_start_0" [ style=bold color="green" fontcolor="orange"]
+"group1_stop_0" -> "group1_stopped_0" [ style = bold]
+"group1_stop_0" -> "member1a_stop_0 node2" [ style = bold]
+"group1_stop_0" -> "member1b_stop_0 node2" [ style = bold]
+"group1_stop_0" [ style=bold color="green" fontcolor="orange"]
+"group1_stopped_0" -> "group1_start_0" [ style = bold]
+"group1_stopped_0" [ style=bold color="green" fontcolor="orange"]
"group2_running_0" [ style=bold color="green" fontcolor="orange"]
"group2_start_0" -> "group2_running_0" [ style = bold]
"group2_start_0" -> "member2a_start_0 node2" [ style = bold]
@@ -10,6 +21,22 @@
"group2_stop_0" [ style=bold color="green" fontcolor="orange"]
"group2_stopped_0" -> "group2_start_0" [ style = bold]
"group2_stopped_0" [ style=bold color="green" fontcolor="orange"]
+"member1a_monitor_10000 node1" [ style=bold color="green" fontcolor="black"]
+"member1a_start_0 node1" -> "group1_running_0" [ style = bold]
+"member1a_start_0 node1" -> "member1a_monitor_10000 node1" [ style = bold]
+"member1a_start_0 node1" -> "member1b_start_0 node1" [ style = bold]
+"member1a_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"member1a_stop_0 node2" -> "group1_stopped_0" [ style = bold]
+"member1a_stop_0 node2" -> "member1a_start_0 node1" [ style = bold]
+"member1a_stop_0 node2" [ style=bold color="green" fontcolor="black"]
+"member1b_monitor_10000 node1" [ style=bold color="green" fontcolor="black"]
+"member1b_start_0 node1" -> "group1_running_0" [ style = bold]
+"member1b_start_0 node1" -> "member1b_monitor_10000 node1" [ style = bold]
+"member1b_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"member1b_stop_0 node2" -> "group1_stopped_0" [ style = bold]
+"member1b_stop_0 node2" -> "member1a_stop_0 node2" [ style = bold]
+"member1b_stop_0 node2" -> "member1b_start_0 node1" [ style = bold]
+"member1b_stop_0 node2" [ style=bold color="green" fontcolor="black"]
"member2a_monitor_10000 node2" [ style=bold color="green" fontcolor="black"]
"member2a_start_0 node2" -> "group2_running_0" [ style = bold]
"member2a_start_0 node2" -> "member2a_monitor_10000 node2" [ style = bold]
diff --git a/cts/scheduler/dot/guest-host-not-fenceable.dot b/cts/scheduler/dot/guest-host-not-fenceable.dot
index a086fcb..0b6eeae 100644
--- a/cts/scheduler/dot/guest-host-not-fenceable.dot
+++ b/cts/scheduler/dot/guest-host-not-fenceable.dot
@@ -111,6 +111,7 @@
"galera_start_0 galera-bundle-0" -> "galera-bundle-master_running_0" [ style = dashed]
"galera_start_0 galera-bundle-0" -> "galera_monitor_10000 galera-bundle-0" [ style = dashed]
"galera_start_0 galera-bundle-0" -> "galera_start_0 galera-bundle-1" [ style = dashed]
+"galera_start_0 galera-bundle-0" -> "galera_start_0 galera-bundle-2" [ style = dashed]
"galera_start_0 galera-bundle-0" [ style=dashed color="red" fontcolor="black"]
"galera_start_0 galera-bundle-1" -> "galera-bundle-master_running_0" [ style = dashed]
"galera_start_0 galera-bundle-1" -> "galera_monitor_20000 galera-bundle-1" [ style = dashed]
@@ -131,6 +132,7 @@
"galera_stop_0 galera-bundle-1" [ style=dashed color="red" fontcolor="black"]
"galera_stop_0 galera-bundle-2" -> "galera-bundle-master_stopped_0" [ style = dashed]
"galera_stop_0 galera-bundle-2" -> "galera_start_0 galera-bundle-2" [ style = dashed]
+"galera_stop_0 galera-bundle-2" -> "galera_stop_0 galera-bundle-0" [ style = dashed]
"galera_stop_0 galera-bundle-2" -> "galera_stop_0 galera-bundle-1" [ style = dashed]
"galera_stop_0 galera-bundle-2" [ style=dashed color="red" fontcolor="black"]
"rabbitmq-bundle-0_monitor_30000 node1" [ style=dashed color="red" fontcolor="black"]
@@ -233,6 +235,7 @@
"rabbitmq_start_0 rabbitmq-bundle-0" -> "rabbitmq-bundle-clone_running_0" [ style = dashed]
"rabbitmq_start_0 rabbitmq-bundle-0" -> "rabbitmq_monitor_10000 rabbitmq-bundle-0" [ style = dashed]
"rabbitmq_start_0 rabbitmq-bundle-0" -> "rabbitmq_start_0 rabbitmq-bundle-1" [ style = dashed]
+"rabbitmq_start_0 rabbitmq-bundle-0" -> "rabbitmq_start_0 rabbitmq-bundle-2" [ style = dashed]
"rabbitmq_start_0 rabbitmq-bundle-0" [ style=dashed color="red" fontcolor="black"]
"rabbitmq_start_0 rabbitmq-bundle-1" -> "rabbitmq-bundle-clone_running_0" [ style = dashed]
"rabbitmq_start_0 rabbitmq-bundle-1" -> "rabbitmq_monitor_10000 rabbitmq-bundle-1" [ style = dashed]
@@ -251,6 +254,7 @@
"rabbitmq_stop_0 rabbitmq-bundle-1" [ style=dashed color="red" fontcolor="black"]
"rabbitmq_stop_0 rabbitmq-bundle-2" -> "rabbitmq-bundle-clone_stopped_0" [ style = dashed]
"rabbitmq_stop_0 rabbitmq-bundle-2" -> "rabbitmq_start_0 rabbitmq-bundle-2" [ style = dashed]
+"rabbitmq_stop_0 rabbitmq-bundle-2" -> "rabbitmq_stop_0 rabbitmq-bundle-0" [ style = dashed]
"rabbitmq_stop_0 rabbitmq-bundle-2" -> "rabbitmq_stop_0 rabbitmq-bundle-1" [ style = dashed]
"rabbitmq_stop_0 rabbitmq-bundle-2" [ style=dashed color="red" fontcolor="black"]
"stonith-fence_ipmilan-node1_stop_0 node2" [ style=dashed color="red" fontcolor="black"]
diff --git a/cts/scheduler/dot/inc4.dot b/cts/scheduler/dot/inc4.dot
index be3e1b3..620a845 100644
--- a/cts/scheduler/dot/inc4.dot
+++ b/cts/scheduler/dot/inc4.dot
@@ -24,10 +24,12 @@
"child_rsc1:3_stop_0 node1" -> "child_rsc1:3_start_0 node2" [ style = bold]
"child_rsc1:3_stop_0 node1" -> "rsc1_stopped_0" [ style = bold]
"child_rsc1:3_stop_0 node1" [ style=bold color="green" fontcolor="black"]
+"child_rsc1:4_monitor_0 node2" -> "child_rsc1:2_stop_0 node1" [ style = bold]
"child_rsc1:4_monitor_0 node2" -> "child_rsc1:3_stop_0 node1" [ style = bold]
"child_rsc1:4_monitor_0 node2" -> "rsc1_start_0" [ style = bold]
"child_rsc1:4_monitor_0 node2" -> "rsc1_stopped_0" [ style = bold]
"child_rsc1:4_monitor_0 node2" [ style=bold color="green" fontcolor="black"]
+"child_rsc1:4_stop_0 node1" -> "child_rsc1:2_stop_0 node1" [ style = bold]
"child_rsc1:4_stop_0 node1" -> "child_rsc1:3_stop_0 node1" [ style = bold]
"child_rsc1:4_stop_0 node1" -> "rsc1_stopped_0" [ style = bold]
"child_rsc1:4_stop_0 node1" [ style=bold color="green" fontcolor="black"]
diff --git a/cts/scheduler/dot/node-pending-timeout.dot b/cts/scheduler/dot/node-pending-timeout.dot
new file mode 100644
index 0000000..c808f7e
--- /dev/null
+++ b/cts/scheduler/dot/node-pending-timeout.dot
@@ -0,0 +1,7 @@
+ digraph "g" {
+"st-sbd_monitor_0 node-1" -> "st-sbd_start_0 node-1" [ style = bold]
+"st-sbd_monitor_0 node-1" [ style=bold color="green" fontcolor="black"]
+"st-sbd_start_0 node-1" [ style=bold color="green" fontcolor="black"]
+"stonith 'reboot' node-2" -> "st-sbd_start_0 node-1" [ style = bold]
+"stonith 'reboot' node-2" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/order-clone.dot b/cts/scheduler/dot/order-clone.dot
index 5aee990..e1b2a1a 100644
--- a/cts/scheduler/dot/order-clone.dot
+++ b/cts/scheduler/dot/order-clone.dot
@@ -9,9 +9,12 @@
"clvm-clone_start_0" [ style=dashed color="red" fontcolor="orange"]
"clvm:0_start_0 hex-7" -> "clvm-clone_running_0" [ style = dashed]
"clvm:0_start_0 hex-7" -> "clvm:1_start_0 hex-8" [ style = dashed]
+"clvm:0_start_0 hex-7" -> "clvm:2_start_0 hex-9" [ style = dashed]
+"clvm:0_start_0 hex-7" -> "clvm:3_start_0 hex-0" [ style = dashed]
"clvm:0_start_0 hex-7" [ style=dashed color="red" fontcolor="black"]
"clvm:1_start_0 hex-8" -> "clvm-clone_running_0" [ style = dashed]
"clvm:1_start_0 hex-8" -> "clvm:2_start_0 hex-9" [ style = dashed]
+"clvm:1_start_0 hex-8" -> "clvm:3_start_0 hex-0" [ style = dashed]
"clvm:1_start_0 hex-8" [ style=dashed color="red" fontcolor="black"]
"clvm:2_start_0 hex-9" -> "clvm-clone_running_0" [ style = dashed]
"clvm:2_start_0 hex-9" -> "clvm:3_start_0 hex-0" [ style = dashed]
diff --git a/cts/scheduler/dot/pending-node-no-uname.dot b/cts/scheduler/dot/pending-node-no-uname.dot
new file mode 100644
index 0000000..98783ca
--- /dev/null
+++ b/cts/scheduler/dot/pending-node-no-uname.dot
@@ -0,0 +1,7 @@
+ digraph "g" {
+"st-sbd_monitor_0 node-1" -> "st-sbd_start_0 node-1" [ style = dashed]
+"st-sbd_monitor_0 node-1" [ style=bold color="green" fontcolor="black"]
+"st-sbd_monitor_0 node-2" -> "st-sbd_start_0 node-1" [ style = dashed]
+"st-sbd_monitor_0 node-2" [ style=dashed color="red" fontcolor="black"]
+"st-sbd_start_0 node-1" [ style=dashed color="red" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/promoted-ordering.dot b/cts/scheduler/dot/promoted-ordering.dot
index a4ada9c..9b5033b 100644
--- a/cts/scheduler/dot/promoted-ordering.dot
+++ b/cts/scheduler/dot/promoted-ordering.dot
@@ -24,20 +24,20 @@
"extip_2_start_0 webcluster01" [ style=bold color="green" fontcolor="black"]
"fs_mysql_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black"]
"intip_0_main_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black"]
-"intip_1_master_monitor_0 webcluster01" -> "intip_1_master_start_0 webcluster01" [ style = bold]
-"intip_1_master_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black"]
-"intip_1_master_monitor_30000 webcluster01" [ style=bold color="green" fontcolor="black"]
-"intip_1_master_start_0 webcluster01" -> "intip_1_master_monitor_30000 webcluster01" [ style = bold]
-"intip_1_master_start_0 webcluster01" -> "ms_drbd_mysql_start_0" [ style = bold]
-"intip_1_master_start_0 webcluster01" -> "ms_drbd_www_start_0" [ style = bold]
-"intip_1_master_start_0 webcluster01" [ style=bold color="green" fontcolor="black"]
-"intip_2_slave_monitor_0 webcluster01" -> "intip_2_slave_start_0 webcluster01" [ style = bold]
-"intip_2_slave_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black"]
-"intip_2_slave_monitor_30000 webcluster01" [ style=bold color="green" fontcolor="black"]
-"intip_2_slave_start_0 webcluster01" -> "intip_2_slave_monitor_30000 webcluster01" [ style = bold]
-"intip_2_slave_start_0 webcluster01" -> "ms_drbd_mysql_start_0" [ style = bold]
-"intip_2_slave_start_0 webcluster01" -> "ms_drbd_www_start_0" [ style = bold]
-"intip_2_slave_start_0 webcluster01" [ style=bold color="green" fontcolor="black"]
+"intip_1_active_monitor_0 webcluster01" -> "intip_1_active_start_0 webcluster01" [ style = bold]
+"intip_1_active_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black"]
+"intip_1_active_monitor_30000 webcluster01" [ style=bold color="green" fontcolor="black"]
+"intip_1_active_start_0 webcluster01" -> "intip_1_active_monitor_30000 webcluster01" [ style = bold]
+"intip_1_active_start_0 webcluster01" -> "ms_drbd_mysql_start_0" [ style = bold]
+"intip_1_active_start_0 webcluster01" -> "ms_drbd_www_start_0" [ style = bold]
+"intip_1_active_start_0 webcluster01" [ style=bold color="green" fontcolor="black"]
+"intip_2_passive_monitor_0 webcluster01" -> "intip_2_passive_start_0 webcluster01" [ style = bold]
+"intip_2_passive_monitor_0 webcluster01" [ style=bold color="green" fontcolor="black"]
+"intip_2_passive_monitor_30000 webcluster01" [ style=bold color="green" fontcolor="black"]
+"intip_2_passive_start_0 webcluster01" -> "intip_2_passive_monitor_30000 webcluster01" [ style = bold]
+"intip_2_passive_start_0 webcluster01" -> "ms_drbd_mysql_start_0" [ style = bold]
+"intip_2_passive_start_0 webcluster01" -> "ms_drbd_www_start_0" [ style = bold]
+"intip_2_passive_start_0 webcluster01" [ style=bold color="green" fontcolor="black"]
"ms_drbd_mysql_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange"]
"ms_drbd_mysql_confirmed-pre_notify_start_0" -> "ms_drbd_mysql_post_notify_running_0" [ style = bold]
"ms_drbd_mysql_confirmed-pre_notify_start_0" -> "ms_drbd_mysql_start_0" [ style = bold]
diff --git a/cts/scheduler/dot/promoted-probed-score.dot b/cts/scheduler/dot/promoted-probed-score.dot
index a382979..f93648f 100644
--- a/cts/scheduler/dot/promoted-probed-score.dot
+++ b/cts/scheduler/dot/promoted-probed-score.dot
@@ -1,11 +1,11 @@
digraph "g" {
-"AdminClone_confirmed-post_notify_promoted_0" -> "AdminDrbd:0_monitor_59000 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"AdminClone_confirmed-post_notify_promoted_0" -> "AdminDrbd:1_monitor_59000 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"AdminClone_confirmed-post_notify_promoted_0" -> "AdminDrbd:0_monitor_59000 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"AdminClone_confirmed-post_notify_promoted_0" -> "AdminDrbd:1_monitor_59000 hypatia-corosync.nevis.columbia.edu" [ style = bold]
"AdminClone_confirmed-post_notify_promoted_0" -> "FilesystemClone_start_0" [ style = bold]
"AdminClone_confirmed-post_notify_promoted_0" [ style=bold color="green" fontcolor="orange"]
"AdminClone_confirmed-post_notify_running_0" -> "AdminClone_pre_notify_promote_0" [ style = bold]
-"AdminClone_confirmed-post_notify_running_0" -> "AdminDrbd:0_monitor_59000 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"AdminClone_confirmed-post_notify_running_0" -> "AdminDrbd:1_monitor_59000 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"AdminClone_confirmed-post_notify_running_0" -> "AdminDrbd:0_monitor_59000 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"AdminClone_confirmed-post_notify_running_0" -> "AdminDrbd:1_monitor_59000 hypatia-corosync.nevis.columbia.edu" [ style = bold]
"AdminClone_confirmed-post_notify_running_0" [ style=bold color="green" fontcolor="orange"]
"AdminClone_confirmed-pre_notify_promote_0" -> "AdminClone_post_notify_promoted_0" [ style = bold]
"AdminClone_confirmed-pre_notify_promote_0" -> "AdminClone_promote_0" [ style = bold]
@@ -14,21 +14,21 @@
"AdminClone_confirmed-pre_notify_start_0" -> "AdminClone_start_0" [ style = bold]
"AdminClone_confirmed-pre_notify_start_0" [ style=bold color="green" fontcolor="orange"]
"AdminClone_post_notify_promoted_0" -> "AdminClone_confirmed-post_notify_promoted_0" [ style = bold]
-"AdminClone_post_notify_promoted_0" -> "AdminDrbd:0_post_notify_promote_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"AdminClone_post_notify_promoted_0" -> "AdminDrbd:1_post_notify_promote_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"AdminClone_post_notify_promoted_0" -> "AdminDrbd:0_post_notify_promote_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"AdminClone_post_notify_promoted_0" -> "AdminDrbd:1_post_notify_promote_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
"AdminClone_post_notify_promoted_0" [ style=bold color="green" fontcolor="orange"]
"AdminClone_post_notify_running_0" -> "AdminClone_confirmed-post_notify_running_0" [ style = bold]
-"AdminClone_post_notify_running_0" -> "AdminDrbd:0_post_notify_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"AdminClone_post_notify_running_0" -> "AdminDrbd:1_post_notify_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"AdminClone_post_notify_running_0" -> "AdminDrbd:0_post_notify_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"AdminClone_post_notify_running_0" -> "AdminDrbd:1_post_notify_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
"AdminClone_post_notify_running_0" [ style=bold color="green" fontcolor="orange"]
"AdminClone_pre_notify_promote_0" -> "AdminClone_confirmed-pre_notify_promote_0" [ style = bold]
-"AdminClone_pre_notify_promote_0" -> "AdminDrbd:0_pre_notify_promote_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"AdminClone_pre_notify_promote_0" -> "AdminDrbd:1_pre_notify_promote_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"AdminClone_pre_notify_promote_0" -> "AdminDrbd:0_pre_notify_promote_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"AdminClone_pre_notify_promote_0" -> "AdminDrbd:1_pre_notify_promote_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
"AdminClone_pre_notify_promote_0" [ style=bold color="green" fontcolor="orange"]
"AdminClone_pre_notify_start_0" -> "AdminClone_confirmed-pre_notify_start_0" [ style = bold]
"AdminClone_pre_notify_start_0" [ style=bold color="green" fontcolor="orange"]
-"AdminClone_promote_0" -> "AdminDrbd:0_promote_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"AdminClone_promote_0" -> "AdminDrbd:1_promote_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"AdminClone_promote_0" -> "AdminDrbd:0_promote_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"AdminClone_promote_0" -> "AdminDrbd:1_promote_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
"AdminClone_promote_0" [ style=bold color="green" fontcolor="orange"]
"AdminClone_promoted_0" -> "AdminClone_post_notify_promoted_0" [ style = bold]
"AdminClone_promoted_0" [ style=bold color="green" fontcolor="orange"]
@@ -36,53 +36,53 @@
"AdminClone_running_0" -> "AdminClone_promote_0" [ style = bold]
"AdminClone_running_0" [ style=bold color="green" fontcolor="orange"]
"AdminClone_start_0" -> "AdminClone_running_0" [ style = bold]
-"AdminClone_start_0" -> "AdminDrbd:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"AdminClone_start_0" -> "AdminDrbd:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"AdminClone_start_0" -> "AdminDrbd:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"AdminClone_start_0" -> "AdminDrbd:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
"AdminClone_start_0" [ style=bold color="green" fontcolor="orange"]
-"AdminDrbd:0_monitor_59000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"AdminDrbd:0_post_notify_promote_0 hypatia-corosync.nevis.columbia.edu" -> "AdminClone_confirmed-post_notify_promoted_0" [ style = bold]
-"AdminDrbd:0_post_notify_promote_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"AdminDrbd:0_post_notify_start_0 hypatia-corosync.nevis.columbia.edu" -> "AdminClone_confirmed-post_notify_running_0" [ style = bold]
-"AdminDrbd:0_post_notify_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"AdminDrbd:0_pre_notify_promote_0 hypatia-corosync.nevis.columbia.edu" -> "AdminClone_confirmed-pre_notify_promote_0" [ style = bold]
-"AdminDrbd:0_pre_notify_promote_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"AdminDrbd:0_promote_0 hypatia-corosync.nevis.columbia.edu" -> "AdminClone_promoted_0" [ style = bold]
-"AdminDrbd:0_promote_0 hypatia-corosync.nevis.columbia.edu" -> "AdminDrbd:0_monitor_59000 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"AdminDrbd:0_promote_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_start_0" [ style = bold]
-"AdminDrbd:0_promote_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"AdminDrbd:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "AdminClone_running_0" [ style = bold]
-"AdminDrbd:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "AdminDrbd:0_monitor_59000 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"AdminDrbd:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "AdminDrbd:0_promote_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"AdminDrbd:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"AdminDrbd:1_monitor_59000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"AdminDrbd:1_post_notify_promote_0 orestes-corosync.nevis.columbia.edu" -> "AdminClone_confirmed-post_notify_promoted_0" [ style = bold]
-"AdminDrbd:1_post_notify_promote_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"AdminDrbd:1_post_notify_start_0 orestes-corosync.nevis.columbia.edu" -> "AdminClone_confirmed-post_notify_running_0" [ style = bold]
-"AdminDrbd:1_post_notify_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"AdminDrbd:1_pre_notify_promote_0 orestes-corosync.nevis.columbia.edu" -> "AdminClone_confirmed-pre_notify_promote_0" [ style = bold]
-"AdminDrbd:1_pre_notify_promote_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"AdminDrbd:1_promote_0 orestes-corosync.nevis.columbia.edu" -> "AdminClone_promoted_0" [ style = bold]
-"AdminDrbd:1_promote_0 orestes-corosync.nevis.columbia.edu" -> "AdminDrbd:1_monitor_59000 orestes-corosync.nevis.columbia.edu" [ style = bold]
-"AdminDrbd:1_promote_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_start_0" [ style = bold]
-"AdminDrbd:1_promote_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"AdminDrbd:1_start_0 orestes-corosync.nevis.columbia.edu" -> "AdminClone_running_0" [ style = bold]
-"AdminDrbd:1_start_0 orestes-corosync.nevis.columbia.edu" -> "AdminDrbd:1_monitor_59000 orestes-corosync.nevis.columbia.edu" [ style = bold]
-"AdminDrbd:1_start_0 orestes-corosync.nevis.columbia.edu" -> "AdminDrbd:1_promote_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
-"AdminDrbd:1_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"AdminLvm:0_monitor_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
-"AdminLvm:0_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"AdminLvm:0_monitor_30000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"AdminLvm:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "AdminLvm:0_monitor_30000 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"AdminLvm:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSUsrNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"AdminLvm:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold]
-"AdminLvm:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"AdminLvm:1_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
-"AdminLvm:1_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"AdminLvm:1_monitor_30000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"AdminLvm:1_start_0 orestes-corosync.nevis.columbia.edu" -> "AdminLvm:1_monitor_30000 orestes-corosync.nevis.columbia.edu" [ style = bold]
-"AdminLvm:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FSUsrNevis:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
-"AdminLvm:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold]
-"AdminLvm:1_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"AdminDrbd:0_monitor_59000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"AdminDrbd:0_post_notify_promote_0 orestes-corosync.nevis.columbia.edu" -> "AdminClone_confirmed-post_notify_promoted_0" [ style = bold]
+"AdminDrbd:0_post_notify_promote_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"AdminDrbd:0_post_notify_start_0 orestes-corosync.nevis.columbia.edu" -> "AdminClone_confirmed-post_notify_running_0" [ style = bold]
+"AdminDrbd:0_post_notify_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"AdminDrbd:0_pre_notify_promote_0 orestes-corosync.nevis.columbia.edu" -> "AdminClone_confirmed-pre_notify_promote_0" [ style = bold]
+"AdminDrbd:0_pre_notify_promote_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"AdminDrbd:0_promote_0 orestes-corosync.nevis.columbia.edu" -> "AdminClone_promoted_0" [ style = bold]
+"AdminDrbd:0_promote_0 orestes-corosync.nevis.columbia.edu" -> "AdminDrbd:0_monitor_59000 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"AdminDrbd:0_promote_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_start_0" [ style = bold]
+"AdminDrbd:0_promote_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"AdminDrbd:0_start_0 orestes-corosync.nevis.columbia.edu" -> "AdminClone_running_0" [ style = bold]
+"AdminDrbd:0_start_0 orestes-corosync.nevis.columbia.edu" -> "AdminDrbd:0_monitor_59000 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"AdminDrbd:0_start_0 orestes-corosync.nevis.columbia.edu" -> "AdminDrbd:0_promote_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"AdminDrbd:0_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"AdminDrbd:1_monitor_59000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"AdminDrbd:1_post_notify_promote_0 hypatia-corosync.nevis.columbia.edu" -> "AdminClone_confirmed-post_notify_promoted_0" [ style = bold]
+"AdminDrbd:1_post_notify_promote_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"AdminDrbd:1_post_notify_start_0 hypatia-corosync.nevis.columbia.edu" -> "AdminClone_confirmed-post_notify_running_0" [ style = bold]
+"AdminDrbd:1_post_notify_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"AdminDrbd:1_pre_notify_promote_0 hypatia-corosync.nevis.columbia.edu" -> "AdminClone_confirmed-pre_notify_promote_0" [ style = bold]
+"AdminDrbd:1_pre_notify_promote_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"AdminDrbd:1_promote_0 hypatia-corosync.nevis.columbia.edu" -> "AdminClone_promoted_0" [ style = bold]
+"AdminDrbd:1_promote_0 hypatia-corosync.nevis.columbia.edu" -> "AdminDrbd:1_monitor_59000 hypatia-corosync.nevis.columbia.edu" [ style = bold]
+"AdminDrbd:1_promote_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_start_0" [ style = bold]
+"AdminDrbd:1_promote_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"AdminDrbd:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "AdminClone_running_0" [ style = bold]
+"AdminDrbd:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "AdminDrbd:1_monitor_59000 hypatia-corosync.nevis.columbia.edu" [ style = bold]
+"AdminDrbd:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "AdminDrbd:1_promote_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
+"AdminDrbd:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"AdminLvm:0_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
+"AdminLvm:0_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"AdminLvm:0_monitor_30000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"AdminLvm:0_start_0 orestes-corosync.nevis.columbia.edu" -> "AdminLvm:0_monitor_30000 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"AdminLvm:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FSUsrNevis:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"AdminLvm:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold]
+"AdminLvm:0_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"AdminLvm:1_monitor_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
+"AdminLvm:1_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"AdminLvm:1_monitor_30000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"AdminLvm:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "AdminLvm:1_monitor_30000 hypatia-corosync.nevis.columbia.edu" [ style = bold]
+"AdminLvm:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSUsrNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
+"AdminLvm:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold]
+"AdminLvm:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
"ClusterIP:0_monitor_30000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
"ClusterIP:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "ClusterIP:0_monitor_30000 hypatia-corosync.nevis.columbia.edu" [ style = bold]
"ClusterIP:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "ClusterIPLocal:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
@@ -259,74 +259,74 @@
"ExportsGroup:1_start_0" -> "ExportWWW:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
"ExportsGroup:1_start_0" -> "ExportsGroup:1_running_0" [ style = bold]
"ExportsGroup:1_start_0" [ style=bold color="green" fontcolor="orange"]
-"FSMail:0_monitor_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
-"FSMail:0_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSMail:0_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSMail:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSMail:0_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"FSMail:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSWork:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"FSMail:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold]
-"FSMail:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSMail:1_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
-"FSMail:1_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSMail:1_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSMail:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FSMail:1_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style = bold]
-"FSMail:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FSWork:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
-"FSMail:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold]
-"FSMail:1_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSUsrNevis:0_monitor_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
-"FSUsrNevis:0_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSUsrNevis:0_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSUsrNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSUsrNevis:0_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"FSUsrNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSVarNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"FSUsrNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold]
-"FSUsrNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSUsrNevis:1_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
-"FSUsrNevis:1_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSUsrNevis:1_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSUsrNevis:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FSUsrNevis:1_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style = bold]
-"FSUsrNevis:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FSVarNevis:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
-"FSUsrNevis:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold]
-"FSUsrNevis:1_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSVarNevis:0_monitor_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
-"FSVarNevis:0_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSVarNevis:0_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSVarNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSVarNevis:0_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"FSVarNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSVirtualMachines:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"FSVarNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold]
-"FSVarNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSVarNevis:1_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
-"FSVarNevis:1_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSVarNevis:1_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSVarNevis:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FSVarNevis:1_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style = bold]
-"FSVarNevis:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FSVirtualMachines:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
-"FSVarNevis:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold]
-"FSVarNevis:1_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSVirtualMachines:0_monitor_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
-"FSVirtualMachines:0_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSVirtualMachines:0_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSVirtualMachines:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSMail:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"FSVirtualMachines:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSVirtualMachines:0_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"FSVirtualMachines:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold]
-"FSVirtualMachines:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSVirtualMachines:1_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
-"FSVirtualMachines:1_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSVirtualMachines:1_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSVirtualMachines:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FSMail:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
-"FSVirtualMachines:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FSVirtualMachines:1_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style = bold]
-"FSVirtualMachines:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold]
-"FSVirtualMachines:1_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSWork:0_monitor_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
-"FSWork:0_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSWork:0_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSWork:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSWork:0_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"FSWork:0_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold]
-"FSWork:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSWork:1_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
-"FSWork:1_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSWork:1_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
-"FSWork:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FSWork:1_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style = bold]
-"FSWork:1_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold]
-"FSWork:1_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSMail:0_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
+"FSMail:0_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSMail:0_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSMail:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FSMail:0_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"FSMail:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FSWork:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"FSMail:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold]
+"FSMail:0_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSMail:1_monitor_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
+"FSMail:1_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSMail:1_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSMail:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSMail:1_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style = bold]
+"FSMail:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSWork:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
+"FSMail:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold]
+"FSMail:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSUsrNevis:0_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
+"FSUsrNevis:0_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSUsrNevis:0_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSUsrNevis:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FSUsrNevis:0_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"FSUsrNevis:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FSVarNevis:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"FSUsrNevis:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold]
+"FSUsrNevis:0_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSUsrNevis:1_monitor_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
+"FSUsrNevis:1_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSUsrNevis:1_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSUsrNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSUsrNevis:1_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style = bold]
+"FSUsrNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSVarNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
+"FSUsrNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold]
+"FSUsrNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSVarNevis:0_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
+"FSVarNevis:0_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSVarNevis:0_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSVarNevis:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FSVarNevis:0_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"FSVarNevis:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FSVirtualMachines:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"FSVarNevis:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold]
+"FSVarNevis:0_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSVarNevis:1_monitor_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
+"FSVarNevis:1_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSVarNevis:1_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSVarNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSVarNevis:1_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style = bold]
+"FSVarNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSVirtualMachines:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
+"FSVarNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold]
+"FSVarNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSVirtualMachines:0_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
+"FSVirtualMachines:0_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSVirtualMachines:0_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSVirtualMachines:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FSMail:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"FSVirtualMachines:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FSVirtualMachines:0_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"FSVirtualMachines:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold]
+"FSVirtualMachines:0_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSVirtualMachines:1_monitor_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
+"FSVirtualMachines:1_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSVirtualMachines:1_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSVirtualMachines:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSMail:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
+"FSVirtualMachines:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSVirtualMachines:1_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style = bold]
+"FSVirtualMachines:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold]
+"FSVirtualMachines:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSWork:0_monitor_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
+"FSWork:0_monitor_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSWork:0_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSWork:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FSWork:0_monitor_20000 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"FSWork:0_start_0 orestes-corosync.nevis.columbia.edu" -> "FilesystemGroup:0_running_0" [ style = bold]
+"FSWork:0_start_0 orestes-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSWork:1_monitor_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemClone_start_0" [ style = bold]
+"FSWork:1_monitor_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSWork:1_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
+"FSWork:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FSWork:1_monitor_20000 hypatia-corosync.nevis.columbia.edu" [ style = bold]
+"FSWork:1_start_0 hypatia-corosync.nevis.columbia.edu" -> "FilesystemGroup:1_running_0" [ style = bold]
+"FSWork:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style=bold color="green" fontcolor="black"]
"FilesystemClone_running_0" -> "CronAmbientTemperature_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
"FilesystemClone_running_0" -> "CupsClone_start_0" [ style = bold]
"FilesystemClone_running_0" -> "DhcpGroup_start_0" [ style = bold]
@@ -339,32 +339,32 @@
"FilesystemClone_start_0" -> "FilesystemGroup:0_start_0" [ style = bold]
"FilesystemClone_start_0" -> "FilesystemGroup:1_start_0" [ style = bold]
"FilesystemClone_start_0" [ style=bold color="green" fontcolor="orange"]
-"FilesystemGroup:0_running_0" -> "CupsGroup:0_start_0" [ style = bold]
-"FilesystemGroup:0_running_0" -> "ExportsGroup:0_start_0" [ style = bold]
+"FilesystemGroup:0_running_0" -> "CupsGroup:1_start_0" [ style = bold]
+"FilesystemGroup:0_running_0" -> "ExportsGroup:1_start_0" [ style = bold]
"FilesystemGroup:0_running_0" -> "FilesystemClone_running_0" [ style = bold]
-"FilesystemGroup:0_running_0" -> "LibvirtdGroup:0_start_0" [ style = bold]
-"FilesystemGroup:0_running_0" -> "TftpGroup:0_start_0" [ style = bold]
+"FilesystemGroup:0_running_0" -> "LibvirtdGroup:1_start_0" [ style = bold]
+"FilesystemGroup:0_running_0" -> "TftpGroup:1_start_0" [ style = bold]
"FilesystemGroup:0_running_0" [ style=bold color="green" fontcolor="orange"]
-"FilesystemGroup:0_start_0" -> "AdminLvm:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"FilesystemGroup:0_start_0" -> "FSMail:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"FilesystemGroup:0_start_0" -> "FSUsrNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"FilesystemGroup:0_start_0" -> "FSVarNevis:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"FilesystemGroup:0_start_0" -> "FSVirtualMachines:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
-"FilesystemGroup:0_start_0" -> "FSWork:0_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
+"FilesystemGroup:0_start_0" -> "AdminLvm:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"FilesystemGroup:0_start_0" -> "FSMail:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"FilesystemGroup:0_start_0" -> "FSUsrNevis:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"FilesystemGroup:0_start_0" -> "FSVarNevis:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"FilesystemGroup:0_start_0" -> "FSVirtualMachines:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"FilesystemGroup:0_start_0" -> "FSWork:0_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
"FilesystemGroup:0_start_0" -> "FilesystemGroup:0_running_0" [ style = bold]
"FilesystemGroup:0_start_0" [ style=bold color="green" fontcolor="orange"]
-"FilesystemGroup:1_running_0" -> "CupsGroup:1_start_0" [ style = bold]
-"FilesystemGroup:1_running_0" -> "ExportsGroup:1_start_0" [ style = bold]
+"FilesystemGroup:1_running_0" -> "CupsGroup:0_start_0" [ style = bold]
+"FilesystemGroup:1_running_0" -> "ExportsGroup:0_start_0" [ style = bold]
"FilesystemGroup:1_running_0" -> "FilesystemClone_running_0" [ style = bold]
-"FilesystemGroup:1_running_0" -> "LibvirtdGroup:1_start_0" [ style = bold]
-"FilesystemGroup:1_running_0" -> "TftpGroup:1_start_0" [ style = bold]
+"FilesystemGroup:1_running_0" -> "LibvirtdGroup:0_start_0" [ style = bold]
+"FilesystemGroup:1_running_0" -> "TftpGroup:0_start_0" [ style = bold]
"FilesystemGroup:1_running_0" [ style=bold color="green" fontcolor="orange"]
-"FilesystemGroup:1_start_0" -> "AdminLvm:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
-"FilesystemGroup:1_start_0" -> "FSMail:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
-"FilesystemGroup:1_start_0" -> "FSUsrNevis:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
-"FilesystemGroup:1_start_0" -> "FSVarNevis:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
-"FilesystemGroup:1_start_0" -> "FSVirtualMachines:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
-"FilesystemGroup:1_start_0" -> "FSWork:1_start_0 orestes-corosync.nevis.columbia.edu" [ style = bold]
+"FilesystemGroup:1_start_0" -> "AdminLvm:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
+"FilesystemGroup:1_start_0" -> "FSMail:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
+"FilesystemGroup:1_start_0" -> "FSUsrNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
+"FilesystemGroup:1_start_0" -> "FSVarNevis:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
+"FilesystemGroup:1_start_0" -> "FSVirtualMachines:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
+"FilesystemGroup:1_start_0" -> "FSWork:1_start_0 hypatia-corosync.nevis.columbia.edu" [ style = bold]
"FilesystemGroup:1_start_0" -> "FilesystemGroup:1_running_0" [ style = bold]
"FilesystemGroup:1_start_0" [ style=bold color="green" fontcolor="orange"]
"IPClone_running_0" [ style=bold color="green" fontcolor="orange"]
diff --git a/cts/scheduler/dot/timeout-by-node.dot b/cts/scheduler/dot/timeout-by-node.dot
new file mode 100644
index 0000000..b4c0b97
--- /dev/null
+++ b/cts/scheduler/dot/timeout-by-node.dot
@@ -0,0 +1,40 @@
+ digraph "g" {
+"rsc1-clone_running_0" [ style=bold color="green" fontcolor="orange"]
+"rsc1-clone_start_0" -> "rsc1-clone_running_0" [ style = bold]
+"rsc1-clone_start_0" -> "rsc1:0_start_0 node2" [ style = bold]
+"rsc1-clone_start_0" -> "rsc1:1_start_0 node3" [ style = bold]
+"rsc1-clone_start_0" -> "rsc1:2_start_0 node4" [ style = bold]
+"rsc1-clone_start_0" -> "rsc1:3_start_0 node5" [ style = bold]
+"rsc1-clone_start_0" -> "rsc1:4_start_0 node1" [ style = bold]
+"rsc1-clone_start_0" [ style=bold color="green" fontcolor="orange"]
+"rsc1:0_monitor_0 node2" -> "rsc1-clone_start_0" [ style = bold]
+"rsc1:0_monitor_0 node2" [ style=bold color="green" fontcolor="black"]
+"rsc1:0_monitor_10000 node2" [ style=bold color="green" fontcolor="black"]
+"rsc1:0_start_0 node2" -> "rsc1-clone_running_0" [ style = bold]
+"rsc1:0_start_0 node2" -> "rsc1:0_monitor_10000 node2" [ style = bold]
+"rsc1:0_start_0 node2" [ style=bold color="green" fontcolor="black"]
+"rsc1:1_monitor_0 node3" -> "rsc1-clone_start_0" [ style = bold]
+"rsc1:1_monitor_0 node3" [ style=bold color="green" fontcolor="black"]
+"rsc1:1_monitor_10000 node3" [ style=bold color="green" fontcolor="black"]
+"rsc1:1_start_0 node3" -> "rsc1-clone_running_0" [ style = bold]
+"rsc1:1_start_0 node3" -> "rsc1:1_monitor_10000 node3" [ style = bold]
+"rsc1:1_start_0 node3" [ style=bold color="green" fontcolor="black"]
+"rsc1:2_monitor_0 node4" -> "rsc1-clone_start_0" [ style = bold]
+"rsc1:2_monitor_0 node4" [ style=bold color="green" fontcolor="black"]
+"rsc1:2_monitor_10000 node4" [ style=bold color="green" fontcolor="black"]
+"rsc1:2_start_0 node4" -> "rsc1-clone_running_0" [ style = bold]
+"rsc1:2_start_0 node4" -> "rsc1:2_monitor_10000 node4" [ style = bold]
+"rsc1:2_start_0 node4" [ style=bold color="green" fontcolor="black"]
+"rsc1:3_monitor_0 node5" -> "rsc1-clone_start_0" [ style = bold]
+"rsc1:3_monitor_0 node5" [ style=bold color="green" fontcolor="black"]
+"rsc1:3_monitor_10000 node5" [ style=bold color="green" fontcolor="black"]
+"rsc1:3_start_0 node5" -> "rsc1-clone_running_0" [ style = bold]
+"rsc1:3_start_0 node5" -> "rsc1:3_monitor_10000 node5" [ style = bold]
+"rsc1:3_start_0 node5" [ style=bold color="green" fontcolor="black"]
+"rsc1:4_monitor_0 node1" -> "rsc1-clone_start_0" [ style = bold]
+"rsc1:4_monitor_0 node1" [ style=bold color="green" fontcolor="black"]
+"rsc1:4_monitor_10000 node1" [ style=bold color="green" fontcolor="black"]
+"rsc1:4_start_0 node1" -> "rsc1-clone_running_0" [ style = bold]
+"rsc1:4_start_0 node1" -> "rsc1:4_monitor_10000 node1" [ style = bold]
+"rsc1:4_start_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/cts/scheduler/dot/unfence-definition.dot b/cts/scheduler/dot/unfence-definition.dot
index 6b67392..b2ec3d5 100644
--- a/cts/scheduler/dot/unfence-definition.dot
+++ b/cts/scheduler/dot/unfence-definition.dot
@@ -20,11 +20,13 @@
"clvmd:1_start_0 virt-2" [ style=bold color="green" fontcolor="black"]
"clvmd:2_monitor_0 virt-3" -> "clvmd-clone_start_0" [ style = bold]
"clvmd:2_monitor_0 virt-3" -> "clvmd-clone_stopped_0" [ style = bold]
+"clvmd:2_monitor_0 virt-3" -> "clvmd_stop_0 virt-1" [ style = bold]
"clvmd:2_monitor_0 virt-3" [ style=bold color="green" fontcolor="black"]
"clvmd:2_start_0 virt-3" -> "clvmd-clone_running_0" [ style = bold]
"clvmd:2_start_0 virt-3" [ style=bold color="green" fontcolor="black"]
"clvmd_start_0 virt-1" -> "clvmd-clone_running_0" [ style = bold]
"clvmd_start_0 virt-1" -> "clvmd:1_start_0 virt-2" [ style = bold]
+"clvmd_start_0 virt-1" -> "clvmd:2_start_0 virt-3" [ style = bold]
"clvmd_start_0 virt-1" [ style=bold color="green" fontcolor="black"]
"clvmd_stop_0 virt-1" -> "clvmd-clone_stopped_0" [ style = bold]
"clvmd_stop_0 virt-1" -> "clvmd_start_0 virt-1" [ style = bold]
@@ -44,12 +46,14 @@
"dlm-clone_stopped_0" [ style=bold color="green" fontcolor="orange"]
"dlm:2_monitor_0 virt-3" -> "dlm-clone_start_0" [ style = bold]
"dlm:2_monitor_0 virt-3" -> "dlm-clone_stopped_0" [ style = bold]
+"dlm:2_monitor_0 virt-3" -> "dlm_stop_0 virt-1" [ style = bold]
"dlm:2_monitor_0 virt-3" [ style=bold color="green" fontcolor="black"]
"dlm:2_start_0 virt-3" -> "clvmd:2_start_0 virt-3" [ style = bold]
"dlm:2_start_0 virt-3" -> "dlm-clone_running_0" [ style = bold]
"dlm:2_start_0 virt-3" [ style=bold color="green" fontcolor="black"]
"dlm_start_0 virt-1" -> "clvmd_start_0 virt-1" [ style = bold]
"dlm_start_0 virt-1" -> "dlm-clone_running_0" [ style = bold]
+"dlm_start_0 virt-1" -> "dlm:2_start_0 virt-3" [ style = bold]
"dlm_start_0 virt-1" [ style=bold color="green" fontcolor="black"]
"dlm_stop_0 virt-1" -> "dlm-clone_stopped_0" [ style = bold]
"dlm_stop_0 virt-1" -> "dlm_start_0 virt-1" [ style = bold]
diff --git a/cts/scheduler/dot/unfence-parameters.dot b/cts/scheduler/dot/unfence-parameters.dot
index d03b227..d5646c9 100644
--- a/cts/scheduler/dot/unfence-parameters.dot
+++ b/cts/scheduler/dot/unfence-parameters.dot
@@ -20,11 +20,13 @@
"clvmd:1_start_0 virt-2" [ style=bold color="green" fontcolor="black"]
"clvmd:2_monitor_0 virt-3" -> "clvmd-clone_start_0" [ style = bold]
"clvmd:2_monitor_0 virt-3" -> "clvmd-clone_stopped_0" [ style = bold]
+"clvmd:2_monitor_0 virt-3" -> "clvmd_stop_0 virt-1" [ style = bold]
"clvmd:2_monitor_0 virt-3" [ style=bold color="green" fontcolor="black"]
"clvmd:2_start_0 virt-3" -> "clvmd-clone_running_0" [ style = bold]
"clvmd:2_start_0 virt-3" [ style=bold color="green" fontcolor="black"]
"clvmd_start_0 virt-1" -> "clvmd-clone_running_0" [ style = bold]
"clvmd_start_0 virt-1" -> "clvmd:1_start_0 virt-2" [ style = bold]
+"clvmd_start_0 virt-1" -> "clvmd:2_start_0 virt-3" [ style = bold]
"clvmd_start_0 virt-1" [ style=bold color="green" fontcolor="black"]
"clvmd_stop_0 virt-1" -> "clvmd-clone_stopped_0" [ style = bold]
"clvmd_stop_0 virt-1" -> "clvmd_start_0 virt-1" [ style = bold]
@@ -44,12 +46,14 @@
"dlm-clone_stopped_0" [ style=bold color="green" fontcolor="orange"]
"dlm:2_monitor_0 virt-3" -> "dlm-clone_start_0" [ style = bold]
"dlm:2_monitor_0 virt-3" -> "dlm-clone_stopped_0" [ style = bold]
+"dlm:2_monitor_0 virt-3" -> "dlm_stop_0 virt-1" [ style = bold]
"dlm:2_monitor_0 virt-3" [ style=bold color="green" fontcolor="black"]
"dlm:2_start_0 virt-3" -> "clvmd:2_start_0 virt-3" [ style = bold]
"dlm:2_start_0 virt-3" -> "dlm-clone_running_0" [ style = bold]
"dlm:2_start_0 virt-3" [ style=bold color="green" fontcolor="black"]
"dlm_start_0 virt-1" -> "clvmd_start_0 virt-1" [ style = bold]
"dlm_start_0 virt-1" -> "dlm-clone_running_0" [ style = bold]
+"dlm_start_0 virt-1" -> "dlm:2_start_0 virt-3" [ style = bold]
"dlm_start_0 virt-1" [ style=bold color="green" fontcolor="black"]
"dlm_stop_0 virt-1" -> "dlm-clone_stopped_0" [ style = bold]
"dlm_stop_0 virt-1" -> "dlm_start_0 virt-1" [ style = bold]
diff --git a/cts/scheduler/dot/utilization-complex.dot b/cts/scheduler/dot/utilization-complex.dot
index cccda24..340880d 100644
--- a/cts/scheduler/dot/utilization-complex.dot
+++ b/cts/scheduler/dot/utilization-complex.dot
@@ -151,6 +151,7 @@
"httpd_start_0 httpd-bundle-0" -> "httpd-bundle-clone_running_0" [ style = bold]
"httpd_start_0 httpd-bundle-0" -> "httpd_monitor_15000 httpd-bundle-0" [ style = bold]
"httpd_start_0 httpd-bundle-0" -> "httpd_start_0 httpd-bundle-1" [ style = dashed]
+"httpd_start_0 httpd-bundle-0" -> "httpd_start_0 httpd-bundle-2" [ style = dashed]
"httpd_start_0 httpd-bundle-0" [ style=bold color="green" fontcolor="black"]
"httpd_start_0 httpd-bundle-1" -> "httpd-bundle-clone_running_0" [ style = dashed]
"httpd_start_0 httpd-bundle-1" -> "httpd_monitor_15000 httpd-bundle-1" [ style = dashed]
diff --git a/cts/scheduler/exp/bug-1822.exp b/cts/scheduler/exp/bug-1822.exp
index 1206c97..9960c68 100644
--- a/cts/scheduler/exp/bug-1822.exp
+++ b/cts/scheduler/exp/bug-1822.exp
@@ -60,7 +60,7 @@
<action_set>
<rsc_op id="13" operation="stop" operation_key="promotable_Stateful:1_stop_0" on_node="process1a" on_node_uuid="4dbb8c56-330e-4835-a15e-c0aa632d4e89">
<primitive id="promotable_Stateful:1" class="ocf" provider="heartbeat" type="Dummy-statful"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="true" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="stop" CRM_meta_notify="false" CRM_meta_on_fail="stop" CRM_meta_on_node="process1a" CRM_meta_on_node_uuid="4dbb8c56-330e-4835-a15e-c0aa632d4e89" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="60000" sleep_time="10000"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="true" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="stop" CRM_meta_notify="false" CRM_meta_on_node="process1a" CRM_meta_on_node_uuid="4dbb8c56-330e-4835-a15e-c0aa632d4e89" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="60000" sleep_time="10000"/>
</rsc_op>
</action_set>
<inputs>
@@ -89,7 +89,7 @@
<action_set>
<rsc_op id="15" operation="stop" operation_key="promotable_procdctl:1_stop_0" on_node="process1a" on_node_uuid="4dbb8c56-330e-4835-a15e-c0aa632d4e89">
<primitive id="promotable_procdctl:1" class="ocf" provider="heartbeat" type="procdctl"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="true" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="stop" CRM_meta_notify="false" CRM_meta_on_fail="stop" CRM_meta_on_node="process1a" CRM_meta_on_node_uuid="4dbb8c56-330e-4835-a15e-c0aa632d4e89" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="60000" procd="sleep 10000"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="true" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="stop" CRM_meta_notify="false" CRM_meta_on_node="process1a" CRM_meta_on_node_uuid="4dbb8c56-330e-4835-a15e-c0aa632d4e89" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="60000" procd="sleep 10000"/>
</rsc_op>
</action_set>
<inputs>
diff --git a/cts/scheduler/exp/bug-lf-2422.exp b/cts/scheduler/exp/bug-lf-2422.exp
index 212493e..4728c24 100644
--- a/cts/scheduler/exp/bug-lf-2422.exp
+++ b/cts/scheduler/exp/bug-lf-2422.exp
@@ -304,6 +304,12 @@
<rsc_op id="53" operation="stop" operation_key="ocfs:0_stop_0" internal_operation_key="ocfs:1_stop_0" on_node="qa-suse-4" on_node_uuid="qa-suse-4"/>
</trigger>
<trigger>
+ <rsc_op id="54" operation="stop" operation_key="ocfs:2_stop_0" on_node="qa-suse-3" on_node_uuid="qa-suse-3"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="55" operation="stop" operation_key="ocfs:3_stop_0" on_node="qa-suse-2" on_node_uuid="qa-suse-2"/>
+ </trigger>
+ <trigger>
<pseudo_event id="58" operation="stop" operation_key="c-ocfs_stop_0"/>
</trigger>
</inputs>
@@ -320,6 +326,9 @@
<rsc_op id="54" operation="stop" operation_key="ocfs:2_stop_0" on_node="qa-suse-3" on_node_uuid="qa-suse-3"/>
</trigger>
<trigger>
+ <rsc_op id="55" operation="stop" operation_key="ocfs:3_stop_0" on_node="qa-suse-2" on_node_uuid="qa-suse-2"/>
+ </trigger>
+ <trigger>
<pseudo_event id="58" operation="stop" operation_key="c-ocfs_stop_0"/>
</trigger>
</inputs>
diff --git a/cts/scheduler/exp/bundle-interleave-start.exp b/cts/scheduler/exp/bundle-interleave-start.exp
index e676b1b..4f726cd 100644
--- a/cts/scheduler/exp/bundle-interleave-start.exp
+++ b/cts/scheduler/exp/bundle-interleave-start.exp
@@ -1,42 +1,73 @@
<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
<synapse id="0">
<action_set>
- <rsc_op id="81" operation="monitor" operation_key="base:0_monitor_16000" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node2">
- <primitive id="base" long-id="base:0" class="ocf" provider="pacemaker" type="Stateful"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-0" CRM_meta_on_node_uuid="base-bundle-0" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
+ <rsc_op id="82" operation="monitor" operation_key="base:2_monitor_15000" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node4">
+ <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_op_target_rc="8" CRM_meta_physical_host="node4" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="15000" />
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="66" operation="start" operation_key="base-bundle-0_start_0" on_node="node2" on_node_uuid="2"/>
+ <rsc_op id="74" operation="start" operation_key="base-bundle-2_start_0" on_node="node4" on_node_uuid="4"/>
</trigger>
<trigger>
- <rsc_op id="80" operation="start" operation_key="base:0_start_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node2"/>
+ <rsc_op id="80" operation="start" operation_key="base:2_start_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node4"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="81" operation="promote" operation_key="base:2_promote_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node4"/>
</trigger>
</inputs>
</synapse>
<synapse id="1">
<action_set>
- <rsc_op id="80" operation="start" operation_key="base:0_start_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node2">
- <primitive id="base" long-id="base:0" class="ocf" provider="pacemaker" type="Stateful"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-0" CRM_meta_on_node_uuid="base-bundle-0" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ <rsc_op id="81" operation="promote" operation_key="base:2_promote_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node4">
+ <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_physical_host="node4" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="64" operation="start" operation_key="base-bundle-podman-0_start_0" on_node="node2" on_node_uuid="2"/>
+ <rsc_op id="72" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4"/>
</trigger>
<trigger>
- <rsc_op id="66" operation="start" operation_key="base-bundle-0_start_0" on_node="node2" on_node_uuid="2"/>
+ <rsc_op id="74" operation="start" operation_key="base-bundle-2_start_0" on_node="node4" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="80" operation="start" operation_key="base:2_start_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node4"/>
</trigger>
<trigger>
- <pseudo_event id="86" operation="start" operation_key="base-bundle-clone_start_0"/>
+ <pseudo_event id="91" operation="promote" operation_key="base-bundle-clone_promote_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="2">
<action_set>
- <rsc_op id="83" operation="monitor" operation_key="base:1_monitor_16000" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node3">
+ <rsc_op id="80" operation="start" operation_key="base:2_start_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node4">
+ <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_physical_host="node4" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="72" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="74" operation="start" operation_key="base-bundle-2_start_0" on_node="node4" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="83" operation="start" operation_key="base:1_start_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="85" operation="start" operation_key="base:0_start_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="87" operation="start" operation_key="base-bundle-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="84" operation="monitor" operation_key="base:1_monitor_16000" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node3">
<primitive id="base" long-id="base:1" class="ocf" provider="pacemaker" type="Stateful"/>
<attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-1" CRM_meta_on_node_uuid="base-bundle-1" CRM_meta_physical_host="node3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
</rsc_op>
@@ -46,13 +77,13 @@
<rsc_op id="70" operation="start" operation_key="base-bundle-1_start_0" on_node="node3" on_node_uuid="3"/>
</trigger>
<trigger>
- <rsc_op id="82" operation="start" operation_key="base:1_start_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node3"/>
+ <rsc_op id="83" operation="start" operation_key="base:1_start_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node3"/>
</trigger>
</inputs>
</synapse>
- <synapse id="3">
+ <synapse id="4">
<action_set>
- <rsc_op id="82" operation="start" operation_key="base:1_start_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node3">
+ <rsc_op id="83" operation="start" operation_key="base:1_start_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node3">
<primitive id="base" long-id="base:1" class="ocf" provider="pacemaker" type="Stateful"/>
<attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-1" CRM_meta_on_node_uuid="base-bundle-1" CRM_meta_physical_host="node3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
</rsc_op>
@@ -65,75 +96,99 @@
<rsc_op id="70" operation="start" operation_key="base-bundle-1_start_0" on_node="node3" on_node_uuid="3"/>
</trigger>
<trigger>
- <rsc_op id="80" operation="start" operation_key="base:0_start_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node2"/>
+ <rsc_op id="85" operation="start" operation_key="base:0_start_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node2"/>
</trigger>
<trigger>
- <pseudo_event id="86" operation="start" operation_key="base-bundle-clone_start_0"/>
+ <pseudo_event id="87" operation="start" operation_key="base-bundle-clone_start_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="4">
+ <synapse id="5">
<action_set>
- <rsc_op id="85" operation="monitor" operation_key="base:2_monitor_16000" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node4">
- <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
- <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_physical_host="node4" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
+ <rsc_op id="86" operation="monitor" operation_key="base:0_monitor_16000" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node2">
+ <primitive id="base" long-id="base:0" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-0" CRM_meta_on_node_uuid="base-bundle-0" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="74" operation="start" operation_key="base-bundle-2_start_0" on_node="node4" on_node_uuid="4"/>
+ <rsc_op id="66" operation="start" operation_key="base-bundle-0_start_0" on_node="node2" on_node_uuid="2"/>
</trigger>
<trigger>
- <rsc_op id="84" operation="start" operation_key="base:2_start_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node4"/>
+ <rsc_op id="85" operation="start" operation_key="base:0_start_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node2"/>
</trigger>
</inputs>
</synapse>
- <synapse id="5">
+ <synapse id="6">
<action_set>
- <rsc_op id="84" operation="start" operation_key="base:2_start_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node4">
- <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
- <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_physical_host="node4" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ <rsc_op id="85" operation="start" operation_key="base:0_start_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node2">
+ <primitive id="base" long-id="base:0" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-0" CRM_meta_on_node_uuid="base-bundle-0" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="72" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4"/>
+ <rsc_op id="64" operation="start" operation_key="base-bundle-podman-0_start_0" on_node="node2" on_node_uuid="2"/>
</trigger>
<trigger>
- <rsc_op id="74" operation="start" operation_key="base-bundle-2_start_0" on_node="node4" on_node_uuid="4"/>
+ <rsc_op id="66" operation="start" operation_key="base-bundle-0_start_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="87" operation="start" operation_key="base-bundle-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7" priority="1000000">
+ <action_set>
+ <pseudo_event id="92" operation="promoted" operation_key="base-bundle-clone_promoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="81" operation="promote" operation_key="base:2_promote_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node4"/>
</trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8">
+ <action_set>
+ <pseudo_event id="91" operation="promote" operation_key="base-bundle-clone_promote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
<trigger>
- <rsc_op id="82" operation="start" operation_key="base:1_start_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node3"/>
+ <pseudo_event id="88" operation="running" operation_key="base-bundle-clone_running_0"/>
</trigger>
<trigger>
- <pseudo_event id="86" operation="start" operation_key="base-bundle-clone_start_0"/>
+ <pseudo_event id="95" operation="promote" operation_key="base-bundle_promote_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="6" priority="1000000">
+ <synapse id="9" priority="1000000">
<action_set>
- <pseudo_event id="87" operation="running" operation_key="base-bundle-clone_running_0">
+ <pseudo_event id="88" operation="running" operation_key="base-bundle-clone_running_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <rsc_op id="80" operation="start" operation_key="base:0_start_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node2"/>
+ <rsc_op id="80" operation="start" operation_key="base:2_start_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node4"/>
</trigger>
<trigger>
- <rsc_op id="82" operation="start" operation_key="base:1_start_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node3"/>
+ <rsc_op id="83" operation="start" operation_key="base:1_start_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node3"/>
</trigger>
<trigger>
- <rsc_op id="84" operation="start" operation_key="base:2_start_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node4"/>
+ <rsc_op id="85" operation="start" operation_key="base:0_start_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node2"/>
</trigger>
<trigger>
- <pseudo_event id="86" operation="start" operation_key="base-bundle-clone_start_0"/>
+ <pseudo_event id="87" operation="start" operation_key="base-bundle-clone_start_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="7">
+ <synapse id="10">
<action_set>
- <pseudo_event id="86" operation="start" operation_key="base-bundle-clone_start_0">
+ <pseudo_event id="87" operation="start" operation_key="base-bundle-clone_start_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
@@ -188,7 +243,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="8">
+ <synapse id="11">
<action_set>
<rsc_op id="65" operation="monitor" operation_key="base-bundle-podman-0_monitor_60000" on_node="node2" on_node_uuid="2">
<primitive id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman"/>
@@ -201,7 +256,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="9">
+ <synapse id="12">
<action_set>
<rsc_op id="64" operation="start" operation_key="base-bundle-podman-0_start_0" on_node="node2" on_node_uuid="2">
<primitive id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman"/>
@@ -235,7 +290,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="10">
+ <synapse id="13">
<action_set>
<rsc_op id="50" operation="monitor" operation_key="base-bundle-podman-0_monitor_0" on_node="node5" on_node_uuid="5">
<primitive id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman"/>
@@ -244,7 +299,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="11">
+ <synapse id="14">
<action_set>
<rsc_op id="38" operation="monitor" operation_key="base-bundle-podman-0_monitor_0" on_node="node4" on_node_uuid="4">
<primitive id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman"/>
@@ -253,7 +308,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="12">
+ <synapse id="15">
<action_set>
<rsc_op id="26" operation="monitor" operation_key="base-bundle-podman-0_monitor_0" on_node="node3" on_node_uuid="3">
<primitive id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman"/>
@@ -262,7 +317,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="13">
+ <synapse id="16">
<action_set>
<rsc_op id="14" operation="monitor" operation_key="base-bundle-podman-0_monitor_0" on_node="node2" on_node_uuid="2">
<primitive id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman"/>
@@ -271,7 +326,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="14">
+ <synapse id="17">
<action_set>
<rsc_op id="2" operation="monitor" operation_key="base-bundle-podman-0_monitor_0" on_node="node1" on_node_uuid="1">
<primitive id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman"/>
@@ -280,7 +335,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="15">
+ <synapse id="18">
<action_set>
<rsc_op id="67" operation="monitor" operation_key="base-bundle-0_monitor_30000" on_node="node2" on_node_uuid="2">
<primitive id="base-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
@@ -293,7 +348,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="16">
+ <synapse id="19">
<action_set>
<rsc_op id="66" operation="start" operation_key="base-bundle-0_start_0" on_node="node2" on_node_uuid="2">
<primitive id="base-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
@@ -321,7 +376,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="17">
+ <synapse id="20">
<action_set>
<rsc_op id="51" operation="monitor" operation_key="base-bundle-0_monitor_0" on_node="node5" on_node_uuid="5">
<primitive id="base-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
@@ -334,7 +389,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="18">
+ <synapse id="21">
<action_set>
<rsc_op id="39" operation="monitor" operation_key="base-bundle-0_monitor_0" on_node="node4" on_node_uuid="4">
<primitive id="base-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
@@ -347,7 +402,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="19">
+ <synapse id="22">
<action_set>
<rsc_op id="27" operation="monitor" operation_key="base-bundle-0_monitor_0" on_node="node3" on_node_uuid="3">
<primitive id="base-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
@@ -360,7 +415,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="20">
+ <synapse id="23">
<action_set>
<rsc_op id="15" operation="monitor" operation_key="base-bundle-0_monitor_0" on_node="node2" on_node_uuid="2">
<primitive id="base-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
@@ -373,7 +428,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="21">
+ <synapse id="24">
<action_set>
<rsc_op id="3" operation="monitor" operation_key="base-bundle-0_monitor_0" on_node="node1" on_node_uuid="1">
<primitive id="base-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
@@ -386,7 +441,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="22">
+ <synapse id="25">
<action_set>
<rsc_op id="69" operation="monitor" operation_key="base-bundle-podman-1_monitor_60000" on_node="node3" on_node_uuid="3">
<primitive id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman"/>
@@ -399,7 +454,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="23">
+ <synapse id="26">
<action_set>
<rsc_op id="68" operation="start" operation_key="base-bundle-podman-1_start_0" on_node="node3" on_node_uuid="3">
<primitive id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman"/>
@@ -433,7 +488,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="24">
+ <synapse id="27">
<action_set>
<rsc_op id="52" operation="monitor" operation_key="base-bundle-podman-1_monitor_0" on_node="node5" on_node_uuid="5">
<primitive id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman"/>
@@ -442,7 +497,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="25">
+ <synapse id="28">
<action_set>
<rsc_op id="40" operation="monitor" operation_key="base-bundle-podman-1_monitor_0" on_node="node4" on_node_uuid="4">
<primitive id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman"/>
@@ -451,7 +506,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="26">
+ <synapse id="29">
<action_set>
<rsc_op id="28" operation="monitor" operation_key="base-bundle-podman-1_monitor_0" on_node="node3" on_node_uuid="3">
<primitive id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman"/>
@@ -460,7 +515,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="27">
+ <synapse id="30">
<action_set>
<rsc_op id="16" operation="monitor" operation_key="base-bundle-podman-1_monitor_0" on_node="node2" on_node_uuid="2">
<primitive id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman"/>
@@ -469,7 +524,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="28">
+ <synapse id="31">
<action_set>
<rsc_op id="4" operation="monitor" operation_key="base-bundle-podman-1_monitor_0" on_node="node1" on_node_uuid="1">
<primitive id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman"/>
@@ -478,7 +533,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="29">
+ <synapse id="32">
<action_set>
<rsc_op id="71" operation="monitor" operation_key="base-bundle-1_monitor_30000" on_node="node3" on_node_uuid="3">
<primitive id="base-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
@@ -491,7 +546,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="30">
+ <synapse id="33">
<action_set>
<rsc_op id="70" operation="start" operation_key="base-bundle-1_start_0" on_node="node3" on_node_uuid="3">
<primitive id="base-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
@@ -519,7 +574,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="31">
+ <synapse id="34">
<action_set>
<rsc_op id="53" operation="monitor" operation_key="base-bundle-1_monitor_0" on_node="node5" on_node_uuid="5">
<primitive id="base-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
@@ -532,7 +587,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="32">
+ <synapse id="35">
<action_set>
<rsc_op id="41" operation="monitor" operation_key="base-bundle-1_monitor_0" on_node="node4" on_node_uuid="4">
<primitive id="base-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
@@ -545,7 +600,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="33">
+ <synapse id="36">
<action_set>
<rsc_op id="29" operation="monitor" operation_key="base-bundle-1_monitor_0" on_node="node3" on_node_uuid="3">
<primitive id="base-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
@@ -558,7 +613,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="34">
+ <synapse id="37">
<action_set>
<rsc_op id="17" operation="monitor" operation_key="base-bundle-1_monitor_0" on_node="node2" on_node_uuid="2">
<primitive id="base-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
@@ -571,7 +626,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="35">
+ <synapse id="38">
<action_set>
<rsc_op id="5" operation="monitor" operation_key="base-bundle-1_monitor_0" on_node="node1" on_node_uuid="1">
<primitive id="base-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
@@ -584,7 +639,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="36">
+ <synapse id="39">
<action_set>
<rsc_op id="73" operation="monitor" operation_key="base-bundle-podman-2_monitor_60000" on_node="node4" on_node_uuid="4">
<primitive id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman"/>
@@ -597,7 +652,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="37">
+ <synapse id="40">
<action_set>
<rsc_op id="72" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4">
<primitive id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman"/>
@@ -631,7 +686,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="38">
+ <synapse id="41">
<action_set>
<rsc_op id="54" operation="monitor" operation_key="base-bundle-podman-2_monitor_0" on_node="node5" on_node_uuid="5">
<primitive id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman"/>
@@ -640,7 +695,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="39">
+ <synapse id="42">
<action_set>
<rsc_op id="42" operation="monitor" operation_key="base-bundle-podman-2_monitor_0" on_node="node4" on_node_uuid="4">
<primitive id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman"/>
@@ -649,7 +704,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="40">
+ <synapse id="43">
<action_set>
<rsc_op id="30" operation="monitor" operation_key="base-bundle-podman-2_monitor_0" on_node="node3" on_node_uuid="3">
<primitive id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman"/>
@@ -658,7 +713,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="41">
+ <synapse id="44">
<action_set>
<rsc_op id="18" operation="monitor" operation_key="base-bundle-podman-2_monitor_0" on_node="node2" on_node_uuid="2">
<primitive id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman"/>
@@ -667,7 +722,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="42">
+ <synapse id="45">
<action_set>
<rsc_op id="6" operation="monitor" operation_key="base-bundle-podman-2_monitor_0" on_node="node1" on_node_uuid="1">
<primitive id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman"/>
@@ -676,7 +731,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="43">
+ <synapse id="46">
<action_set>
<rsc_op id="75" operation="monitor" operation_key="base-bundle-2_monitor_30000" on_node="node4" on_node_uuid="4">
<primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
@@ -689,7 +744,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="44">
+ <synapse id="47">
<action_set>
<rsc_op id="74" operation="start" operation_key="base-bundle-2_start_0" on_node="node4" on_node_uuid="4">
<primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
@@ -717,7 +772,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="45">
+ <synapse id="48">
<action_set>
<rsc_op id="55" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node5" on_node_uuid="5">
<primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
@@ -730,7 +785,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="46">
+ <synapse id="49">
<action_set>
<rsc_op id="43" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node4" on_node_uuid="4">
<primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
@@ -743,7 +798,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="47">
+ <synapse id="50">
<action_set>
<rsc_op id="31" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node3" on_node_uuid="3">
<primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
@@ -756,7 +811,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="48">
+ <synapse id="51">
<action_set>
<rsc_op id="19" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node2" on_node_uuid="2">
<primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
@@ -769,7 +824,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="49">
+ <synapse id="52">
<action_set>
<rsc_op id="7" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node1" on_node_uuid="1">
<primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
@@ -782,141 +837,199 @@
</trigger>
</inputs>
</synapse>
- <synapse id="50">
+ <synapse id="53">
<action_set>
- <rsc_op id="115" operation="monitor" operation_key="app:0_monitor_16000" on_node="app-bundle-0" on_node_uuid="app-bundle-0" router_node="node2">
- <primitive id="app" long-id="app:0" class="ocf" provider="pacemaker" type="Stateful"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="app-bundle-0" CRM_meta_on_node_uuid="app-bundle-0" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
+ <rsc_op id="117" operation="monitor" operation_key="app:2_monitor_15000" on_node="app-bundle-2" on_node_uuid="app-bundle-2" router_node="node4">
+ <primitive id="app" long-id="app:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="app-bundle-2" CRM_meta_on_node_uuid="app-bundle-2" CRM_meta_op_target_rc="8" CRM_meta_physical_host="node4" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="15000" />
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="100" operation="start" operation_key="app-bundle-0_start_0" on_node="node2" on_node_uuid="2"/>
+ <rsc_op id="109" operation="start" operation_key="app-bundle-2_start_0" on_node="node4" on_node_uuid="4"/>
</trigger>
<trigger>
- <rsc_op id="114" operation="start" operation_key="app:0_start_0" on_node="app-bundle-0" on_node_uuid="app-bundle-0" router_node="node2"/>
+ <rsc_op id="115" operation="start" operation_key="app:2_start_0" on_node="app-bundle-2" on_node_uuid="app-bundle-2" router_node="node4"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="116" operation="promote" operation_key="app:2_promote_0" on_node="app-bundle-2" on_node_uuid="app-bundle-2" router_node="node4"/>
</trigger>
</inputs>
</synapse>
- <synapse id="51">
+ <synapse id="54">
<action_set>
- <rsc_op id="114" operation="start" operation_key="app:0_start_0" on_node="app-bundle-0" on_node_uuid="app-bundle-0" router_node="node2">
- <primitive id="app" long-id="app:0" class="ocf" provider="pacemaker" type="Stateful"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="app-bundle-0" CRM_meta_on_node_uuid="app-bundle-0" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ <rsc_op id="116" operation="promote" operation_key="app:2_promote_0" on_node="app-bundle-2" on_node_uuid="app-bundle-2" router_node="node4">
+ <primitive id="app" long-id="app:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="app-bundle-2" CRM_meta_on_node_uuid="app-bundle-2" CRM_meta_physical_host="node4" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="98" operation="start" operation_key="app-bundle-podman-0_start_0" on_node="node2" on_node_uuid="2"/>
+ <rsc_op id="81" operation="promote" operation_key="base:2_promote_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node4"/>
</trigger>
<trigger>
- <rsc_op id="100" operation="start" operation_key="app-bundle-0_start_0" on_node="node2" on_node_uuid="2"/>
+ <rsc_op id="107" operation="start" operation_key="app-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4"/>
</trigger>
<trigger>
- <pseudo_event id="120" operation="start" operation_key="app-bundle-clone_start_0"/>
+ <rsc_op id="109" operation="start" operation_key="app-bundle-2_start_0" on_node="node4" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="115" operation="start" operation_key="app:2_start_0" on_node="app-bundle-2" on_node_uuid="app-bundle-2" router_node="node4"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="126" operation="promote" operation_key="app-bundle-clone_promote_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="52">
+ <synapse id="55">
+ <action_set>
+ <rsc_op id="115" operation="start" operation_key="app:2_start_0" on_node="app-bundle-2" on_node_uuid="app-bundle-2" router_node="node4">
+ <primitive id="app" long-id="app:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="app-bundle-2" CRM_meta_on_node_uuid="app-bundle-2" CRM_meta_physical_host="node4" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="107" operation="start" operation_key="app-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="109" operation="start" operation_key="app-bundle-2_start_0" on_node="node4" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="118" operation="start" operation_key="app:1_start_0" on_node="app-bundle-1" on_node_uuid="app-bundle-1" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="120" operation="start" operation_key="app:0_start_0" on_node="app-bundle-0" on_node_uuid="app-bundle-0" router_node="node2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="122" operation="start" operation_key="app-bundle-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="56">
<action_set>
- <rsc_op id="117" operation="monitor" operation_key="app:1_monitor_16000" on_node="app-bundle-1" on_node_uuid="app-bundle-1" router_node="node3">
+ <rsc_op id="119" operation="monitor" operation_key="app:1_monitor_16000" on_node="app-bundle-1" on_node_uuid="app-bundle-1" router_node="node3">
<primitive id="app" long-id="app:1" class="ocf" provider="pacemaker" type="Stateful"/>
<attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="app-bundle-1" CRM_meta_on_node_uuid="app-bundle-1" CRM_meta_physical_host="node3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="104" operation="start" operation_key="app-bundle-1_start_0" on_node="node3" on_node_uuid="3"/>
+ <rsc_op id="105" operation="start" operation_key="app-bundle-1_start_0" on_node="node3" on_node_uuid="3"/>
</trigger>
<trigger>
- <rsc_op id="116" operation="start" operation_key="app:1_start_0" on_node="app-bundle-1" on_node_uuid="app-bundle-1" router_node="node3"/>
+ <rsc_op id="118" operation="start" operation_key="app:1_start_0" on_node="app-bundle-1" on_node_uuid="app-bundle-1" router_node="node3"/>
</trigger>
</inputs>
</synapse>
- <synapse id="53">
+ <synapse id="57">
<action_set>
- <rsc_op id="116" operation="start" operation_key="app:1_start_0" on_node="app-bundle-1" on_node_uuid="app-bundle-1" router_node="node3">
+ <rsc_op id="118" operation="start" operation_key="app:1_start_0" on_node="app-bundle-1" on_node_uuid="app-bundle-1" router_node="node3">
<primitive id="app" long-id="app:1" class="ocf" provider="pacemaker" type="Stateful"/>
<attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="app-bundle-1" CRM_meta_on_node_uuid="app-bundle-1" CRM_meta_physical_host="node3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="102" operation="start" operation_key="app-bundle-podman-1_start_0" on_node="node3" on_node_uuid="3"/>
+ <rsc_op id="103" operation="start" operation_key="app-bundle-podman-1_start_0" on_node="node3" on_node_uuid="3"/>
</trigger>
<trigger>
- <rsc_op id="104" operation="start" operation_key="app-bundle-1_start_0" on_node="node3" on_node_uuid="3"/>
+ <rsc_op id="105" operation="start" operation_key="app-bundle-1_start_0" on_node="node3" on_node_uuid="3"/>
</trigger>
<trigger>
- <rsc_op id="114" operation="start" operation_key="app:0_start_0" on_node="app-bundle-0" on_node_uuid="app-bundle-0" router_node="node2"/>
+ <rsc_op id="120" operation="start" operation_key="app:0_start_0" on_node="app-bundle-0" on_node_uuid="app-bundle-0" router_node="node2"/>
</trigger>
<trigger>
- <pseudo_event id="120" operation="start" operation_key="app-bundle-clone_start_0"/>
+ <pseudo_event id="122" operation="start" operation_key="app-bundle-clone_start_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="54">
+ <synapse id="58">
<action_set>
- <rsc_op id="119" operation="monitor" operation_key="app:2_monitor_16000" on_node="app-bundle-2" on_node_uuid="app-bundle-2" router_node="node4">
- <primitive id="app" long-id="app:2" class="ocf" provider="pacemaker" type="Stateful"/>
- <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="app-bundle-2" CRM_meta_on_node_uuid="app-bundle-2" CRM_meta_physical_host="node4" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
+ <rsc_op id="121" operation="monitor" operation_key="app:0_monitor_16000" on_node="app-bundle-0" on_node_uuid="app-bundle-0" router_node="node2">
+ <primitive id="app" long-id="app:0" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="app-bundle-0" CRM_meta_on_node_uuid="app-bundle-0" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="108" operation="start" operation_key="app-bundle-2_start_0" on_node="node4" on_node_uuid="4"/>
+ <rsc_op id="101" operation="start" operation_key="app-bundle-0_start_0" on_node="node2" on_node_uuid="2"/>
</trigger>
<trigger>
- <rsc_op id="118" operation="start" operation_key="app:2_start_0" on_node="app-bundle-2" on_node_uuid="app-bundle-2" router_node="node4"/>
+ <rsc_op id="120" operation="start" operation_key="app:0_start_0" on_node="app-bundle-0" on_node_uuid="app-bundle-0" router_node="node2"/>
</trigger>
</inputs>
</synapse>
- <synapse id="55">
+ <synapse id="59">
<action_set>
- <rsc_op id="118" operation="start" operation_key="app:2_start_0" on_node="app-bundle-2" on_node_uuid="app-bundle-2" router_node="node4">
- <primitive id="app" long-id="app:2" class="ocf" provider="pacemaker" type="Stateful"/>
- <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="app-bundle-2" CRM_meta_on_node_uuid="app-bundle-2" CRM_meta_physical_host="node4" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ <rsc_op id="120" operation="start" operation_key="app:0_start_0" on_node="app-bundle-0" on_node_uuid="app-bundle-0" router_node="node2">
+ <primitive id="app" long-id="app:0" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="app-bundle-0" CRM_meta_on_node_uuid="app-bundle-0" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="106" operation="start" operation_key="app-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4"/>
+ <rsc_op id="99" operation="start" operation_key="app-bundle-podman-0_start_0" on_node="node2" on_node_uuid="2"/>
</trigger>
<trigger>
- <rsc_op id="108" operation="start" operation_key="app-bundle-2_start_0" on_node="node4" on_node_uuid="4"/>
+ <rsc_op id="101" operation="start" operation_key="app-bundle-0_start_0" on_node="node2" on_node_uuid="2"/>
</trigger>
<trigger>
- <rsc_op id="116" operation="start" operation_key="app:1_start_0" on_node="app-bundle-1" on_node_uuid="app-bundle-1" router_node="node3"/>
+ <pseudo_event id="122" operation="start" operation_key="app-bundle-clone_start_0"/>
</trigger>
+ </inputs>
+ </synapse>
+ <synapse id="60" priority="1000000">
+ <action_set>
+ <pseudo_event id="127" operation="promoted" operation_key="app-bundle-clone_promoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
<trigger>
- <pseudo_event id="120" operation="start" operation_key="app-bundle-clone_start_0"/>
+ <rsc_op id="116" operation="promote" operation_key="app:2_promote_0" on_node="app-bundle-2" on_node_uuid="app-bundle-2" router_node="node4"/>
</trigger>
</inputs>
</synapse>
- <synapse id="56" priority="1000000">
+ <synapse id="61">
<action_set>
- <pseudo_event id="121" operation="running" operation_key="app-bundle-clone_running_0">
+ <pseudo_event id="126" operation="promote" operation_key="app-bundle-clone_promote_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <rsc_op id="114" operation="start" operation_key="app:0_start_0" on_node="app-bundle-0" on_node_uuid="app-bundle-0" router_node="node2"/>
+ <pseudo_event id="123" operation="running" operation_key="app-bundle-clone_running_0"/>
</trigger>
<trigger>
- <rsc_op id="116" operation="start" operation_key="app:1_start_0" on_node="app-bundle-1" on_node_uuid="app-bundle-1" router_node="node3"/>
+ <pseudo_event id="130" operation="promote" operation_key="app-bundle_promote_0"/>
</trigger>
+ </inputs>
+ </synapse>
+ <synapse id="62" priority="1000000">
+ <action_set>
+ <pseudo_event id="123" operation="running" operation_key="app-bundle-clone_running_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
<trigger>
- <rsc_op id="118" operation="start" operation_key="app:2_start_0" on_node="app-bundle-2" on_node_uuid="app-bundle-2" router_node="node4"/>
+ <rsc_op id="115" operation="start" operation_key="app:2_start_0" on_node="app-bundle-2" on_node_uuid="app-bundle-2" router_node="node4"/>
</trigger>
<trigger>
- <pseudo_event id="120" operation="start" operation_key="app-bundle-clone_start_0"/>
+ <rsc_op id="118" operation="start" operation_key="app:1_start_0" on_node="app-bundle-1" on_node_uuid="app-bundle-1" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="120" operation="start" operation_key="app:0_start_0" on_node="app-bundle-0" on_node_uuid="app-bundle-0" router_node="node2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="122" operation="start" operation_key="app-bundle-clone_start_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="57">
+ <synapse id="63">
<action_set>
- <pseudo_event id="120" operation="start" operation_key="app-bundle-clone_start_0">
+ <pseudo_event id="122" operation="start" operation_key="app-bundle-clone_start_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
@@ -967,26 +1080,26 @@
<rsc_op id="60" operation="monitor" operation_key="app-bundle-podman-2_monitor_0" on_node="node5" on_node_uuid="5"/>
</trigger>
<trigger>
- <pseudo_event id="110" operation="start" operation_key="app-bundle_start_0"/>
+ <pseudo_event id="111" operation="start" operation_key="app-bundle_start_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="58">
+ <synapse id="64">
<action_set>
- <rsc_op id="99" operation="monitor" operation_key="app-bundle-podman-0_monitor_60000" on_node="node2" on_node_uuid="2">
+ <rsc_op id="100" operation="monitor" operation_key="app-bundle-podman-0_monitor_60000" on_node="node2" on_node_uuid="2">
<primitive id="app-bundle-podman-0" class="ocf" provider="heartbeat" type="podman"/>
<attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="localhost/pcmktest:app" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/app-bundle-0" reuse="false" run_cmd="/usr/sbin/pacemaker-remoted" run_opts=" -e PCMK_stderr=1 -e PCMK_remote_port=3121 -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/app-bundle-0:/var/log -p 3121:3121 "/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="98" operation="start" operation_key="app-bundle-podman-0_start_0" on_node="node2" on_node_uuid="2"/>
+ <rsc_op id="99" operation="start" operation_key="app-bundle-podman-0_start_0" on_node="node2" on_node_uuid="2"/>
</trigger>
</inputs>
</synapse>
- <synapse id="59">
+ <synapse id="65">
<action_set>
- <rsc_op id="98" operation="start" operation_key="app-bundle-podman-0_start_0" on_node="node2" on_node_uuid="2">
+ <rsc_op id="99" operation="start" operation_key="app-bundle-podman-0_start_0" on_node="node2" on_node_uuid="2">
<primitive id="app-bundle-podman-0" class="ocf" provider="heartbeat" type="podman"/>
<attributes CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="localhost/pcmktest:app" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/app-bundle-0" reuse="false" run_cmd="/usr/sbin/pacemaker-remoted" run_opts=" -e PCMK_stderr=1 -e PCMK_remote_port=3121 -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/app-bundle-0:/var/log -p 3121:3121 "/>
</rsc_op>
@@ -1014,14 +1127,14 @@
<rsc_op id="56" operation="monitor" operation_key="app-bundle-podman-0_monitor_0" on_node="node5" on_node_uuid="5"/>
</trigger>
<trigger>
- <rsc_op id="80" operation="start" operation_key="base:0_start_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node2"/>
+ <rsc_op id="85" operation="start" operation_key="base:0_start_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node2"/>
</trigger>
<trigger>
- <pseudo_event id="110" operation="start" operation_key="app-bundle_start_0"/>
+ <pseudo_event id="111" operation="start" operation_key="app-bundle_start_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="60">
+ <synapse id="66">
<action_set>
<rsc_op id="56" operation="monitor" operation_key="app-bundle-podman-0_monitor_0" on_node="node5" on_node_uuid="5">
<primitive id="app-bundle-podman-0" class="ocf" provider="heartbeat" type="podman"/>
@@ -1030,7 +1143,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="61">
+ <synapse id="67">
<action_set>
<rsc_op id="44" operation="monitor" operation_key="app-bundle-podman-0_monitor_0" on_node="node4" on_node_uuid="4">
<primitive id="app-bundle-podman-0" class="ocf" provider="heartbeat" type="podman"/>
@@ -1039,7 +1152,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="62">
+ <synapse id="68">
<action_set>
<rsc_op id="32" operation="monitor" operation_key="app-bundle-podman-0_monitor_0" on_node="node3" on_node_uuid="3">
<primitive id="app-bundle-podman-0" class="ocf" provider="heartbeat" type="podman"/>
@@ -1048,7 +1161,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="63">
+ <synapse id="69">
<action_set>
<rsc_op id="20" operation="monitor" operation_key="app-bundle-podman-0_monitor_0" on_node="node2" on_node_uuid="2">
<primitive id="app-bundle-podman-0" class="ocf" provider="heartbeat" type="podman"/>
@@ -1057,7 +1170,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="64">
+ <synapse id="70">
<action_set>
<rsc_op id="8" operation="monitor" operation_key="app-bundle-podman-0_monitor_0" on_node="node1" on_node_uuid="1">
<primitive id="app-bundle-podman-0" class="ocf" provider="heartbeat" type="podman"/>
@@ -1066,22 +1179,22 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="65">
+ <synapse id="71">
<action_set>
- <rsc_op id="101" operation="monitor" operation_key="app-bundle-0_monitor_30000" on_node="node2" on_node_uuid="2">
+ <rsc_op id="102" operation="monitor" operation_key="app-bundle-0_monitor_30000" on_node="node2" on_node_uuid="2">
<primitive id="app-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
<attributes CRM_meta_container="app-bundle-podman-0" CRM_meta_interval="30000" CRM_meta_name="monitor" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="30000" addr="node2" port="3121"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="100" operation="start" operation_key="app-bundle-0_start_0" on_node="node2" on_node_uuid="2"/>
+ <rsc_op id="101" operation="start" operation_key="app-bundle-0_start_0" on_node="node2" on_node_uuid="2"/>
</trigger>
</inputs>
</synapse>
- <synapse id="66">
+ <synapse id="72">
<action_set>
- <rsc_op id="100" operation="start" operation_key="app-bundle-0_start_0" on_node="node2" on_node_uuid="2">
+ <rsc_op id="101" operation="start" operation_key="app-bundle-0_start_0" on_node="node2" on_node_uuid="2">
<primitive id="app-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
<attributes CRM_meta_container="app-bundle-podman-0" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" addr="node2" port="3121"/>
</rsc_op>
@@ -1103,11 +1216,11 @@
<rsc_op id="57" operation="monitor" operation_key="app-bundle-0_monitor_0" on_node="node5" on_node_uuid="5"/>
</trigger>
<trigger>
- <rsc_op id="98" operation="start" operation_key="app-bundle-podman-0_start_0" on_node="node2" on_node_uuid="2"/>
+ <rsc_op id="99" operation="start" operation_key="app-bundle-podman-0_start_0" on_node="node2" on_node_uuid="2"/>
</trigger>
</inputs>
</synapse>
- <synapse id="67">
+ <synapse id="73">
<action_set>
<rsc_op id="57" operation="monitor" operation_key="app-bundle-0_monitor_0" on_node="node5" on_node_uuid="5">
<primitive id="app-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
@@ -1116,11 +1229,11 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="98" operation="start" operation_key="app-bundle-podman-0_start_0" on_node="node2" on_node_uuid="2"/>
+ <rsc_op id="99" operation="start" operation_key="app-bundle-podman-0_start_0" on_node="node2" on_node_uuid="2"/>
</trigger>
</inputs>
</synapse>
- <synapse id="68">
+ <synapse id="74">
<action_set>
<rsc_op id="45" operation="monitor" operation_key="app-bundle-0_monitor_0" on_node="node4" on_node_uuid="4">
<primitive id="app-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
@@ -1129,11 +1242,11 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="98" operation="start" operation_key="app-bundle-podman-0_start_0" on_node="node2" on_node_uuid="2"/>
+ <rsc_op id="99" operation="start" operation_key="app-bundle-podman-0_start_0" on_node="node2" on_node_uuid="2"/>
</trigger>
</inputs>
</synapse>
- <synapse id="69">
+ <synapse id="75">
<action_set>
<rsc_op id="33" operation="monitor" operation_key="app-bundle-0_monitor_0" on_node="node3" on_node_uuid="3">
<primitive id="app-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
@@ -1142,11 +1255,11 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="98" operation="start" operation_key="app-bundle-podman-0_start_0" on_node="node2" on_node_uuid="2"/>
+ <rsc_op id="99" operation="start" operation_key="app-bundle-podman-0_start_0" on_node="node2" on_node_uuid="2"/>
</trigger>
</inputs>
</synapse>
- <synapse id="70">
+ <synapse id="76">
<action_set>
<rsc_op id="21" operation="monitor" operation_key="app-bundle-0_monitor_0" on_node="node2" on_node_uuid="2">
<primitive id="app-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
@@ -1155,11 +1268,11 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="98" operation="start" operation_key="app-bundle-podman-0_start_0" on_node="node2" on_node_uuid="2"/>
+ <rsc_op id="99" operation="start" operation_key="app-bundle-podman-0_start_0" on_node="node2" on_node_uuid="2"/>
</trigger>
</inputs>
</synapse>
- <synapse id="71">
+ <synapse id="77">
<action_set>
<rsc_op id="9" operation="monitor" operation_key="app-bundle-0_monitor_0" on_node="node1" on_node_uuid="1">
<primitive id="app-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
@@ -1168,26 +1281,26 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="98" operation="start" operation_key="app-bundle-podman-0_start_0" on_node="node2" on_node_uuid="2"/>
+ <rsc_op id="99" operation="start" operation_key="app-bundle-podman-0_start_0" on_node="node2" on_node_uuid="2"/>
</trigger>
</inputs>
</synapse>
- <synapse id="72">
+ <synapse id="78">
<action_set>
- <rsc_op id="103" operation="monitor" operation_key="app-bundle-podman-1_monitor_60000" on_node="node3" on_node_uuid="3">
+ <rsc_op id="104" operation="monitor" operation_key="app-bundle-podman-1_monitor_60000" on_node="node3" on_node_uuid="3">
<primitive id="app-bundle-podman-1" class="ocf" provider="heartbeat" type="podman"/>
<attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="localhost/pcmktest:app" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/app-bundle-1" reuse="false" run_cmd="/usr/sbin/pacemaker-remoted" run_opts=" -e PCMK_stderr=1 -e PCMK_remote_port=3121 -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/app-bundle-1:/var/log -p 3121:3121 "/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="102" operation="start" operation_key="app-bundle-podman-1_start_0" on_node="node3" on_node_uuid="3"/>
+ <rsc_op id="103" operation="start" operation_key="app-bundle-podman-1_start_0" on_node="node3" on_node_uuid="3"/>
</trigger>
</inputs>
</synapse>
- <synapse id="73">
+ <synapse id="79">
<action_set>
- <rsc_op id="102" operation="start" operation_key="app-bundle-podman-1_start_0" on_node="node3" on_node_uuid="3">
+ <rsc_op id="103" operation="start" operation_key="app-bundle-podman-1_start_0" on_node="node3" on_node_uuid="3">
<primitive id="app-bundle-podman-1" class="ocf" provider="heartbeat" type="podman"/>
<attributes CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="localhost/pcmktest:app" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/app-bundle-1" reuse="false" run_cmd="/usr/sbin/pacemaker-remoted" run_opts=" -e PCMK_stderr=1 -e PCMK_remote_port=3121 -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/app-bundle-1:/var/log -p 3121:3121 "/>
</rsc_op>
@@ -1215,14 +1328,14 @@
<rsc_op id="58" operation="monitor" operation_key="app-bundle-podman-1_monitor_0" on_node="node5" on_node_uuid="5"/>
</trigger>
<trigger>
- <rsc_op id="82" operation="start" operation_key="base:1_start_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node3"/>
+ <rsc_op id="83" operation="start" operation_key="base:1_start_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node3"/>
</trigger>
<trigger>
- <pseudo_event id="110" operation="start" operation_key="app-bundle_start_0"/>
+ <pseudo_event id="111" operation="start" operation_key="app-bundle_start_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="74">
+ <synapse id="80">
<action_set>
<rsc_op id="58" operation="monitor" operation_key="app-bundle-podman-1_monitor_0" on_node="node5" on_node_uuid="5">
<primitive id="app-bundle-podman-1" class="ocf" provider="heartbeat" type="podman"/>
@@ -1231,7 +1344,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="75">
+ <synapse id="81">
<action_set>
<rsc_op id="46" operation="monitor" operation_key="app-bundle-podman-1_monitor_0" on_node="node4" on_node_uuid="4">
<primitive id="app-bundle-podman-1" class="ocf" provider="heartbeat" type="podman"/>
@@ -1240,7 +1353,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="76">
+ <synapse id="82">
<action_set>
<rsc_op id="34" operation="monitor" operation_key="app-bundle-podman-1_monitor_0" on_node="node3" on_node_uuid="3">
<primitive id="app-bundle-podman-1" class="ocf" provider="heartbeat" type="podman"/>
@@ -1249,7 +1362,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="77">
+ <synapse id="83">
<action_set>
<rsc_op id="22" operation="monitor" operation_key="app-bundle-podman-1_monitor_0" on_node="node2" on_node_uuid="2">
<primitive id="app-bundle-podman-1" class="ocf" provider="heartbeat" type="podman"/>
@@ -1258,7 +1371,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="78">
+ <synapse id="84">
<action_set>
<rsc_op id="10" operation="monitor" operation_key="app-bundle-podman-1_monitor_0" on_node="node1" on_node_uuid="1">
<primitive id="app-bundle-podman-1" class="ocf" provider="heartbeat" type="podman"/>
@@ -1267,22 +1380,22 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="79">
+ <synapse id="85">
<action_set>
- <rsc_op id="105" operation="monitor" operation_key="app-bundle-1_monitor_30000" on_node="node3" on_node_uuid="3">
+ <rsc_op id="106" operation="monitor" operation_key="app-bundle-1_monitor_30000" on_node="node3" on_node_uuid="3">
<primitive id="app-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
<attributes CRM_meta_container="app-bundle-podman-1" CRM_meta_interval="30000" CRM_meta_name="monitor" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="30000" addr="node3" port="3121"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="104" operation="start" operation_key="app-bundle-1_start_0" on_node="node3" on_node_uuid="3"/>
+ <rsc_op id="105" operation="start" operation_key="app-bundle-1_start_0" on_node="node3" on_node_uuid="3"/>
</trigger>
</inputs>
</synapse>
- <synapse id="80">
+ <synapse id="86">
<action_set>
- <rsc_op id="104" operation="start" operation_key="app-bundle-1_start_0" on_node="node3" on_node_uuid="3">
+ <rsc_op id="105" operation="start" operation_key="app-bundle-1_start_0" on_node="node3" on_node_uuid="3">
<primitive id="app-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
<attributes CRM_meta_container="app-bundle-podman-1" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" addr="node3" port="3121"/>
</rsc_op>
@@ -1304,11 +1417,11 @@
<rsc_op id="59" operation="monitor" operation_key="app-bundle-1_monitor_0" on_node="node5" on_node_uuid="5"/>
</trigger>
<trigger>
- <rsc_op id="102" operation="start" operation_key="app-bundle-podman-1_start_0" on_node="node3" on_node_uuid="3"/>
+ <rsc_op id="103" operation="start" operation_key="app-bundle-podman-1_start_0" on_node="node3" on_node_uuid="3"/>
</trigger>
</inputs>
</synapse>
- <synapse id="81">
+ <synapse id="87">
<action_set>
<rsc_op id="59" operation="monitor" operation_key="app-bundle-1_monitor_0" on_node="node5" on_node_uuid="5">
<primitive id="app-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
@@ -1317,11 +1430,11 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="102" operation="start" operation_key="app-bundle-podman-1_start_0" on_node="node3" on_node_uuid="3"/>
+ <rsc_op id="103" operation="start" operation_key="app-bundle-podman-1_start_0" on_node="node3" on_node_uuid="3"/>
</trigger>
</inputs>
</synapse>
- <synapse id="82">
+ <synapse id="88">
<action_set>
<rsc_op id="47" operation="monitor" operation_key="app-bundle-1_monitor_0" on_node="node4" on_node_uuid="4">
<primitive id="app-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
@@ -1330,11 +1443,11 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="102" operation="start" operation_key="app-bundle-podman-1_start_0" on_node="node3" on_node_uuid="3"/>
+ <rsc_op id="103" operation="start" operation_key="app-bundle-podman-1_start_0" on_node="node3" on_node_uuid="3"/>
</trigger>
</inputs>
</synapse>
- <synapse id="83">
+ <synapse id="89">
<action_set>
<rsc_op id="35" operation="monitor" operation_key="app-bundle-1_monitor_0" on_node="node3" on_node_uuid="3">
<primitive id="app-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
@@ -1343,11 +1456,11 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="102" operation="start" operation_key="app-bundle-podman-1_start_0" on_node="node3" on_node_uuid="3"/>
+ <rsc_op id="103" operation="start" operation_key="app-bundle-podman-1_start_0" on_node="node3" on_node_uuid="3"/>
</trigger>
</inputs>
</synapse>
- <synapse id="84">
+ <synapse id="90">
<action_set>
<rsc_op id="23" operation="monitor" operation_key="app-bundle-1_monitor_0" on_node="node2" on_node_uuid="2">
<primitive id="app-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
@@ -1356,11 +1469,11 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="102" operation="start" operation_key="app-bundle-podman-1_start_0" on_node="node3" on_node_uuid="3"/>
+ <rsc_op id="103" operation="start" operation_key="app-bundle-podman-1_start_0" on_node="node3" on_node_uuid="3"/>
</trigger>
</inputs>
</synapse>
- <synapse id="85">
+ <synapse id="91">
<action_set>
<rsc_op id="11" operation="monitor" operation_key="app-bundle-1_monitor_0" on_node="node1" on_node_uuid="1">
<primitive id="app-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
@@ -1369,26 +1482,26 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="102" operation="start" operation_key="app-bundle-podman-1_start_0" on_node="node3" on_node_uuid="3"/>
+ <rsc_op id="103" operation="start" operation_key="app-bundle-podman-1_start_0" on_node="node3" on_node_uuid="3"/>
</trigger>
</inputs>
</synapse>
- <synapse id="86">
+ <synapse id="92">
<action_set>
- <rsc_op id="107" operation="monitor" operation_key="app-bundle-podman-2_monitor_60000" on_node="node4" on_node_uuid="4">
+ <rsc_op id="108" operation="monitor" operation_key="app-bundle-podman-2_monitor_60000" on_node="node4" on_node_uuid="4">
<primitive id="app-bundle-podman-2" class="ocf" provider="heartbeat" type="podman"/>
<attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="node4" CRM_meta_on_node_uuid="4" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="localhost/pcmktest:app" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/app-bundle-2" reuse="false" run_cmd="/usr/sbin/pacemaker-remoted" run_opts=" -e PCMK_stderr=1 -e PCMK_remote_port=3121 -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/app-bundle-2:/var/log -p 3121:3121 "/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="106" operation="start" operation_key="app-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4"/>
+ <rsc_op id="107" operation="start" operation_key="app-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4"/>
</trigger>
</inputs>
</synapse>
- <synapse id="87">
+ <synapse id="93">
<action_set>
- <rsc_op id="106" operation="start" operation_key="app-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4">
+ <rsc_op id="107" operation="start" operation_key="app-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4">
<primitive id="app-bundle-podman-2" class="ocf" provider="heartbeat" type="podman"/>
<attributes CRM_meta_on_node="node4" CRM_meta_on_node_uuid="4" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="localhost/pcmktest:app" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/app-bundle-2" reuse="false" run_cmd="/usr/sbin/pacemaker-remoted" run_opts=" -e PCMK_stderr=1 -e PCMK_remote_port=3121 -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/app-bundle-2:/var/log -p 3121:3121 "/>
</rsc_op>
@@ -1416,14 +1529,14 @@
<rsc_op id="60" operation="monitor" operation_key="app-bundle-podman-2_monitor_0" on_node="node5" on_node_uuid="5"/>
</trigger>
<trigger>
- <rsc_op id="84" operation="start" operation_key="base:2_start_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node4"/>
+ <rsc_op id="80" operation="start" operation_key="base:2_start_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node4"/>
</trigger>
<trigger>
- <pseudo_event id="110" operation="start" operation_key="app-bundle_start_0"/>
+ <pseudo_event id="111" operation="start" operation_key="app-bundle_start_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="88">
+ <synapse id="94">
<action_set>
<rsc_op id="60" operation="monitor" operation_key="app-bundle-podman-2_monitor_0" on_node="node5" on_node_uuid="5">
<primitive id="app-bundle-podman-2" class="ocf" provider="heartbeat" type="podman"/>
@@ -1432,7 +1545,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="89">
+ <synapse id="95">
<action_set>
<rsc_op id="48" operation="monitor" operation_key="app-bundle-podman-2_monitor_0" on_node="node4" on_node_uuid="4">
<primitive id="app-bundle-podman-2" class="ocf" provider="heartbeat" type="podman"/>
@@ -1441,7 +1554,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="90">
+ <synapse id="96">
<action_set>
<rsc_op id="36" operation="monitor" operation_key="app-bundle-podman-2_monitor_0" on_node="node3" on_node_uuid="3">
<primitive id="app-bundle-podman-2" class="ocf" provider="heartbeat" type="podman"/>
@@ -1450,7 +1563,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="91">
+ <synapse id="97">
<action_set>
<rsc_op id="24" operation="monitor" operation_key="app-bundle-podman-2_monitor_0" on_node="node2" on_node_uuid="2">
<primitive id="app-bundle-podman-2" class="ocf" provider="heartbeat" type="podman"/>
@@ -1459,7 +1572,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="92">
+ <synapse id="98">
<action_set>
<rsc_op id="12" operation="monitor" operation_key="app-bundle-podman-2_monitor_0" on_node="node1" on_node_uuid="1">
<primitive id="app-bundle-podman-2" class="ocf" provider="heartbeat" type="podman"/>
@@ -1468,22 +1581,22 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="93">
+ <synapse id="99">
<action_set>
- <rsc_op id="109" operation="monitor" operation_key="app-bundle-2_monitor_30000" on_node="node4" on_node_uuid="4">
+ <rsc_op id="110" operation="monitor" operation_key="app-bundle-2_monitor_30000" on_node="node4" on_node_uuid="4">
<primitive id="app-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
<attributes CRM_meta_container="app-bundle-podman-2" CRM_meta_interval="30000" CRM_meta_name="monitor" CRM_meta_on_node="node4" CRM_meta_on_node_uuid="4" CRM_meta_timeout="30000" addr="node4" port="3121"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="108" operation="start" operation_key="app-bundle-2_start_0" on_node="node4" on_node_uuid="4"/>
+ <rsc_op id="109" operation="start" operation_key="app-bundle-2_start_0" on_node="node4" on_node_uuid="4"/>
</trigger>
</inputs>
</synapse>
- <synapse id="94">
+ <synapse id="100">
<action_set>
- <rsc_op id="108" operation="start" operation_key="app-bundle-2_start_0" on_node="node4" on_node_uuid="4">
+ <rsc_op id="109" operation="start" operation_key="app-bundle-2_start_0" on_node="node4" on_node_uuid="4">
<primitive id="app-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
<attributes CRM_meta_container="app-bundle-podman-2" CRM_meta_on_node="node4" CRM_meta_on_node_uuid="4" CRM_meta_timeout="20000" addr="node4" port="3121"/>
</rsc_op>
@@ -1505,11 +1618,11 @@
<rsc_op id="61" operation="monitor" operation_key="app-bundle-2_monitor_0" on_node="node5" on_node_uuid="5"/>
</trigger>
<trigger>
- <rsc_op id="106" operation="start" operation_key="app-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4"/>
+ <rsc_op id="107" operation="start" operation_key="app-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4"/>
</trigger>
</inputs>
</synapse>
- <synapse id="95">
+ <synapse id="101">
<action_set>
<rsc_op id="61" operation="monitor" operation_key="app-bundle-2_monitor_0" on_node="node5" on_node_uuid="5">
<primitive id="app-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
@@ -1518,11 +1631,11 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="106" operation="start" operation_key="app-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4"/>
+ <rsc_op id="107" operation="start" operation_key="app-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4"/>
</trigger>
</inputs>
</synapse>
- <synapse id="96">
+ <synapse id="102">
<action_set>
<rsc_op id="49" operation="monitor" operation_key="app-bundle-2_monitor_0" on_node="node4" on_node_uuid="4">
<primitive id="app-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
@@ -1531,11 +1644,11 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="106" operation="start" operation_key="app-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4"/>
+ <rsc_op id="107" operation="start" operation_key="app-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4"/>
</trigger>
</inputs>
</synapse>
- <synapse id="97">
+ <synapse id="103">
<action_set>
<rsc_op id="37" operation="monitor" operation_key="app-bundle-2_monitor_0" on_node="node3" on_node_uuid="3">
<primitive id="app-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
@@ -1544,11 +1657,11 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="106" operation="start" operation_key="app-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4"/>
+ <rsc_op id="107" operation="start" operation_key="app-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4"/>
</trigger>
</inputs>
</synapse>
- <synapse id="98">
+ <synapse id="104">
<action_set>
<rsc_op id="25" operation="monitor" operation_key="app-bundle-2_monitor_0" on_node="node2" on_node_uuid="2">
<primitive id="app-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
@@ -1557,11 +1670,11 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="106" operation="start" operation_key="app-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4"/>
+ <rsc_op id="107" operation="start" operation_key="app-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4"/>
</trigger>
</inputs>
</synapse>
- <synapse id="99">
+ <synapse id="105">
<action_set>
<rsc_op id="13" operation="monitor" operation_key="app-bundle-2_monitor_0" on_node="node1" on_node_uuid="1">
<primitive id="app-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
@@ -1570,34 +1683,61 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="106" operation="start" operation_key="app-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4"/>
+ <rsc_op id="107" operation="start" operation_key="app-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="106" priority="1000000">
+ <action_set>
+ <pseudo_event id="131" operation="promoted" operation_key="app-bundle_promoted_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="127" operation="promoted" operation_key="app-bundle-clone_promoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="107">
+ <action_set>
+ <pseudo_event id="130" operation="promote" operation_key="app-bundle_promote_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="96" operation="promoted" operation_key="base-bundle_promoted_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="112" operation="running" operation_key="app-bundle_running_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="100" priority="1000000">
+ <synapse id="108" priority="1000000">
<action_set>
- <pseudo_event id="111" operation="running" operation_key="app-bundle_running_0">
+ <pseudo_event id="112" operation="running" operation_key="app-bundle_running_0">
<attributes CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <rsc_op id="98" operation="start" operation_key="app-bundle-podman-0_start_0" on_node="node2" on_node_uuid="2"/>
+ <rsc_op id="99" operation="start" operation_key="app-bundle-podman-0_start_0" on_node="node2" on_node_uuid="2"/>
</trigger>
<trigger>
- <rsc_op id="102" operation="start" operation_key="app-bundle-podman-1_start_0" on_node="node3" on_node_uuid="3"/>
+ <rsc_op id="103" operation="start" operation_key="app-bundle-podman-1_start_0" on_node="node3" on_node_uuid="3"/>
</trigger>
<trigger>
- <rsc_op id="106" operation="start" operation_key="app-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4"/>
+ <rsc_op id="107" operation="start" operation_key="app-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4"/>
</trigger>
<trigger>
- <pseudo_event id="121" operation="running" operation_key="app-bundle-clone_running_0"/>
+ <pseudo_event id="123" operation="running" operation_key="app-bundle-clone_running_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="101">
+ <synapse id="109">
<action_set>
- <pseudo_event id="110" operation="start" operation_key="app-bundle_start_0">
+ <pseudo_event id="111" operation="start" operation_key="app-bundle_start_0">
<attributes CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
@@ -1607,7 +1747,31 @@
</trigger>
</inputs>
</synapse>
- <synapse id="102" priority="1000000">
+ <synapse id="110" priority="1000000">
+ <action_set>
+ <pseudo_event id="96" operation="promoted" operation_key="base-bundle_promoted_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="92" operation="promoted" operation_key="base-bundle-clone_promoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="111">
+ <action_set>
+ <pseudo_event id="95" operation="promote" operation_key="base-bundle_promote_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="77" operation="running" operation_key="base-bundle_running_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="112" priority="1000000">
<action_set>
<pseudo_event id="77" operation="running" operation_key="base-bundle_running_0">
<attributes CRM_meta_timeout="20000" />
@@ -1624,11 +1788,11 @@
<rsc_op id="72" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node4" on_node_uuid="4"/>
</trigger>
<trigger>
- <pseudo_event id="87" operation="running" operation_key="base-bundle-clone_running_0"/>
+ <pseudo_event id="88" operation="running" operation_key="base-bundle-clone_running_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="103">
+ <synapse id="113">
<action_set>
<pseudo_event id="76" operation="start" operation_key="base-bundle_start_0">
<attributes CRM_meta_timeout="20000" />
diff --git a/cts/scheduler/exp/bundle-nested-colocation.exp b/cts/scheduler/exp/bundle-nested-colocation.exp
index 025699c..ec7a71f 100644
--- a/cts/scheduler/exp/bundle-nested-colocation.exp
+++ b/cts/scheduler/exp/bundle-nested-colocation.exp
@@ -177,6 +177,9 @@
<rsc_op id="47" operation="start" operation_key="rabbitmq-bundle-2_start_0" on_node="overcloud-controller-2" on_node_uuid="3"/>
</trigger>
<trigger>
+ <rsc_op id="53" operation="start" operation_key="rabbitmq:0_start_0" on_node="rabbitmq-bundle-0" on_node_uuid="rabbitmq-bundle-0" router_node="overcloud-controller-0"/>
+ </trigger>
+ <trigger>
<rsc_op id="55" operation="start" operation_key="rabbitmq:1_start_0" on_node="rabbitmq-bundle-1" on_node_uuid="rabbitmq-bundle-1" router_node="overcloud-controller-1"/>
</trigger>
<trigger>
diff --git a/cts/scheduler/exp/bundle-order-fencing.exp b/cts/scheduler/exp/bundle-order-fencing.exp
index 3149204..a47bd4f 100644
--- a/cts/scheduler/exp/bundle-order-fencing.exp
+++ b/cts/scheduler/exp/bundle-order-fencing.exp
@@ -1,104 +1,104 @@
<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
- <synapse id="0" priority="1000000">
+ <synapse id="0">
<action_set>
- <pseudo_event id="217" operation="notified" operation_key="rabbitmq_notified_0" internal_operation_key="rabbitmq:0_confirmed-post_notify_stonith_0">
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_key_operation="stonith" CRM_meta_notify_key_type="confirmed-post" CRM_meta_notify_operation="stop" CRM_meta_notify_type="post" CRM_meta_timeout="20000" set_policy="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
- </pseudo_event>
+ <rsc_op id="241" operation="notify" operation_key="rabbitmq_pre_notify_stop_0" internal_operation_key="rabbitmq:1_pre_notify_stop_0" on_node="rabbitmq-bundle-1" on_node_uuid="rabbitmq-bundle-1" router_node="controller-1">
+ <primitive id="rabbitmq" long-id="rabbitmq:1" class="ocf" provider="heartbeat" type="rabbitmq-cluster"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_active_resource="rabbitmq:0 rabbitmq:1 rabbitmq:2" CRM_meta_notify_active_uname="rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource=" " CRM_meta_notify_promote_uname=" " CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="rabbitmq:0" CRM_meta_notify_stop_uname="rabbitmq-bundle-0" CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="rabbitmq-bundle-1" CRM_meta_on_node_uuid="rabbitmq-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_timeout="20000" set_policy="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
+ </rsc_op>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="216" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:0_post_notify_stonith_0"/>
- </trigger>
- <trigger>
- <rsc_op id="218" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:1_post_notify_stop_0" on_node="rabbitmq-bundle-1" on_node_uuid="rabbitmq-bundle-1" router_node="controller-1"/>
- </trigger>
- <trigger>
- <rsc_op id="219" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:2_post_notify_stop_0" on_node="rabbitmq-bundle-2" on_node_uuid="rabbitmq-bundle-2" router_node="controller-2"/>
+ <pseudo_event id="72" operation="notify" operation_key="rabbitmq-bundle-clone_pre_notify_stop_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="1" priority="1000000">
<action_set>
- <pseudo_event id="216" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:0_post_notify_stonith_0">
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_key_operation="stonith" CRM_meta_notify_key_type="post" CRM_meta_notify_operation="stop" CRM_meta_notify_type="post" CRM_meta_timeout="20000" set_policy="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
- </pseudo_event>
+ <rsc_op id="218" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:1_post_notify_stop_0" on_node="rabbitmq-bundle-1" on_node_uuid="rabbitmq-bundle-1" router_node="controller-1">
+ <primitive id="rabbitmq" long-id="rabbitmq:1" class="ocf" provider="heartbeat" type="rabbitmq-cluster"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_active_resource="rabbitmq:0 rabbitmq:1 rabbitmq:2" CRM_meta_notify_active_uname="rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stonith" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource=" " CRM_meta_notify_promote_uname=" " CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="rabbitmq:0" CRM_meta_notify_stop_uname="rabbitmq-bundle-0" CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="rabbitmq-bundle-1" CRM_meta_on_node_uuid="rabbitmq-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_timeout="20000" set_policy="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
+ </rsc_op>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="215" operation="stonith" operation_key="stonith-rabbitmq-bundle-0-off" on_node="rabbitmq-bundle-0" on_node_uuid="rabbitmq-bundle-0"/>
+ <pseudo_event id="74" operation="notify" operation_key="rabbitmq-bundle-clone_post_notify_stopped_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="216" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:0_post_notify_stonith_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="2">
<action_set>
- <pseudo_event id="61" operation="stop" operation_key="rabbitmq_stop_0" internal_operation_key="rabbitmq:0_stop_0">
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_name="stop" CRM_meta_notify="true" CRM_meta_notify_active_resource="rabbitmq:0 rabbitmq:1 rabbitmq:2" CRM_meta_notify_active_uname="rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource=" " CRM_meta_notify_promote_uname=" " CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="rabbitmq:0" CRM_meta_notify_stop_uname="rabbitmq-bundle-0" CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_physical_host="controller-0" CRM_meta_timeout="200000" set_policy="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
- </pseudo_event>
+ <rsc_op id="242" operation="notify" operation_key="rabbitmq_pre_notify_stop_0" internal_operation_key="rabbitmq:2_pre_notify_stop_0" on_node="rabbitmq-bundle-2" on_node_uuid="rabbitmq-bundle-2" router_node="controller-2">
+ <primitive id="rabbitmq" long-id="rabbitmq:2" class="ocf" provider="heartbeat" type="rabbitmq-cluster"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_active_resource="rabbitmq:0 rabbitmq:1 rabbitmq:2" CRM_meta_notify_active_uname="rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource=" " CRM_meta_notify_promote_uname=" " CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="rabbitmq:0" CRM_meta_notify_stop_uname="rabbitmq-bundle-0" CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="rabbitmq-bundle-2" CRM_meta_on_node_uuid="rabbitmq-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_timeout="20000" set_policy="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
+ </rsc_op>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="55" operation="stop" operation_key="rabbitmq-bundle_stop_0"/>
- </trigger>
- <trigger>
- <pseudo_event id="66" operation="stop" operation_key="rabbitmq-bundle-clone_stop_0"/>
+ <pseudo_event id="72" operation="notify" operation_key="rabbitmq-bundle-clone_pre_notify_stop_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="3">
+ <synapse id="3" priority="1000000">
<action_set>
- <rsc_op id="241" operation="notify" operation_key="rabbitmq_pre_notify_stop_0" internal_operation_key="rabbitmq:1_pre_notify_stop_0" on_node="rabbitmq-bundle-1" on_node_uuid="rabbitmq-bundle-1" router_node="controller-1">
- <primitive id="rabbitmq" long-id="rabbitmq:1" class="ocf" provider="heartbeat" type="rabbitmq-cluster"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_active_resource="rabbitmq:0 rabbitmq:1 rabbitmq:2" CRM_meta_notify_active_uname="rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource=" " CRM_meta_notify_promote_uname=" " CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="rabbitmq:0" CRM_meta_notify_stop_uname="rabbitmq-bundle-0" CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="rabbitmq-bundle-1" CRM_meta_on_node_uuid="rabbitmq-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_timeout="20000" set_policy="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
+ <rsc_op id="219" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:2_post_notify_stop_0" on_node="rabbitmq-bundle-2" on_node_uuid="rabbitmq-bundle-2" router_node="controller-2">
+ <primitive id="rabbitmq" long-id="rabbitmq:2" class="ocf" provider="heartbeat" type="rabbitmq-cluster"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_active_resource="rabbitmq:0 rabbitmq:1 rabbitmq:2" CRM_meta_notify_active_uname="rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stonith" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource=" " CRM_meta_notify_promote_uname=" " CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="rabbitmq:0" CRM_meta_notify_stop_uname="rabbitmq-bundle-0" CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="rabbitmq-bundle-2" CRM_meta_on_node_uuid="rabbitmq-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_timeout="20000" set_policy="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="72" operation="notify" operation_key="rabbitmq-bundle-clone_pre_notify_stop_0"/>
+ <pseudo_event id="74" operation="notify" operation_key="rabbitmq-bundle-clone_post_notify_stopped_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="216" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:0_post_notify_stonith_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="4" priority="1000000">
<action_set>
- <rsc_op id="218" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:1_post_notify_stop_0" on_node="rabbitmq-bundle-1" on_node_uuid="rabbitmq-bundle-1" router_node="controller-1">
- <primitive id="rabbitmq" long-id="rabbitmq:1" class="ocf" provider="heartbeat" type="rabbitmq-cluster"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_active_resource="rabbitmq:0 rabbitmq:1 rabbitmq:2" CRM_meta_notify_active_uname="rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stonith" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource=" " CRM_meta_notify_promote_uname=" " CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="rabbitmq:0" CRM_meta_notify_stop_uname="rabbitmq-bundle-0" CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="rabbitmq-bundle-1" CRM_meta_on_node_uuid="rabbitmq-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_timeout="20000" set_policy="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
- </rsc_op>
+ <pseudo_event id="217" operation="notified" operation_key="rabbitmq_notified_0" internal_operation_key="rabbitmq:0_confirmed-post_notify_stonith_0">
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_key_operation="stonith" CRM_meta_notify_key_type="confirmed-post" CRM_meta_notify_operation="stop" CRM_meta_notify_type="post" CRM_meta_timeout="20000" set_policy="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
+ </pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="74" operation="notify" operation_key="rabbitmq-bundle-clone_post_notify_stopped_0"/>
+ <pseudo_event id="216" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:0_post_notify_stonith_0"/>
</trigger>
<trigger>
- <pseudo_event id="216" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:0_post_notify_stonith_0"/>
+ <rsc_op id="218" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:1_post_notify_stop_0" on_node="rabbitmq-bundle-1" on_node_uuid="rabbitmq-bundle-1" router_node="controller-1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="219" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:2_post_notify_stop_0" on_node="rabbitmq-bundle-2" on_node_uuid="rabbitmq-bundle-2" router_node="controller-2"/>
</trigger>
</inputs>
</synapse>
- <synapse id="5">
+ <synapse id="5" priority="1000000">
<action_set>
- <rsc_op id="242" operation="notify" operation_key="rabbitmq_pre_notify_stop_0" internal_operation_key="rabbitmq:2_pre_notify_stop_0" on_node="rabbitmq-bundle-2" on_node_uuid="rabbitmq-bundle-2" router_node="controller-2">
- <primitive id="rabbitmq" long-id="rabbitmq:2" class="ocf" provider="heartbeat" type="rabbitmq-cluster"/>
- <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_active_resource="rabbitmq:0 rabbitmq:1 rabbitmq:2" CRM_meta_notify_active_uname="rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource=" " CRM_meta_notify_promote_uname=" " CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="rabbitmq:0" CRM_meta_notify_stop_uname="rabbitmq-bundle-0" CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="rabbitmq-bundle-2" CRM_meta_on_node_uuid="rabbitmq-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_timeout="20000" set_policy="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
- </rsc_op>
+ <pseudo_event id="216" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:0_post_notify_stonith_0">
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_key_operation="stonith" CRM_meta_notify_key_type="post" CRM_meta_notify_operation="stop" CRM_meta_notify_type="post" CRM_meta_timeout="20000" set_policy="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
+ </pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="72" operation="notify" operation_key="rabbitmq-bundle-clone_pre_notify_stop_0"/>
+ <pseudo_event id="215" operation="stonith" operation_key="stonith-rabbitmq-bundle-0-off" on_node="rabbitmq-bundle-0" on_node_uuid="rabbitmq-bundle-0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="6" priority="1000000">
+ <synapse id="6">
<action_set>
- <rsc_op id="219" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:2_post_notify_stop_0" on_node="rabbitmq-bundle-2" on_node_uuid="rabbitmq-bundle-2" router_node="controller-2">
- <primitive id="rabbitmq" long-id="rabbitmq:2" class="ocf" provider="heartbeat" type="rabbitmq-cluster"/>
- <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_active_resource="rabbitmq:0 rabbitmq:1 rabbitmq:2" CRM_meta_notify_active_uname="rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stonith" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource=" " CRM_meta_notify_promote_uname=" " CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="rabbitmq:0" CRM_meta_notify_stop_uname="rabbitmq-bundle-0" CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="rabbitmq-bundle-2" CRM_meta_on_node_uuid="rabbitmq-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_timeout="20000" set_policy="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
- </rsc_op>
+ <pseudo_event id="61" operation="stop" operation_key="rabbitmq_stop_0" internal_operation_key="rabbitmq:0_stop_0">
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_name="stop" CRM_meta_notify="true" CRM_meta_notify_active_resource="rabbitmq:0 rabbitmq:1 rabbitmq:2" CRM_meta_notify_active_uname="rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource=" " CRM_meta_notify_promote_uname=" " CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="rabbitmq:0" CRM_meta_notify_stop_uname="rabbitmq-bundle-0" CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_physical_host="controller-0" CRM_meta_timeout="200000" set_policy="ha-all ^(?!amq\.).* {&quot;ha-mode&quot;:&quot;all&quot;}"/>
+ </pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="74" operation="notify" operation_key="rabbitmq-bundle-clone_post_notify_stopped_0"/>
+ <pseudo_event id="55" operation="stop" operation_key="rabbitmq-bundle_stop_0"/>
</trigger>
<trigger>
- <pseudo_event id="216" operation="notify" operation_key="rabbitmq_post_notify_stop_0" internal_operation_key="rabbitmq:0_post_notify_stonith_0"/>
+ <pseudo_event id="66" operation="stop" operation_key="rabbitmq-bundle-clone_stop_0"/>
</trigger>
</inputs>
</synapse>
@@ -566,66 +566,6 @@
</synapse>
<synapse id="43" priority="1000000">
<action_set>
- <pseudo_event id="222" operation="notified" operation_key="redis_notified_0" internal_operation_key="redis:0_confirmed-post_notify_stonith_0">
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="stonith" CRM_meta_notify_key_type="confirmed-post" CRM_meta_notify_operation="stop" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" wait_last_known_master="true"/>
- </pseudo_event>
- </action_set>
- <inputs>
- <trigger>
- <pseudo_event id="221" operation="notify" operation_key="redis_post_notify_stop_0" internal_operation_key="redis:0_post_notify_stonith_0"/>
- </trigger>
- <trigger>
- <rsc_op id="223" operation="notify" operation_key="redis_post_notify_stop_0" internal_operation_key="redis:1_post_notify_stop_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1"/>
- </trigger>
- <trigger>
- <rsc_op id="224" operation="notify" operation_key="redis_post_notify_stop_0" internal_operation_key="redis:2_post_notify_stop_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2"/>
- </trigger>
- </inputs>
- </synapse>
- <synapse id="44" priority="1000000">
- <action_set>
- <pseudo_event id="221" operation="notify" operation_key="redis_post_notify_stop_0" internal_operation_key="redis:0_post_notify_stonith_0">
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="stonith" CRM_meta_notify_key_type="post" CRM_meta_notify_operation="stop" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" wait_last_known_master="true"/>
- </pseudo_event>
- </action_set>
- <inputs>
- <trigger>
- <pseudo_event id="220" operation="stonith" operation_key="stonith-redis-bundle-0-off" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0"/>
- </trigger>
- </inputs>
- </synapse>
- <synapse id="45">
- <action_set>
- <pseudo_event id="140" operation="stop" operation_key="redis_stop_0" internal_operation_key="redis:0_stop_0">
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="stop" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:1 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_demote_resource="redis:0" CRM_meta_notify_demote_uname="redis-bundle-0" CRM_meta_notify_inactive_resource=" " CRM_meta_notify_master_resource="redis:0" CRM_meta_notify_master_uname="redis-bundle-0" CRM_meta_notify_promote_resource="redis:1" CRM_meta_notify_promote_uname="redis-bundle-1" CRM_meta_notify_promoted_resource="redis:0" CRM_meta_notify_promoted_uname="redis-bundle-0" CRM_meta_notify_slave_resource="redis:1 redis:2" CRM_meta_notify_slave_uname="redis-bundle-1 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="redis:0" CRM_meta_notify_stop_uname="redis-bundle-0" CRM_meta_notify_unpromoted_resource="redis:1 redis:2" CRM_meta_notify_unpromoted_uname="redis-bundle-1 redis-bundle-2" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="200000" wait_last_known_master="true"/>
- </pseudo_event>
- </action_set>
- <inputs>
- <trigger>
- <pseudo_event id="131" operation="stop" operation_key="redis-bundle_stop_0"/>
- </trigger>
- <trigger>
- <pseudo_event id="139" operation="demote" operation_key="redis_demote_0" internal_operation_key="redis:0_demote_0"/>
- </trigger>
- <trigger>
- <pseudo_event id="146" operation="stop" operation_key="redis-bundle-master_stop_0"/>
- </trigger>
- </inputs>
- </synapse>
- <synapse id="46">
- <action_set>
- <pseudo_event id="139" operation="demote" operation_key="redis_demote_0" internal_operation_key="redis:0_demote_0">
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="demote" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:1 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_demote_resource="redis:0" CRM_meta_notify_demote_uname="redis-bundle-0" CRM_meta_notify_inactive_resource=" " CRM_meta_notify_master_resource="redis:0" CRM_meta_notify_master_uname="redis-bundle-0" CRM_meta_notify_promote_resource="redis:1" CRM_meta_notify_promote_uname="redis-bundle-1" CRM_meta_notify_promoted_resource="redis:0" CRM_meta_notify_promoted_uname="redis-bundle-0" CRM_meta_notify_slave_resource="redis:1 redis:2" CRM_meta_notify_slave_uname="redis-bundle-1 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="redis:0" CRM_meta_notify_stop_uname="redis-bundle-0" CRM_meta_notify_unpromoted_resource="redis:1 redis:2" CRM_meta_notify_unpromoted_uname="redis-bundle-1 redis-bundle-2" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" wait_last_known_master="true"/>
- </pseudo_event>
- </action_set>
- <inputs>
- <trigger>
- <pseudo_event id="162" operation="demote" operation_key="redis-bundle-master_demote_0"/>
- </trigger>
- </inputs>
- </synapse>
- <synapse id="47" priority="1000000">
- <action_set>
<rsc_op id="250" operation="notify" operation_key="redis_post_notify_demote_0" internal_operation_key="redis:1_post_notify_demote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
<primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
<attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:1 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_demote_resource="redis:0" CRM_meta_notify_demote_uname="redis-bundle-0" CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="demoted" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource="redis:0" CRM_meta_notify_master_uname="redis-bundle-0" CRM_meta_notify_operation="demote" CRM_meta_notify_promote_resource="redis:1" CRM_meta_notify_promote_uname="redis-bundle-1" CRM_meta_notify_promoted_resource="redis:0" CRM_meta_notify_promoted_uname="redis-bundle-0" CRM_meta_notify_slave_resource="redis:1 redis:2" CRM_meta_notify_slave_uname="redis-bundle-1 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="redis:0" CRM_meta_notify_stop_uname="redis-bundle-0" CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource="redis:1 redis:2" CRM_meta_notify_unpromoted_uname="redis-bundle-1 redis-bundle-2" CRM_meta_on_node="redis-bundle-1" CRM_meta_on_node_uuid="redis-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" wait_last_known_master="true"/>
@@ -637,7 +577,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="48">
+ <synapse id="44">
<action_set>
<rsc_op id="249" operation="notify" operation_key="redis_pre_notify_demote_0" internal_operation_key="redis:1_pre_notify_demote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
<primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
@@ -650,7 +590,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="49" priority="1000000">
+ <synapse id="45" priority="1000000">
<action_set>
<rsc_op id="246" operation="notify" operation_key="redis_post_notify_promote_0" internal_operation_key="redis:1_post_notify_promote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
<primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
@@ -663,7 +603,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="50">
+ <synapse id="46">
<action_set>
<rsc_op id="245" operation="notify" operation_key="redis_pre_notify_promote_0" internal_operation_key="redis:1_pre_notify_promote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
<primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
@@ -676,7 +616,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="51">
+ <synapse id="47">
<action_set>
<rsc_op id="243" operation="notify" operation_key="redis_pre_notify_stop_0" internal_operation_key="redis:1_pre_notify_stop_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
<primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
@@ -689,7 +629,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="52" priority="1000000">
+ <synapse id="48" priority="1000000">
<action_set>
<rsc_op id="223" operation="notify" operation_key="redis_post_notify_stop_0" internal_operation_key="redis:1_post_notify_stop_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
<primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
@@ -705,7 +645,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="53">
+ <synapse id="49">
<action_set>
<rsc_op id="136" operation="monitor" operation_key="redis_monitor_20000" internal_operation_key="redis:1_monitor_20000" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
<primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
@@ -730,7 +670,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="54">
+ <synapse id="50">
<action_set>
<rsc_op id="135" operation="promote" operation_key="redis_promote_0" internal_operation_key="redis:1_promote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
<primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
@@ -761,7 +701,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="55">
+ <synapse id="51">
<action_set>
<rsc_op id="28" operation="cancel" operation_key="redis_monitor_45000" internal_operation_key="redis:1_monitor_45000" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
<primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
@@ -770,7 +710,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="56">
+ <synapse id="52">
<action_set>
<rsc_op id="27" operation="cancel" operation_key="redis_monitor_60000" internal_operation_key="redis:1_monitor_60000" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
<primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
@@ -779,7 +719,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="57" priority="1000000">
+ <synapse id="53" priority="1000000">
<action_set>
<rsc_op id="252" operation="notify" operation_key="redis_post_notify_demote_0" internal_operation_key="redis:2_post_notify_demote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
<primitive id="redis" long-id="redis:2" class="ocf" provider="heartbeat" type="redis"/>
@@ -792,7 +732,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="58">
+ <synapse id="54">
<action_set>
<rsc_op id="251" operation="notify" operation_key="redis_pre_notify_demote_0" internal_operation_key="redis:2_pre_notify_demote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
<primitive id="redis" long-id="redis:2" class="ocf" provider="heartbeat" type="redis"/>
@@ -805,7 +745,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="59" priority="1000000">
+ <synapse id="55" priority="1000000">
<action_set>
<rsc_op id="248" operation="notify" operation_key="redis_post_notify_promote_0" internal_operation_key="redis:2_post_notify_promote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
<primitive id="redis" long-id="redis:2" class="ocf" provider="heartbeat" type="redis"/>
@@ -818,7 +758,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="60">
+ <synapse id="56">
<action_set>
<rsc_op id="247" operation="notify" operation_key="redis_pre_notify_promote_0" internal_operation_key="redis:2_pre_notify_promote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
<primitive id="redis" long-id="redis:2" class="ocf" provider="heartbeat" type="redis"/>
@@ -831,7 +771,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="61">
+ <synapse id="57">
<action_set>
<rsc_op id="244" operation="notify" operation_key="redis_pre_notify_stop_0" internal_operation_key="redis:2_pre_notify_stop_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
<primitive id="redis" long-id="redis:2" class="ocf" provider="heartbeat" type="redis"/>
@@ -844,7 +784,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="62" priority="1000000">
+ <synapse id="58" priority="1000000">
<action_set>
<rsc_op id="224" operation="notify" operation_key="redis_post_notify_stop_0" internal_operation_key="redis:2_post_notify_stop_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
<primitive id="redis" long-id="redis:2" class="ocf" provider="heartbeat" type="redis"/>
@@ -860,6 +800,66 @@
</trigger>
</inputs>
</synapse>
+ <synapse id="59" priority="1000000">
+ <action_set>
+ <pseudo_event id="222" operation="notified" operation_key="redis_notified_0" internal_operation_key="redis:0_confirmed-post_notify_stonith_0">
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="stonith" CRM_meta_notify_key_type="confirmed-post" CRM_meta_notify_operation="stop" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" wait_last_known_master="true"/>
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="221" operation="notify" operation_key="redis_post_notify_stop_0" internal_operation_key="redis:0_post_notify_stonith_0"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="223" operation="notify" operation_key="redis_post_notify_stop_0" internal_operation_key="redis:1_post_notify_stop_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="224" operation="notify" operation_key="redis_post_notify_stop_0" internal_operation_key="redis:2_post_notify_stop_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="60" priority="1000000">
+ <action_set>
+ <pseudo_event id="221" operation="notify" operation_key="redis_post_notify_stop_0" internal_operation_key="redis:0_post_notify_stonith_0">
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="stonith" CRM_meta_notify_key_type="post" CRM_meta_notify_operation="stop" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" wait_last_known_master="true"/>
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="220" operation="stonith" operation_key="stonith-redis-bundle-0-off" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="61">
+ <action_set>
+ <pseudo_event id="140" operation="stop" operation_key="redis_stop_0" internal_operation_key="redis:0_stop_0">
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="stop" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:1 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_demote_resource="redis:0" CRM_meta_notify_demote_uname="redis-bundle-0" CRM_meta_notify_inactive_resource=" " CRM_meta_notify_master_resource="redis:0" CRM_meta_notify_master_uname="redis-bundle-0" CRM_meta_notify_promote_resource="redis:1" CRM_meta_notify_promote_uname="redis-bundle-1" CRM_meta_notify_promoted_resource="redis:0" CRM_meta_notify_promoted_uname="redis-bundle-0" CRM_meta_notify_slave_resource="redis:1 redis:2" CRM_meta_notify_slave_uname="redis-bundle-1 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="redis:0" CRM_meta_notify_stop_uname="redis-bundle-0" CRM_meta_notify_unpromoted_resource="redis:1 redis:2" CRM_meta_notify_unpromoted_uname="redis-bundle-1 redis-bundle-2" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="200000" wait_last_known_master="true"/>
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="131" operation="stop" operation_key="redis-bundle_stop_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="139" operation="demote" operation_key="redis_demote_0" internal_operation_key="redis:0_demote_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="146" operation="stop" operation_key="redis-bundle-master_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="62">
+ <action_set>
+ <pseudo_event id="139" operation="demote" operation_key="redis_demote_0" internal_operation_key="redis:0_demote_0">
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="demote" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:1 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_demote_resource="redis:0" CRM_meta_notify_demote_uname="redis-bundle-0" CRM_meta_notify_inactive_resource=" " CRM_meta_notify_master_resource="redis:0" CRM_meta_notify_master_uname="redis-bundle-0" CRM_meta_notify_promote_resource="redis:1" CRM_meta_notify_promote_uname="redis-bundle-1" CRM_meta_notify_promoted_resource="redis:0" CRM_meta_notify_promoted_uname="redis-bundle-0" CRM_meta_notify_slave_resource="redis:1 redis:2" CRM_meta_notify_slave_uname="redis-bundle-1 redis-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="redis:0" CRM_meta_notify_stop_uname="redis-bundle-0" CRM_meta_notify_unpromoted_resource="redis:1 redis:2" CRM_meta_notify_unpromoted_uname="redis-bundle-1 redis-bundle-2" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" wait_last_known_master="true"/>
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="162" operation="demote" operation_key="redis-bundle-master_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
<synapse id="63" priority="1000000">
<action_set>
<pseudo_event id="167" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_demoted_0">
diff --git a/cts/scheduler/exp/bundle-order-startup-clone-2.exp b/cts/scheduler/exp/bundle-order-startup-clone-2.exp
index 8752185..e4a35c7 100644
--- a/cts/scheduler/exp/bundle-order-startup-clone-2.exp
+++ b/cts/scheduler/exp/bundle-order-startup-clone-2.exp
@@ -408,6 +408,9 @@
<rsc_op id="77" operation="start" operation_key="galera-bundle-2_start_0" on_node="metal-3" on_node_uuid="3"/>
</trigger>
<trigger>
+ <rsc_op id="83" operation="start" operation_key="galera:0_start_0" on_node="galera-bundle-0" on_node_uuid="galera-bundle-0" router_node="metal-1"/>
+ </trigger>
+ <trigger>
<rsc_op id="86" operation="start" operation_key="galera:1_start_0" on_node="galera-bundle-1" on_node_uuid="galera-bundle-1" router_node="metal-2"/>
</trigger>
<trigger>
@@ -1425,6 +1428,9 @@
<rsc_op id="124" operation="start" operation_key="redis-bundle-2_start_0" on_node="metal-3" on_node_uuid="3"/>
</trigger>
<trigger>
+ <rsc_op id="130" operation="start" operation_key="redis:0_start_0" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0" router_node="metal-1"/>
+ </trigger>
+ <trigger>
<rsc_op id="133" operation="start" operation_key="redis:1_start_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="metal-2"/>
</trigger>
<trigger>
diff --git a/cts/scheduler/exp/bundle-order-stop-on-remote.exp b/cts/scheduler/exp/bundle-order-stop-on-remote.exp
index 7e23dcc..11ec557 100644
--- a/cts/scheduler/exp/bundle-order-stop-on-remote.exp
+++ b/cts/scheduler/exp/bundle-order-stop-on-remote.exp
@@ -696,7 +696,59 @@
</synapse>
<synapse id="40" priority="1000000">
<action_set>
- <rsc_op id="254" operation="notify" operation_key="redis:1_post_notify_promote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
+ <rsc_op id="254" operation="notify" operation_key="redis_post_notify_promote_0" internal_operation_key="redis:2_post_notify_promote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
+ <primitive id="redis" long-id="redis:2" class="ocf" provider="heartbeat" type="redis"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 messaging-0 messaging-1 messaging-2 controller-0 redis-bundle-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="redis:1" CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="redis:0" CRM_meta_notify_promote_uname="redis-bundle-0" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="redis:0 redis:2" CRM_meta_notify_slave_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_start_resource="redis:1" CRM_meta_notify_start_uname="redis-bundle-1" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource="redis:0 redis:2" CRM_meta_notify_unpromoted_uname="redis-bundle-0 redis-bundle-2" CRM_meta_on_node="redis-bundle-2" CRM_meta_on_node_uuid="redis-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" wait_last_known_master="true"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="168" operation="notify" operation_key="redis-bundle-master_post_notify_promoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="41">
+ <action_set>
+ <rsc_op id="253" operation="notify" operation_key="redis_pre_notify_promote_0" internal_operation_key="redis:2_pre_notify_promote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
+ <primitive id="redis" long-id="redis:2" class="ocf" provider="heartbeat" type="redis"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 messaging-0 messaging-1 messaging-2 controller-0 redis-bundle-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="redis:1" CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="redis:0" CRM_meta_notify_promote_uname="redis-bundle-0" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="redis:0 redis:2" CRM_meta_notify_slave_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_start_resource="redis:1" CRM_meta_notify_start_uname="redis-bundle-1" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource="redis:0 redis:2" CRM_meta_notify_unpromoted_uname="redis-bundle-0 redis-bundle-2" CRM_meta_on_node="redis-bundle-2" CRM_meta_on_node_uuid="redis-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" wait_last_known_master="true"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="166" operation="notify" operation_key="redis-bundle-master_pre_notify_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="42" priority="1000000">
+ <action_set>
+ <rsc_op id="245" operation="notify" operation_key="redis_post_notify_start_0" internal_operation_key="redis:2_post_notify_start_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
+ <primitive id="redis" long-id="redis:2" class="ocf" provider="heartbeat" type="redis"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 messaging-0 messaging-1 messaging-2 controller-0 redis-bundle-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="redis:1" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="start" CRM_meta_notify_promote_resource="redis:0" CRM_meta_notify_promote_uname="redis-bundle-0" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="redis:0 redis:2" CRM_meta_notify_slave_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_start_resource="redis:1" CRM_meta_notify_start_uname="redis-bundle-1" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource="redis:0 redis:2" CRM_meta_notify_unpromoted_uname="redis-bundle-0 redis-bundle-2" CRM_meta_on_node="redis-bundle-2" CRM_meta_on_node_uuid="redis-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" wait_last_known_master="true"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="158" operation="notify" operation_key="redis-bundle-master_post_notify_running_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="43">
+ <action_set>
+ <rsc_op id="244" operation="notify" operation_key="redis_pre_notify_start_0" internal_operation_key="redis:2_pre_notify_start_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
+ <primitive id="redis" long-id="redis:2" class="ocf" provider="heartbeat" type="redis"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 messaging-0 messaging-1 messaging-2 controller-0 redis-bundle-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="redis:1" CRM_meta_notify_key_operation="start" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="start" CRM_meta_notify_promote_resource="redis:0" CRM_meta_notify_promote_uname="redis-bundle-0" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="redis:0 redis:2" CRM_meta_notify_slave_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_start_resource="redis:1" CRM_meta_notify_start_uname="redis-bundle-1" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource="redis:0 redis:2" CRM_meta_notify_unpromoted_uname="redis-bundle-0 redis-bundle-2" CRM_meta_on_node="redis-bundle-2" CRM_meta_on_node_uuid="redis-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" wait_last_known_master="true"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="156" operation="notify" operation_key="redis-bundle-master_pre_notify_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="44" priority="1000000">
+ <action_set>
+ <rsc_op id="256" operation="notify" operation_key="redis:1_post_notify_promote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
<primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
<attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 messaging-0 messaging-1 messaging-2 controller-0 redis-bundle-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="redis:1" CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="redis:0" CRM_meta_notify_promote_uname="redis-bundle-0" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="redis:0 redis:2" CRM_meta_notify_slave_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_start_resource="redis:1" CRM_meta_notify_start_uname="redis-bundle-1" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource="redis:0 redis:2" CRM_meta_notify_unpromoted_uname="redis-bundle-0 redis-bundle-2" CRM_meta_on_node="redis-bundle-1" CRM_meta_on_node_uuid="redis-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" wait_last_known_master="true"/>
</rsc_op>
@@ -707,9 +759,9 @@
</trigger>
</inputs>
</synapse>
- <synapse id="41">
+ <synapse id="45">
<action_set>
- <rsc_op id="253" operation="notify" operation_key="redis:1_pre_notify_promote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
+ <rsc_op id="255" operation="notify" operation_key="redis:1_pre_notify_promote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
<primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
<attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 messaging-0 messaging-1 messaging-2 controller-0 redis-bundle-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="redis:1" CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="redis:0" CRM_meta_notify_promote_uname="redis-bundle-0" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="redis:0 redis:2" CRM_meta_notify_slave_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_start_resource="redis:1" CRM_meta_notify_start_uname="redis-bundle-1" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource="redis:0 redis:2" CRM_meta_notify_unpromoted_uname="redis-bundle-0 redis-bundle-2" CRM_meta_on_node="redis-bundle-1" CRM_meta_on_node_uuid="redis-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" wait_last_known_master="true"/>
</rsc_op>
@@ -720,9 +772,9 @@
</trigger>
</inputs>
</synapse>
- <synapse id="42" priority="1000000">
+ <synapse id="46" priority="1000000">
<action_set>
- <rsc_op id="244" operation="notify" operation_key="redis:1_post_notify_start_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
+ <rsc_op id="246" operation="notify" operation_key="redis:1_post_notify_start_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
<primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
<attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 messaging-0 messaging-1 messaging-2 controller-0 redis-bundle-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="redis:1" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="start" CRM_meta_notify_promote_resource="redis:0" CRM_meta_notify_promote_uname="redis-bundle-0" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="redis:0 redis:2" CRM_meta_notify_slave_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_start_resource="redis:1" CRM_meta_notify_start_uname="redis-bundle-1" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource="redis:0 redis:2" CRM_meta_notify_unpromoted_uname="redis-bundle-0 redis-bundle-2" CRM_meta_on_node="redis-bundle-1" CRM_meta_on_node_uuid="redis-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" wait_last_known_master="true"/>
</rsc_op>
@@ -733,7 +785,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="43">
+ <synapse id="47">
<action_set>
<rsc_op id="151" operation="monitor" operation_key="redis:1_monitor_60000" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
<primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
@@ -755,7 +807,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="44">
+ <synapse id="48">
<action_set>
<rsc_op id="150" operation="monitor" operation_key="redis:1_monitor_45000" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
<primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
@@ -777,7 +829,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="45">
+ <synapse id="49">
<action_set>
<rsc_op id="149" operation="start" operation_key="redis:1_start_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1">
<primitive id="redis" long-id="redis:1" class="ocf" provider="heartbeat" type="redis"/>
@@ -802,58 +854,6 @@
</trigger>
</inputs>
</synapse>
- <synapse id="46" priority="1000000">
- <action_set>
- <rsc_op id="256" operation="notify" operation_key="redis_post_notify_promote_0" internal_operation_key="redis:2_post_notify_promote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
- <primitive id="redis" long-id="redis:2" class="ocf" provider="heartbeat" type="redis"/>
- <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 messaging-0 messaging-1 messaging-2 controller-0 redis-bundle-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="redis:1" CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="redis:0" CRM_meta_notify_promote_uname="redis-bundle-0" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="redis:0 redis:2" CRM_meta_notify_slave_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_start_resource="redis:1" CRM_meta_notify_start_uname="redis-bundle-1" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource="redis:0 redis:2" CRM_meta_notify_unpromoted_uname="redis-bundle-0 redis-bundle-2" CRM_meta_on_node="redis-bundle-2" CRM_meta_on_node_uuid="redis-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" wait_last_known_master="true"/>
- </rsc_op>
- </action_set>
- <inputs>
- <trigger>
- <pseudo_event id="168" operation="notify" operation_key="redis-bundle-master_post_notify_promoted_0"/>
- </trigger>
- </inputs>
- </synapse>
- <synapse id="47">
- <action_set>
- <rsc_op id="255" operation="notify" operation_key="redis_pre_notify_promote_0" internal_operation_key="redis:2_pre_notify_promote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
- <primitive id="redis" long-id="redis:2" class="ocf" provider="heartbeat" type="redis"/>
- <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 messaging-0 messaging-1 messaging-2 controller-0 redis-bundle-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="redis:1" CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="redis:0" CRM_meta_notify_promote_uname="redis-bundle-0" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="redis:0 redis:2" CRM_meta_notify_slave_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_start_resource="redis:1" CRM_meta_notify_start_uname="redis-bundle-1" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource="redis:0 redis:2" CRM_meta_notify_unpromoted_uname="redis-bundle-0 redis-bundle-2" CRM_meta_on_node="redis-bundle-2" CRM_meta_on_node_uuid="redis-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" wait_last_known_master="true"/>
- </rsc_op>
- </action_set>
- <inputs>
- <trigger>
- <pseudo_event id="166" operation="notify" operation_key="redis-bundle-master_pre_notify_promote_0"/>
- </trigger>
- </inputs>
- </synapse>
- <synapse id="48" priority="1000000">
- <action_set>
- <rsc_op id="246" operation="notify" operation_key="redis_post_notify_start_0" internal_operation_key="redis:2_post_notify_start_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
- <primitive id="redis" long-id="redis:2" class="ocf" provider="heartbeat" type="redis"/>
- <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 messaging-0 messaging-1 messaging-2 controller-0 redis-bundle-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="redis:1" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="start" CRM_meta_notify_promote_resource="redis:0" CRM_meta_notify_promote_uname="redis-bundle-0" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="redis:0 redis:2" CRM_meta_notify_slave_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_start_resource="redis:1" CRM_meta_notify_start_uname="redis-bundle-1" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource="redis:0 redis:2" CRM_meta_notify_unpromoted_uname="redis-bundle-0 redis-bundle-2" CRM_meta_on_node="redis-bundle-2" CRM_meta_on_node_uuid="redis-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" wait_last_known_master="true"/>
- </rsc_op>
- </action_set>
- <inputs>
- <trigger>
- <pseudo_event id="158" operation="notify" operation_key="redis-bundle-master_post_notify_running_0"/>
- </trigger>
- </inputs>
- </synapse>
- <synapse id="49">
- <action_set>
- <rsc_op id="245" operation="notify" operation_key="redis_pre_notify_start_0" internal_operation_key="redis:2_pre_notify_start_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2">
- <primitive id="redis" long-id="redis:2" class="ocf" provider="heartbeat" type="redis"/>
- <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource="redis:0 redis:2" CRM_meta_notify_active_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 messaging-0 messaging-1 messaging-2 controller-0 redis-bundle-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="redis:1" CRM_meta_notify_key_operation="start" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="start" CRM_meta_notify_promote_resource="redis:0" CRM_meta_notify_promote_uname="redis-bundle-0" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="redis:0 redis:2" CRM_meta_notify_slave_uname="redis-bundle-0 redis-bundle-2" CRM_meta_notify_start_resource="redis:1" CRM_meta_notify_start_uname="redis-bundle-1" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource="redis:0 redis:2" CRM_meta_notify_unpromoted_uname="redis-bundle-0 redis-bundle-2" CRM_meta_on_node="redis-bundle-2" CRM_meta_on_node_uuid="redis-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" wait_last_known_master="true"/>
- </rsc_op>
- </action_set>
- <inputs>
- <trigger>
- <pseudo_event id="156" operation="notify" operation_key="redis-bundle-master_pre_notify_start_0"/>
- </trigger>
- </inputs>
- </synapse>
<synapse id="50" priority="1000000">
<action_set>
<pseudo_event id="169" operation="notified" operation_key="redis-bundle-master_confirmed-post_notify_promoted_0">
@@ -868,10 +868,10 @@
<rsc_op id="252" operation="notify" operation_key="redis_post_notify_promote_0" internal_operation_key="redis:0_post_notify_promote_0" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0" router_node="controller-0"/>
</trigger>
<trigger>
- <rsc_op id="254" operation="notify" operation_key="redis:1_post_notify_promote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1"/>
+ <rsc_op id="254" operation="notify" operation_key="redis_post_notify_promote_0" internal_operation_key="redis:2_post_notify_promote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2"/>
</trigger>
<trigger>
- <rsc_op id="256" operation="notify" operation_key="redis_post_notify_promote_0" internal_operation_key="redis:2_post_notify_promote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2"/>
+ <rsc_op id="256" operation="notify" operation_key="redis:1_post_notify_promote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1"/>
</trigger>
</inputs>
</synapse>
@@ -904,10 +904,10 @@
<rsc_op id="251" operation="notify" operation_key="redis_pre_notify_promote_0" internal_operation_key="redis:0_pre_notify_promote_0" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0" router_node="controller-0"/>
</trigger>
<trigger>
- <rsc_op id="253" operation="notify" operation_key="redis:1_pre_notify_promote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1"/>
+ <rsc_op id="253" operation="notify" operation_key="redis_pre_notify_promote_0" internal_operation_key="redis:2_pre_notify_promote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2"/>
</trigger>
<trigger>
- <rsc_op id="255" operation="notify" operation_key="redis_pre_notify_promote_0" internal_operation_key="redis:2_pre_notify_promote_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2"/>
+ <rsc_op id="255" operation="notify" operation_key="redis:1_pre_notify_promote_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1"/>
</trigger>
</inputs>
</synapse>
@@ -967,10 +967,10 @@
<rsc_op id="243" operation="notify" operation_key="redis_post_notify_start_0" internal_operation_key="redis:0_post_notify_start_0" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0" router_node="controller-0"/>
</trigger>
<trigger>
- <rsc_op id="244" operation="notify" operation_key="redis:1_post_notify_start_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1"/>
+ <rsc_op id="245" operation="notify" operation_key="redis_post_notify_start_0" internal_operation_key="redis:2_post_notify_start_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2"/>
</trigger>
<trigger>
- <rsc_op id="246" operation="notify" operation_key="redis_post_notify_start_0" internal_operation_key="redis:2_post_notify_start_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2"/>
+ <rsc_op id="246" operation="notify" operation_key="redis:1_post_notify_start_0" on_node="redis-bundle-1" on_node_uuid="redis-bundle-1" router_node="controller-1"/>
</trigger>
</inputs>
</synapse>
@@ -1003,7 +1003,7 @@
<rsc_op id="242" operation="notify" operation_key="redis_pre_notify_start_0" internal_operation_key="redis:0_pre_notify_start_0" on_node="redis-bundle-0" on_node_uuid="redis-bundle-0" router_node="controller-0"/>
</trigger>
<trigger>
- <rsc_op id="245" operation="notify" operation_key="redis_pre_notify_start_0" internal_operation_key="redis:2_pre_notify_start_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2"/>
+ <rsc_op id="244" operation="notify" operation_key="redis_pre_notify_start_0" internal_operation_key="redis:2_pre_notify_start_0" on_node="redis-bundle-2" on_node_uuid="redis-bundle-2" router_node="controller-2"/>
</trigger>
</inputs>
</synapse>
diff --git a/cts/scheduler/exp/bundle-probe-remotes.exp b/cts/scheduler/exp/bundle-probe-remotes.exp
index b1b14db..41a6cf1 100644
--- a/cts/scheduler/exp/bundle-probe-remotes.exp
+++ b/cts/scheduler/exp/bundle-probe-remotes.exp
@@ -280,6 +280,9 @@
<rsc_op id="80" operation="start" operation_key="scale1-bundle-2_start_0" on_node="c09-h07-r630" on_node_uuid="3"/>
</trigger>
<trigger>
+ <rsc_op id="98" operation="start" operation_key="dummy1:0_start_0" on_node="scale1-bundle-0" on_node_uuid="scale1-bundle-0" router_node="c09-h05-r630"/>
+ </trigger>
+ <trigger>
<rsc_op id="100" operation="start" operation_key="dummy1:1_start_0" on_node="scale1-bundle-1" on_node_uuid="scale1-bundle-1" router_node="c09-h06-r630"/>
</trigger>
<trigger>
@@ -318,6 +321,12 @@
<rsc_op id="84" operation="start" operation_key="scale1-bundle-3_start_0" on_node="c09-h05-r630" on_node_uuid="1"/>
</trigger>
<trigger>
+ <rsc_op id="98" operation="start" operation_key="dummy1:0_start_0" on_node="scale1-bundle-0" on_node_uuid="scale1-bundle-0" router_node="c09-h05-r630"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="100" operation="start" operation_key="dummy1:1_start_0" on_node="scale1-bundle-1" on_node_uuid="scale1-bundle-1" router_node="c09-h06-r630"/>
+ </trigger>
+ <trigger>
<rsc_op id="102" operation="start" operation_key="dummy1:2_start_0" on_node="scale1-bundle-2" on_node_uuid="scale1-bundle-2" router_node="c09-h07-r630"/>
</trigger>
<trigger>
@@ -356,6 +365,15 @@
<rsc_op id="88" operation="start" operation_key="scale1-bundle-4_start_0" on_node="c09-h06-r630" on_node_uuid="2"/>
</trigger>
<trigger>
+ <rsc_op id="98" operation="start" operation_key="dummy1:0_start_0" on_node="scale1-bundle-0" on_node_uuid="scale1-bundle-0" router_node="c09-h05-r630"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="100" operation="start" operation_key="dummy1:1_start_0" on_node="scale1-bundle-1" on_node_uuid="scale1-bundle-1" router_node="c09-h06-r630"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="102" operation="start" operation_key="dummy1:2_start_0" on_node="scale1-bundle-2" on_node_uuid="scale1-bundle-2" router_node="c09-h07-r630"/>
+ </trigger>
+ <trigger>
<rsc_op id="104" operation="start" operation_key="dummy1:3_start_0" on_node="scale1-bundle-3" on_node_uuid="scale1-bundle-3" router_node="c09-h05-r630"/>
</trigger>
<trigger>
@@ -394,6 +412,18 @@
<rsc_op id="92" operation="start" operation_key="scale1-bundle-5_start_0" on_node="c09-h07-r630" on_node_uuid="3"/>
</trigger>
<trigger>
+ <rsc_op id="98" operation="start" operation_key="dummy1:0_start_0" on_node="scale1-bundle-0" on_node_uuid="scale1-bundle-0" router_node="c09-h05-r630"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="100" operation="start" operation_key="dummy1:1_start_0" on_node="scale1-bundle-1" on_node_uuid="scale1-bundle-1" router_node="c09-h06-r630"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="102" operation="start" operation_key="dummy1:2_start_0" on_node="scale1-bundle-2" on_node_uuid="scale1-bundle-2" router_node="c09-h07-r630"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="104" operation="start" operation_key="dummy1:3_start_0" on_node="scale1-bundle-3" on_node_uuid="scale1-bundle-3" router_node="c09-h05-r630"/>
+ </trigger>
+ <trigger>
<rsc_op id="106" operation="start" operation_key="dummy1:4_start_0" on_node="scale1-bundle-4" on_node_uuid="scale1-bundle-4" router_node="c09-h06-r630"/>
</trigger>
<trigger>
diff --git a/cts/scheduler/exp/bundle-promoted-anticolocation-1.exp b/cts/scheduler/exp/bundle-promoted-anticolocation-1.exp
new file mode 100644
index 0000000..bb2aee1
--- /dev/null
+++ b/cts/scheduler/exp/bundle-promoted-anticolocation-1.exp
@@ -0,0 +1,37 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="52" operation="monitor" operation_key="vip_monitor_10000" on_node="node1" on_node_uuid="1">
+ <primitive id="vip" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" cidr_netmask="32" ip="192.168.22.81"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="51" operation="start" operation_key="vip_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="51" operation="start" operation_key="vip_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="vip" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_name="start" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" cidr_netmask="32" ip="192.168.22.81"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="50" operation="stop" operation_key="vip_stop_0" on_node="node3" on_node_uuid="3"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="50" operation="stop" operation_key="vip_stop_0" on_node="node3" on_node_uuid="3">
+ <primitive id="vip" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" cidr_netmask="32" ip="192.168.22.81"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/bundle-promoted-anticolocation-2.exp b/cts/scheduler/exp/bundle-promoted-anticolocation-2.exp
new file mode 100644
index 0000000..bb2aee1
--- /dev/null
+++ b/cts/scheduler/exp/bundle-promoted-anticolocation-2.exp
@@ -0,0 +1,37 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="52" operation="monitor" operation_key="vip_monitor_10000" on_node="node1" on_node_uuid="1">
+ <primitive id="vip" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" cidr_netmask="32" ip="192.168.22.81"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="51" operation="start" operation_key="vip_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="51" operation="start" operation_key="vip_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="vip" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_name="start" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" cidr_netmask="32" ip="192.168.22.81"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="50" operation="stop" operation_key="vip_stop_0" on_node="node3" on_node_uuid="3"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="50" operation="stop" operation_key="vip_stop_0" on_node="node3" on_node_uuid="3">
+ <primitive id="vip" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" cidr_netmask="32" ip="192.168.22.81"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/bundle-promoted-anticolocation-3.exp b/cts/scheduler/exp/bundle-promoted-anticolocation-3.exp
new file mode 100644
index 0000000..7febd99
--- /dev/null
+++ b/cts/scheduler/exp/bundle-promoted-anticolocation-3.exp
@@ -0,0 +1,179 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="33" operation="monitor" operation_key="base_monitor_15000" internal_operation_key="base:1_monitor_15000" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2">
+ <primitive id="base" long-id="base:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-1" CRM_meta_on_node_uuid="base-bundle-1" CRM_meta_op_target_rc="8" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="15000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="32" operation="promote" operation_key="base_promote_0" internal_operation_key="base:1_promote_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="32" operation="promote" operation_key="base_promote_0" internal_operation_key="base:1_promote_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2">
+ <primitive id="base" long-id="base:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-1" CRM_meta_on_node_uuid="base-bundle-1" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="10" operation="cancel" operation_key="base_monitor_16000" internal_operation_key="base:1_monitor_16000" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="44" operation="promote" operation_key="base-bundle-clone_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="10" operation="cancel" operation_key="base_monitor_16000" internal_operation_key="base:1_monitor_16000" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2">
+ <primitive id="base" long-id="base:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-1" CRM_meta_on_node_uuid="base-bundle-1" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="39" operation="monitor" operation_key="base_monitor_16000" internal_operation_key="base:2_monitor_16000" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3">
+ <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_physical_host="node3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="37" operation="demote" operation_key="base_demote_0" internal_operation_key="base:2_demote_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="37" operation="demote" operation_key="base_demote_0" internal_operation_key="base:2_demote_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3">
+ <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_physical_host="node3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="11" operation="cancel" operation_key="base_monitor_15000" internal_operation_key="base:2_monitor_15000" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="46" operation="demote" operation_key="base-bundle-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="11" operation="cancel" operation_key="base_monitor_15000" internal_operation_key="base:2_monitor_15000" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3">
+ <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="15000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="6" priority="1000000">
+ <action_set>
+ <pseudo_event id="47" operation="demoted" operation_key="base-bundle-clone_demoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="37" operation="demote" operation_key="base_demote_0" internal_operation_key="base:2_demote_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="46" operation="demote" operation_key="base-bundle-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <pseudo_event id="46" operation="demote" operation_key="base-bundle-clone_demote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="50" operation="demote" operation_key="base-bundle_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8" priority="1000000">
+ <action_set>
+ <pseudo_event id="45" operation="promoted" operation_key="base-bundle-clone_promoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="32" operation="promote" operation_key="base_promote_0" internal_operation_key="base:1_promote_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <pseudo_event id="44" operation="promote" operation_key="base-bundle-clone_promote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="47" operation="demoted" operation_key="base-bundle-clone_demoted_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="48" operation="promote" operation_key="base-bundle_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="10" priority="1000000">
+ <action_set>
+ <pseudo_event id="51" operation="demoted" operation_key="base-bundle_demoted_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="47" operation="demoted" operation_key="base-bundle-clone_demoted_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="50" operation="demote" operation_key="base-bundle_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="11">
+ <action_set>
+ <pseudo_event id="50" operation="demote" operation_key="base-bundle_demote_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="12" priority="1000000">
+ <action_set>
+ <pseudo_event id="49" operation="promoted" operation_key="base-bundle_promoted_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="45" operation="promoted" operation_key="base-bundle-clone_promoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="13">
+ <action_set>
+ <pseudo_event id="48" operation="promote" operation_key="base-bundle_promote_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="51" operation="demoted" operation_key="base-bundle_demoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/bundle-promoted-anticolocation-4.exp b/cts/scheduler/exp/bundle-promoted-anticolocation-4.exp
new file mode 100644
index 0000000..7febd99
--- /dev/null
+++ b/cts/scheduler/exp/bundle-promoted-anticolocation-4.exp
@@ -0,0 +1,179 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="33" operation="monitor" operation_key="base_monitor_15000" internal_operation_key="base:1_monitor_15000" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2">
+ <primitive id="base" long-id="base:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-1" CRM_meta_on_node_uuid="base-bundle-1" CRM_meta_op_target_rc="8" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="15000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="32" operation="promote" operation_key="base_promote_0" internal_operation_key="base:1_promote_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="32" operation="promote" operation_key="base_promote_0" internal_operation_key="base:1_promote_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2">
+ <primitive id="base" long-id="base:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-1" CRM_meta_on_node_uuid="base-bundle-1" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="10" operation="cancel" operation_key="base_monitor_16000" internal_operation_key="base:1_monitor_16000" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="44" operation="promote" operation_key="base-bundle-clone_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="10" operation="cancel" operation_key="base_monitor_16000" internal_operation_key="base:1_monitor_16000" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2">
+ <primitive id="base" long-id="base:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-1" CRM_meta_on_node_uuid="base-bundle-1" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="39" operation="monitor" operation_key="base_monitor_16000" internal_operation_key="base:2_monitor_16000" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3">
+ <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_physical_host="node3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="37" operation="demote" operation_key="base_demote_0" internal_operation_key="base:2_demote_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="37" operation="demote" operation_key="base_demote_0" internal_operation_key="base:2_demote_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3">
+ <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_physical_host="node3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="11" operation="cancel" operation_key="base_monitor_15000" internal_operation_key="base:2_monitor_15000" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="46" operation="demote" operation_key="base-bundle-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="11" operation="cancel" operation_key="base_monitor_15000" internal_operation_key="base:2_monitor_15000" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3">
+ <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="15000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="6" priority="1000000">
+ <action_set>
+ <pseudo_event id="47" operation="demoted" operation_key="base-bundle-clone_demoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="37" operation="demote" operation_key="base_demote_0" internal_operation_key="base:2_demote_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="46" operation="demote" operation_key="base-bundle-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <pseudo_event id="46" operation="demote" operation_key="base-bundle-clone_demote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="50" operation="demote" operation_key="base-bundle_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8" priority="1000000">
+ <action_set>
+ <pseudo_event id="45" operation="promoted" operation_key="base-bundle-clone_promoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="32" operation="promote" operation_key="base_promote_0" internal_operation_key="base:1_promote_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <pseudo_event id="44" operation="promote" operation_key="base-bundle-clone_promote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="47" operation="demoted" operation_key="base-bundle-clone_demoted_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="48" operation="promote" operation_key="base-bundle_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="10" priority="1000000">
+ <action_set>
+ <pseudo_event id="51" operation="demoted" operation_key="base-bundle_demoted_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="47" operation="demoted" operation_key="base-bundle-clone_demoted_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="50" operation="demote" operation_key="base-bundle_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="11">
+ <action_set>
+ <pseudo_event id="50" operation="demote" operation_key="base-bundle_demote_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="12" priority="1000000">
+ <action_set>
+ <pseudo_event id="49" operation="promoted" operation_key="base-bundle_promoted_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="45" operation="promoted" operation_key="base-bundle-clone_promoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="13">
+ <action_set>
+ <pseudo_event id="48" operation="promote" operation_key="base-bundle_promote_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="51" operation="demoted" operation_key="base-bundle_demoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/bundle-promoted-anticolocation-5.exp b/cts/scheduler/exp/bundle-promoted-anticolocation-5.exp
new file mode 100644
index 0000000..d5861ab
--- /dev/null
+++ b/cts/scheduler/exp/bundle-promoted-anticolocation-5.exp
@@ -0,0 +1,179 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="41" operation="monitor" operation_key="bundle-a-rsc_monitor_15000" internal_operation_key="bundle-a-rsc:2_monitor_15000" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-2" CRM_meta_on_node_uuid="bundle-a-2" CRM_meta_op_target_rc="8" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="15000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="40" operation="promote" operation_key="bundle-a-rsc_promote_0" internal_operation_key="bundle-a-rsc:2_promote_0" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="40" operation="promote" operation_key="bundle-a-rsc_promote_0" internal_operation_key="bundle-a-rsc:2_promote_0" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-2" CRM_meta_on_node_uuid="bundle-a-2" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="19" operation="cancel" operation_key="bundle-a-rsc_monitor_16000" internal_operation_key="bundle-a-rsc:2_monitor_16000" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="52" operation="promote" operation_key="bundle-a-clone_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="19" operation="cancel" operation_key="bundle-a-rsc_monitor_16000" internal_operation_key="bundle-a-rsc:2_monitor_16000" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-2" CRM_meta_on_node_uuid="bundle-a-2" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="47" operation="monitor" operation_key="bundle-a-rsc_monitor_16000" internal_operation_key="bundle-a-rsc:1_monitor_16000" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-1" CRM_meta_on_node_uuid="bundle-a-1" CRM_meta_physical_host="node3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="45" operation="demote" operation_key="bundle-a-rsc_demote_0" internal_operation_key="bundle-a-rsc:1_demote_0" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="45" operation="demote" operation_key="bundle-a-rsc_demote_0" internal_operation_key="bundle-a-rsc:1_demote_0" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-1" CRM_meta_on_node_uuid="bundle-a-1" CRM_meta_physical_host="node3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="14" operation="cancel" operation_key="bundle-a-rsc_monitor_15000" internal_operation_key="bundle-a-rsc:1_monitor_15000" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="54" operation="demote" operation_key="bundle-a-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="14" operation="cancel" operation_key="bundle-a-rsc_monitor_15000" internal_operation_key="bundle-a-rsc:1_monitor_15000" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-1" CRM_meta_on_node_uuid="bundle-a-1" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="15000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="6" priority="1000000">
+ <action_set>
+ <pseudo_event id="55" operation="demoted" operation_key="bundle-a-clone_demoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="45" operation="demote" operation_key="bundle-a-rsc_demote_0" internal_operation_key="bundle-a-rsc:1_demote_0" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="54" operation="demote" operation_key="bundle-a-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <pseudo_event id="54" operation="demote" operation_key="bundle-a-clone_demote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="58" operation="demote" operation_key="bundle-a_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8" priority="1000000">
+ <action_set>
+ <pseudo_event id="53" operation="promoted" operation_key="bundle-a-clone_promoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="40" operation="promote" operation_key="bundle-a-rsc_promote_0" internal_operation_key="bundle-a-rsc:2_promote_0" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <pseudo_event id="52" operation="promote" operation_key="bundle-a-clone_promote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="55" operation="demoted" operation_key="bundle-a-clone_demoted_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="56" operation="promote" operation_key="bundle-a_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="10" priority="1000000">
+ <action_set>
+ <pseudo_event id="59" operation="demoted" operation_key="bundle-a_demoted_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="55" operation="demoted" operation_key="bundle-a-clone_demoted_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="58" operation="demote" operation_key="bundle-a_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="11">
+ <action_set>
+ <pseudo_event id="58" operation="demote" operation_key="bundle-a_demote_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="12" priority="1000000">
+ <action_set>
+ <pseudo_event id="57" operation="promoted" operation_key="bundle-a_promoted_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="53" operation="promoted" operation_key="bundle-a-clone_promoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="13">
+ <action_set>
+ <pseudo_event id="56" operation="promote" operation_key="bundle-a_promote_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="59" operation="demoted" operation_key="bundle-a_demoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/bundle-promoted-anticolocation-6.exp b/cts/scheduler/exp/bundle-promoted-anticolocation-6.exp
new file mode 100644
index 0000000..d5861ab
--- /dev/null
+++ b/cts/scheduler/exp/bundle-promoted-anticolocation-6.exp
@@ -0,0 +1,179 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="41" operation="monitor" operation_key="bundle-a-rsc_monitor_15000" internal_operation_key="bundle-a-rsc:2_monitor_15000" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-2" CRM_meta_on_node_uuid="bundle-a-2" CRM_meta_op_target_rc="8" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="15000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="40" operation="promote" operation_key="bundle-a-rsc_promote_0" internal_operation_key="bundle-a-rsc:2_promote_0" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="40" operation="promote" operation_key="bundle-a-rsc_promote_0" internal_operation_key="bundle-a-rsc:2_promote_0" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-2" CRM_meta_on_node_uuid="bundle-a-2" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="19" operation="cancel" operation_key="bundle-a-rsc_monitor_16000" internal_operation_key="bundle-a-rsc:2_monitor_16000" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="52" operation="promote" operation_key="bundle-a-clone_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="19" operation="cancel" operation_key="bundle-a-rsc_monitor_16000" internal_operation_key="bundle-a-rsc:2_monitor_16000" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-2" CRM_meta_on_node_uuid="bundle-a-2" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="47" operation="monitor" operation_key="bundle-a-rsc_monitor_16000" internal_operation_key="bundle-a-rsc:1_monitor_16000" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-1" CRM_meta_on_node_uuid="bundle-a-1" CRM_meta_physical_host="node3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="45" operation="demote" operation_key="bundle-a-rsc_demote_0" internal_operation_key="bundle-a-rsc:1_demote_0" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="45" operation="demote" operation_key="bundle-a-rsc_demote_0" internal_operation_key="bundle-a-rsc:1_demote_0" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-1" CRM_meta_on_node_uuid="bundle-a-1" CRM_meta_physical_host="node3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="14" operation="cancel" operation_key="bundle-a-rsc_monitor_15000" internal_operation_key="bundle-a-rsc:1_monitor_15000" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="54" operation="demote" operation_key="bundle-a-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="14" operation="cancel" operation_key="bundle-a-rsc_monitor_15000" internal_operation_key="bundle-a-rsc:1_monitor_15000" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-1" CRM_meta_on_node_uuid="bundle-a-1" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="15000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="6" priority="1000000">
+ <action_set>
+ <pseudo_event id="55" operation="demoted" operation_key="bundle-a-clone_demoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="45" operation="demote" operation_key="bundle-a-rsc_demote_0" internal_operation_key="bundle-a-rsc:1_demote_0" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="54" operation="demote" operation_key="bundle-a-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <pseudo_event id="54" operation="demote" operation_key="bundle-a-clone_demote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="58" operation="demote" operation_key="bundle-a_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8" priority="1000000">
+ <action_set>
+ <pseudo_event id="53" operation="promoted" operation_key="bundle-a-clone_promoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="40" operation="promote" operation_key="bundle-a-rsc_promote_0" internal_operation_key="bundle-a-rsc:2_promote_0" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <pseudo_event id="52" operation="promote" operation_key="bundle-a-clone_promote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="55" operation="demoted" operation_key="bundle-a-clone_demoted_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="56" operation="promote" operation_key="bundle-a_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="10" priority="1000000">
+ <action_set>
+ <pseudo_event id="59" operation="demoted" operation_key="bundle-a_demoted_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="55" operation="demoted" operation_key="bundle-a-clone_demoted_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="58" operation="demote" operation_key="bundle-a_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="11">
+ <action_set>
+ <pseudo_event id="58" operation="demote" operation_key="bundle-a_demote_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="12" priority="1000000">
+ <action_set>
+ <pseudo_event id="57" operation="promoted" operation_key="bundle-a_promoted_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="53" operation="promoted" operation_key="bundle-a-clone_promoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="13">
+ <action_set>
+ <pseudo_event id="56" operation="promote" operation_key="bundle-a_promote_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="59" operation="demoted" operation_key="bundle-a_demoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/bundle-promoted-colocation-1.exp b/cts/scheduler/exp/bundle-promoted-colocation-1.exp
new file mode 100644
index 0000000..8d7ea7a
--- /dev/null
+++ b/cts/scheduler/exp/bundle-promoted-colocation-1.exp
@@ -0,0 +1,37 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="52" operation="monitor" operation_key="vip_monitor_10000" on_node="node3" on_node_uuid="3">
+ <primitive id="vip" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" cidr_netmask="32" ip="192.168.22.81"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="51" operation="start" operation_key="vip_start_0" on_node="node3" on_node_uuid="3"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="51" operation="start" operation_key="vip_start_0" on_node="node3" on_node_uuid="3">
+ <primitive id="vip" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_name="start" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" cidr_netmask="32" ip="192.168.22.81"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="50" operation="stop" operation_key="vip_stop_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="50" operation="stop" operation_key="vip_stop_0" on_node="node1" on_node_uuid="1">
+ <primitive id="vip" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" cidr_netmask="32" ip="192.168.22.81"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/bundle-promoted-colocation-2.exp b/cts/scheduler/exp/bundle-promoted-colocation-2.exp
new file mode 100644
index 0000000..8d7ea7a
--- /dev/null
+++ b/cts/scheduler/exp/bundle-promoted-colocation-2.exp
@@ -0,0 +1,37 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="52" operation="monitor" operation_key="vip_monitor_10000" on_node="node3" on_node_uuid="3">
+ <primitive id="vip" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" cidr_netmask="32" ip="192.168.22.81"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="51" operation="start" operation_key="vip_start_0" on_node="node3" on_node_uuid="3"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="51" operation="start" operation_key="vip_start_0" on_node="node3" on_node_uuid="3">
+ <primitive id="vip" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_name="start" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" cidr_netmask="32" ip="192.168.22.81"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="50" operation="stop" operation_key="vip_stop_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="50" operation="stop" operation_key="vip_stop_0" on_node="node1" on_node_uuid="1">
+ <primitive id="vip" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" cidr_netmask="32" ip="192.168.22.81"/>
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/bundle-promoted-colocation-3.exp b/cts/scheduler/exp/bundle-promoted-colocation-3.exp
new file mode 100644
index 0000000..1963bbb
--- /dev/null
+++ b/cts/scheduler/exp/bundle-promoted-colocation-3.exp
@@ -0,0 +1,179 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="33" operation="monitor" operation_key="base_monitor_15000" internal_operation_key="base:0_monitor_15000" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node1">
+ <primitive id="base" long-id="base:0" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-0" CRM_meta_on_node_uuid="base-bundle-0" CRM_meta_op_target_rc="8" CRM_meta_physical_host="node1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="15000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="32" operation="promote" operation_key="base_promote_0" internal_operation_key="base:0_promote_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="32" operation="promote" operation_key="base_promote_0" internal_operation_key="base:0_promote_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node1">
+ <primitive id="base" long-id="base:0" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-0" CRM_meta_on_node_uuid="base-bundle-0" CRM_meta_physical_host="node1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="7" operation="cancel" operation_key="base_monitor_16000" internal_operation_key="base:0_monitor_16000" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="44" operation="promote" operation_key="base-bundle-clone_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="7" operation="cancel" operation_key="base_monitor_16000" internal_operation_key="base:0_monitor_16000" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node1">
+ <primitive id="base" long-id="base:0" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-0" CRM_meta_on_node_uuid="base-bundle-0" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="37" operation="monitor" operation_key="base_monitor_16000" internal_operation_key="base:2_monitor_16000" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3">
+ <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_physical_host="node3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="35" operation="demote" operation_key="base_demote_0" internal_operation_key="base:2_demote_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="35" operation="demote" operation_key="base_demote_0" internal_operation_key="base:2_demote_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3">
+ <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_physical_host="node3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="11" operation="cancel" operation_key="base_monitor_15000" internal_operation_key="base:2_monitor_15000" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="46" operation="demote" operation_key="base-bundle-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="11" operation="cancel" operation_key="base_monitor_15000" internal_operation_key="base:2_monitor_15000" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3">
+ <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="15000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="6" priority="1000000">
+ <action_set>
+ <pseudo_event id="47" operation="demoted" operation_key="base-bundle-clone_demoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="35" operation="demote" operation_key="base_demote_0" internal_operation_key="base:2_demote_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="46" operation="demote" operation_key="base-bundle-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <pseudo_event id="46" operation="demote" operation_key="base-bundle-clone_demote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="50" operation="demote" operation_key="base-bundle_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8" priority="1000000">
+ <action_set>
+ <pseudo_event id="45" operation="promoted" operation_key="base-bundle-clone_promoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="32" operation="promote" operation_key="base_promote_0" internal_operation_key="base:0_promote_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <pseudo_event id="44" operation="promote" operation_key="base-bundle-clone_promote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="47" operation="demoted" operation_key="base-bundle-clone_demoted_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="48" operation="promote" operation_key="base-bundle_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="10" priority="1000000">
+ <action_set>
+ <pseudo_event id="51" operation="demoted" operation_key="base-bundle_demoted_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="47" operation="demoted" operation_key="base-bundle-clone_demoted_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="50" operation="demote" operation_key="base-bundle_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="11">
+ <action_set>
+ <pseudo_event id="50" operation="demote" operation_key="base-bundle_demote_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="12" priority="1000000">
+ <action_set>
+ <pseudo_event id="49" operation="promoted" operation_key="base-bundle_promoted_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="45" operation="promoted" operation_key="base-bundle-clone_promoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="13">
+ <action_set>
+ <pseudo_event id="48" operation="promote" operation_key="base-bundle_promote_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="51" operation="demoted" operation_key="base-bundle_demoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/bundle-promoted-colocation-4.exp b/cts/scheduler/exp/bundle-promoted-colocation-4.exp
new file mode 100644
index 0000000..1963bbb
--- /dev/null
+++ b/cts/scheduler/exp/bundle-promoted-colocation-4.exp
@@ -0,0 +1,179 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="33" operation="monitor" operation_key="base_monitor_15000" internal_operation_key="base:0_monitor_15000" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node1">
+ <primitive id="base" long-id="base:0" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-0" CRM_meta_on_node_uuid="base-bundle-0" CRM_meta_op_target_rc="8" CRM_meta_physical_host="node1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="15000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="32" operation="promote" operation_key="base_promote_0" internal_operation_key="base:0_promote_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="32" operation="promote" operation_key="base_promote_0" internal_operation_key="base:0_promote_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node1">
+ <primitive id="base" long-id="base:0" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-0" CRM_meta_on_node_uuid="base-bundle-0" CRM_meta_physical_host="node1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="7" operation="cancel" operation_key="base_monitor_16000" internal_operation_key="base:0_monitor_16000" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="44" operation="promote" operation_key="base-bundle-clone_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="7" operation="cancel" operation_key="base_monitor_16000" internal_operation_key="base:0_monitor_16000" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node1">
+ <primitive id="base" long-id="base:0" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-0" CRM_meta_on_node_uuid="base-bundle-0" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="37" operation="monitor" operation_key="base_monitor_16000" internal_operation_key="base:2_monitor_16000" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3">
+ <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_physical_host="node3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="35" operation="demote" operation_key="base_demote_0" internal_operation_key="base:2_demote_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="35" operation="demote" operation_key="base_demote_0" internal_operation_key="base:2_demote_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3">
+ <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_physical_host="node3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="11" operation="cancel" operation_key="base_monitor_15000" internal_operation_key="base:2_monitor_15000" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="46" operation="demote" operation_key="base-bundle-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="11" operation="cancel" operation_key="base_monitor_15000" internal_operation_key="base:2_monitor_15000" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3">
+ <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="15000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="6" priority="1000000">
+ <action_set>
+ <pseudo_event id="47" operation="demoted" operation_key="base-bundle-clone_demoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="35" operation="demote" operation_key="base_demote_0" internal_operation_key="base:2_demote_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="46" operation="demote" operation_key="base-bundle-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <pseudo_event id="46" operation="demote" operation_key="base-bundle-clone_demote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="50" operation="demote" operation_key="base-bundle_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8" priority="1000000">
+ <action_set>
+ <pseudo_event id="45" operation="promoted" operation_key="base-bundle-clone_promoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="32" operation="promote" operation_key="base_promote_0" internal_operation_key="base:0_promote_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <pseudo_event id="44" operation="promote" operation_key="base-bundle-clone_promote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="47" operation="demoted" operation_key="base-bundle-clone_demoted_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="48" operation="promote" operation_key="base-bundle_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="10" priority="1000000">
+ <action_set>
+ <pseudo_event id="51" operation="demoted" operation_key="base-bundle_demoted_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="47" operation="demoted" operation_key="base-bundle-clone_demoted_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="50" operation="demote" operation_key="base-bundle_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="11">
+ <action_set>
+ <pseudo_event id="50" operation="demote" operation_key="base-bundle_demote_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="12" priority="1000000">
+ <action_set>
+ <pseudo_event id="49" operation="promoted" operation_key="base-bundle_promoted_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="45" operation="promoted" operation_key="base-bundle-clone_promoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="13">
+ <action_set>
+ <pseudo_event id="48" operation="promote" operation_key="base-bundle_promote_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="51" operation="demoted" operation_key="base-bundle_demoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/bundle-promoted-colocation-5.exp b/cts/scheduler/exp/bundle-promoted-colocation-5.exp
new file mode 100644
index 0000000..d3c6df3
--- /dev/null
+++ b/cts/scheduler/exp/bundle-promoted-colocation-5.exp
@@ -0,0 +1,179 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="41" operation="monitor" operation_key="bundle-a-rsc_monitor_15000" internal_operation_key="bundle-a-rsc:2_monitor_15000" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-2" CRM_meta_on_node_uuid="bundle-a-2" CRM_meta_op_target_rc="8" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="15000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="40" operation="promote" operation_key="bundle-a-rsc_promote_0" internal_operation_key="bundle-a-rsc:2_promote_0" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="40" operation="promote" operation_key="bundle-a-rsc_promote_0" internal_operation_key="bundle-a-rsc:2_promote_0" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-2" CRM_meta_on_node_uuid="bundle-a-2" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="19" operation="cancel" operation_key="bundle-a-rsc_monitor_16000" internal_operation_key="bundle-a-rsc:2_monitor_16000" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="52" operation="promote" operation_key="bundle-a-clone_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="19" operation="cancel" operation_key="bundle-a-rsc_monitor_16000" internal_operation_key="bundle-a-rsc:2_monitor_16000" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-2" CRM_meta_on_node_uuid="bundle-a-2" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="45" operation="monitor" operation_key="bundle-a-rsc_monitor_16000" internal_operation_key="bundle-a-rsc:1_monitor_16000" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-1" CRM_meta_on_node_uuid="bundle-a-1" CRM_meta_physical_host="node3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="43" operation="demote" operation_key="bundle-a-rsc_demote_0" internal_operation_key="bundle-a-rsc:1_demote_0" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="43" operation="demote" operation_key="bundle-a-rsc_demote_0" internal_operation_key="bundle-a-rsc:1_demote_0" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-1" CRM_meta_on_node_uuid="bundle-a-1" CRM_meta_physical_host="node3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="14" operation="cancel" operation_key="bundle-a-rsc_monitor_15000" internal_operation_key="bundle-a-rsc:1_monitor_15000" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="54" operation="demote" operation_key="bundle-a-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="14" operation="cancel" operation_key="bundle-a-rsc_monitor_15000" internal_operation_key="bundle-a-rsc:1_monitor_15000" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-1" CRM_meta_on_node_uuid="bundle-a-1" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="15000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="6" priority="1000000">
+ <action_set>
+ <pseudo_event id="55" operation="demoted" operation_key="bundle-a-clone_demoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="43" operation="demote" operation_key="bundle-a-rsc_demote_0" internal_operation_key="bundle-a-rsc:1_demote_0" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="54" operation="demote" operation_key="bundle-a-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <pseudo_event id="54" operation="demote" operation_key="bundle-a-clone_demote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="58" operation="demote" operation_key="bundle-a_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8" priority="1000000">
+ <action_set>
+ <pseudo_event id="53" operation="promoted" operation_key="bundle-a-clone_promoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="40" operation="promote" operation_key="bundle-a-rsc_promote_0" internal_operation_key="bundle-a-rsc:2_promote_0" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <pseudo_event id="52" operation="promote" operation_key="bundle-a-clone_promote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="55" operation="demoted" operation_key="bundle-a-clone_demoted_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="56" operation="promote" operation_key="bundle-a_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="10" priority="1000000">
+ <action_set>
+ <pseudo_event id="59" operation="demoted" operation_key="bundle-a_demoted_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="55" operation="demoted" operation_key="bundle-a-clone_demoted_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="58" operation="demote" operation_key="bundle-a_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="11">
+ <action_set>
+ <pseudo_event id="58" operation="demote" operation_key="bundle-a_demote_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="12" priority="1000000">
+ <action_set>
+ <pseudo_event id="57" operation="promoted" operation_key="bundle-a_promoted_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="53" operation="promoted" operation_key="bundle-a-clone_promoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="13">
+ <action_set>
+ <pseudo_event id="56" operation="promote" operation_key="bundle-a_promote_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="59" operation="demoted" operation_key="bundle-a_demoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/bundle-promoted-colocation-6.exp b/cts/scheduler/exp/bundle-promoted-colocation-6.exp
new file mode 100644
index 0000000..d3c6df3
--- /dev/null
+++ b/cts/scheduler/exp/bundle-promoted-colocation-6.exp
@@ -0,0 +1,179 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="41" operation="monitor" operation_key="bundle-a-rsc_monitor_15000" internal_operation_key="bundle-a-rsc:2_monitor_15000" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-2" CRM_meta_on_node_uuid="bundle-a-2" CRM_meta_op_target_rc="8" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="15000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="40" operation="promote" operation_key="bundle-a-rsc_promote_0" internal_operation_key="bundle-a-rsc:2_promote_0" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="40" operation="promote" operation_key="bundle-a-rsc_promote_0" internal_operation_key="bundle-a-rsc:2_promote_0" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-2" CRM_meta_on_node_uuid="bundle-a-2" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="19" operation="cancel" operation_key="bundle-a-rsc_monitor_16000" internal_operation_key="bundle-a-rsc:2_monitor_16000" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="52" operation="promote" operation_key="bundle-a-clone_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="19" operation="cancel" operation_key="bundle-a-rsc_monitor_16000" internal_operation_key="bundle-a-rsc:2_monitor_16000" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-2" CRM_meta_on_node_uuid="bundle-a-2" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="45" operation="monitor" operation_key="bundle-a-rsc_monitor_16000" internal_operation_key="bundle-a-rsc:1_monitor_16000" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-1" CRM_meta_on_node_uuid="bundle-a-1" CRM_meta_physical_host="node3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="43" operation="demote" operation_key="bundle-a-rsc_demote_0" internal_operation_key="bundle-a-rsc:1_demote_0" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="43" operation="demote" operation_key="bundle-a-rsc_demote_0" internal_operation_key="bundle-a-rsc:1_demote_0" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-1" CRM_meta_on_node_uuid="bundle-a-1" CRM_meta_physical_host="node3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="14" operation="cancel" operation_key="bundle-a-rsc_monitor_15000" internal_operation_key="bundle-a-rsc:1_monitor_15000" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="54" operation="demote" operation_key="bundle-a-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="14" operation="cancel" operation_key="bundle-a-rsc_monitor_15000" internal_operation_key="bundle-a-rsc:1_monitor_15000" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3">
+ <primitive id="bundle-a-rsc" long-id="bundle-a-rsc:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="bundle-a-1" CRM_meta_on_node_uuid="bundle-a-1" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="15000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="6" priority="1000000">
+ <action_set>
+ <pseudo_event id="55" operation="demoted" operation_key="bundle-a-clone_demoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="43" operation="demote" operation_key="bundle-a-rsc_demote_0" internal_operation_key="bundle-a-rsc:1_demote_0" on_node="bundle-a-1" on_node_uuid="bundle-a-1" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="54" operation="demote" operation_key="bundle-a-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <pseudo_event id="54" operation="demote" operation_key="bundle-a-clone_demote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="58" operation="demote" operation_key="bundle-a_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8" priority="1000000">
+ <action_set>
+ <pseudo_event id="53" operation="promoted" operation_key="bundle-a-clone_promoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="40" operation="promote" operation_key="bundle-a-rsc_promote_0" internal_operation_key="bundle-a-rsc:2_promote_0" on_node="bundle-a-2" on_node_uuid="bundle-a-2" router_node="node2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <pseudo_event id="52" operation="promote" operation_key="bundle-a-clone_promote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="55" operation="demoted" operation_key="bundle-a-clone_demoted_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="56" operation="promote" operation_key="bundle-a_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="10" priority="1000000">
+ <action_set>
+ <pseudo_event id="59" operation="demoted" operation_key="bundle-a_demoted_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="55" operation="demoted" operation_key="bundle-a-clone_demoted_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="58" operation="demote" operation_key="bundle-a_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="11">
+ <action_set>
+ <pseudo_event id="58" operation="demote" operation_key="bundle-a_demote_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="12" priority="1000000">
+ <action_set>
+ <pseudo_event id="57" operation="promoted" operation_key="bundle-a_promoted_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="53" operation="promoted" operation_key="bundle-a-clone_promoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="13">
+ <action_set>
+ <pseudo_event id="56" operation="promote" operation_key="bundle-a_promote_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="59" operation="demoted" operation_key="bundle-a_demoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/bundle-promoted-location-1.exp b/cts/scheduler/exp/bundle-promoted-location-1.exp
new file mode 100644
index 0000000..56e315f
--- /dev/null
+++ b/cts/scheduler/exp/bundle-promoted-location-1.exp
@@ -0,0 +1 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0"/>
diff --git a/cts/scheduler/exp/bundle-promoted-location-2.exp b/cts/scheduler/exp/bundle-promoted-location-2.exp
new file mode 100644
index 0000000..cbb74ba
--- /dev/null
+++ b/cts/scheduler/exp/bundle-promoted-location-2.exp
@@ -0,0 +1,328 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="31" operation="monitor" operation_key="base_monitor_15000" internal_operation_key="base:1_monitor_15000" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2">
+ <primitive id="base" long-id="base:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-1" CRM_meta_on_node_uuid="base-bundle-1" CRM_meta_op_target_rc="8" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="15000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="30" operation="promote" operation_key="base_promote_0" internal_operation_key="base:1_promote_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="30" operation="promote" operation_key="base_promote_0" internal_operation_key="base:1_promote_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2">
+ <primitive id="base" long-id="base:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-1" CRM_meta_on_node_uuid="base-bundle-1" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="9" operation="cancel" operation_key="base_monitor_16000" internal_operation_key="base:1_monitor_16000" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="42" operation="promote" operation_key="base-bundle-clone_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="9" operation="cancel" operation_key="base_monitor_16000" internal_operation_key="base:1_monitor_16000" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2">
+ <primitive id="base" long-id="base:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-1" CRM_meta_on_node_uuid="base-bundle-1" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="36" operation="stop" operation_key="base_stop_0" internal_operation_key="base:0_stop_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node3">
+ <primitive id="base" long-id="base:0" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-0" CRM_meta_on_node_uuid="base-bundle-0" CRM_meta_physical_host="node3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="26" operation="stop" operation_key="base-bundle_stop_0"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="35" operation="demote" operation_key="base_demote_0" internal_operation_key="base:0_demote_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="40" operation="stop" operation_key="base-bundle-clone_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="35" operation="demote" operation_key="base_demote_0" internal_operation_key="base:0_demote_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node3">
+ <primitive id="base" long-id="base:0" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-0" CRM_meta_on_node_uuid="base-bundle-0" CRM_meta_physical_host="node3" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="8" operation="cancel" operation_key="base_monitor_15000" internal_operation_key="base:0_monitor_15000" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="44" operation="demote" operation_key="base-bundle-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="8" operation="cancel" operation_key="base_monitor_15000" internal_operation_key="base:0_monitor_15000" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node3">
+ <primitive id="base" long-id="base:0" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-0" CRM_meta_on_node_uuid="base-bundle-0" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="15000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="6" priority="1000000">
+ <action_set>
+ <pseudo_event id="45" operation="demoted" operation_key="base-bundle-clone_demoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="35" operation="demote" operation_key="base_demote_0" internal_operation_key="base:0_demote_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="44" operation="demote" operation_key="base-bundle-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <pseudo_event id="44" operation="demote" operation_key="base-bundle-clone_demote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="48" operation="demote" operation_key="base-bundle_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8" priority="1000000">
+ <action_set>
+ <pseudo_event id="43" operation="promoted" operation_key="base-bundle-clone_promoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="30" operation="promote" operation_key="base_promote_0" internal_operation_key="base:1_promote_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <pseudo_event id="42" operation="promote" operation_key="base-bundle-clone_promote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="39" operation="running" operation_key="base-bundle-clone_running_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="41" operation="stopped" operation_key="base-bundle-clone_stopped_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="45" operation="demoted" operation_key="base-bundle-clone_demoted_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="46" operation="promote" operation_key="base-bundle_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="10" priority="1000000">
+ <action_set>
+ <pseudo_event id="41" operation="stopped" operation_key="base-bundle-clone_stopped_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="36" operation="stop" operation_key="base_stop_0" internal_operation_key="base:0_stop_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="40" operation="stop" operation_key="base-bundle-clone_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="11">
+ <action_set>
+ <pseudo_event id="40" operation="stop" operation_key="base-bundle-clone_stop_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="26" operation="stop" operation_key="base-bundle_stop_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="45" operation="demoted" operation_key="base-bundle-clone_demoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="12" priority="1000000">
+ <action_set>
+ <pseudo_event id="39" operation="running" operation_key="base-bundle-clone_running_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="38" operation="start" operation_key="base-bundle-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="13">
+ <action_set>
+ <pseudo_event id="38" operation="start" operation_key="base-bundle-clone_start_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="41" operation="stopped" operation_key="base-bundle-clone_stopped_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="45" operation="demoted" operation_key="base-bundle-clone_demoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="14">
+ <action_set>
+ <rsc_op id="13" operation="stop" operation_key="base-bundle-podman-0_stop_0" on_node="node3" on_node_uuid="3">
+ <primitive id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman"/>
+ <attributes CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="localhost/pcmktest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/base-bundle-0" reuse="false" run_cmd="/usr/sbin/pacemaker-remoted" run_opts=" -e PCMK_stderr=1 -e PCMK_remote_port=3121 -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/base-bundle-0:/var/log -p 3121:3121 "/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="14" operation="stop" operation_key="base-bundle-0_stop_0" on_node="node3" on_node_uuid="3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="26" operation="stop" operation_key="base-bundle_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="15">
+ <action_set>
+ <rsc_op id="14" operation="stop" operation_key="base-bundle-0_stop_0" on_node="node3" on_node_uuid="3">
+ <primitive id="base-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="base-bundle-podman-0" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" addr="node3" port="3121"/>
+ <downed>
+ <node id="base-bundle-0"/>
+ </downed>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="35" operation="demote" operation_key="base_demote_0" internal_operation_key="base:0_demote_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node3"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="36" operation="stop" operation_key="base_stop_0" internal_operation_key="base:0_stop_0" on_node="base-bundle-0" on_node_uuid="base-bundle-0" router_node="node3"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="16" priority="1000000">
+ <action_set>
+ <pseudo_event id="49" operation="demoted" operation_key="base-bundle_demoted_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="45" operation="demoted" operation_key="base-bundle-clone_demoted_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="48" operation="demote" operation_key="base-bundle_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="17">
+ <action_set>
+ <pseudo_event id="48" operation="demote" operation_key="base-bundle_demote_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="18" priority="1000000">
+ <action_set>
+ <pseudo_event id="47" operation="promoted" operation_key="base-bundle_promoted_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="43" operation="promoted" operation_key="base-bundle-clone_promoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="19">
+ <action_set>
+ <pseudo_event id="46" operation="promote" operation_key="base-bundle_promote_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="25" operation="running" operation_key="base-bundle_running_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="27" operation="stopped" operation_key="base-bundle_stopped_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="49" operation="demoted" operation_key="base-bundle_demoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="20" priority="1000000">
+ <action_set>
+ <pseudo_event id="27" operation="stopped" operation_key="base-bundle_stopped_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="13" operation="stop" operation_key="base-bundle-podman-0_stop_0" on_node="node3" on_node_uuid="3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="41" operation="stopped" operation_key="base-bundle-clone_stopped_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="21">
+ <action_set>
+ <pseudo_event id="26" operation="stop" operation_key="base-bundle_stop_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="49" operation="demoted" operation_key="base-bundle_demoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="22" priority="1000000">
+ <action_set>
+ <pseudo_event id="25" operation="running" operation_key="base-bundle_running_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="39" operation="running" operation_key="base-bundle-clone_running_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/bundle-promoted-location-3.exp b/cts/scheduler/exp/bundle-promoted-location-3.exp
new file mode 100644
index 0000000..56e315f
--- /dev/null
+++ b/cts/scheduler/exp/bundle-promoted-location-3.exp
@@ -0,0 +1 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0"/>
diff --git a/cts/scheduler/exp/bundle-promoted-location-4.exp b/cts/scheduler/exp/bundle-promoted-location-4.exp
new file mode 100644
index 0000000..56e315f
--- /dev/null
+++ b/cts/scheduler/exp/bundle-promoted-location-4.exp
@@ -0,0 +1 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0"/>
diff --git a/cts/scheduler/exp/bundle-promoted-location-5.exp b/cts/scheduler/exp/bundle-promoted-location-5.exp
new file mode 100644
index 0000000..56e315f
--- /dev/null
+++ b/cts/scheduler/exp/bundle-promoted-location-5.exp
@@ -0,0 +1 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0"/>
diff --git a/cts/scheduler/exp/bundle-promoted-location-6.exp b/cts/scheduler/exp/bundle-promoted-location-6.exp
new file mode 100644
index 0000000..07a6a2d
--- /dev/null
+++ b/cts/scheduler/exp/bundle-promoted-location-6.exp
@@ -0,0 +1,136 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="35" operation="stop" operation_key="base_stop_0" internal_operation_key="base:1_stop_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2">
+ <primitive id="base" long-id="base:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-1" CRM_meta_on_node_uuid="base-bundle-1" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="26" operation="stop" operation_key="base-bundle_stop_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="38" operation="stop" operation_key="base-bundle-clone_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1" priority="1000000">
+ <action_set>
+ <pseudo_event id="39" operation="stopped" operation_key="base-bundle-clone_stopped_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="35" operation="stop" operation_key="base_stop_0" internal_operation_key="base:1_stop_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="38" operation="stop" operation_key="base-bundle-clone_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <pseudo_event id="38" operation="stop" operation_key="base-bundle-clone_stop_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="26" operation="stop" operation_key="base-bundle_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="3" priority="1000000">
+ <action_set>
+ <pseudo_event id="37" operation="running" operation_key="base-bundle-clone_running_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="36" operation="start" operation_key="base-bundle-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <pseudo_event id="36" operation="start" operation_key="base-bundle-clone_start_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="39" operation="stopped" operation_key="base-bundle-clone_stopped_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="17" operation="stop" operation_key="base-bundle-podman-1_stop_0" on_node="node2" on_node_uuid="2">
+ <primitive id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman"/>
+ <attributes CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="localhost/pcmktest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/base-bundle-1" reuse="false" run_cmd="/usr/sbin/pacemaker-remoted" run_opts=" -e PCMK_stderr=1 -e PCMK_remote_port=3121 -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/base-bundle-1:/var/log -p 3121:3121 "/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="18" operation="stop" operation_key="base-bundle-1_stop_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="26" operation="stop" operation_key="base-bundle_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="6">
+ <action_set>
+ <rsc_op id="18" operation="stop" operation_key="base-bundle-1_stop_0" on_node="node2" on_node_uuid="2">
+ <primitive id="base-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="base-bundle-podman-1" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" addr="node2" port="3121"/>
+ <downed>
+ <node id="base-bundle-1"/>
+ </downed>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="35" operation="stop" operation_key="base_stop_0" internal_operation_key="base:1_stop_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7" priority="1000000">
+ <action_set>
+ <pseudo_event id="27" operation="stopped" operation_key="base-bundle_stopped_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="17" operation="stop" operation_key="base-bundle-podman-1_stop_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="39" operation="stopped" operation_key="base-bundle-clone_stopped_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8">
+ <action_set>
+ <pseudo_event id="26" operation="stop" operation_key="base-bundle_stop_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="9" priority="1000000">
+ <action_set>
+ <pseudo_event id="25" operation="running" operation_key="base-bundle_running_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="37" operation="running" operation_key="base-bundle-clone_running_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/bundle-replicas-change.exp b/cts/scheduler/exp/bundle-replicas-change.exp
index b14dbf2..ec89d94 100644
--- a/cts/scheduler/exp/bundle-replicas-change.exp
+++ b/cts/scheduler/exp/bundle-replicas-change.exp
@@ -116,6 +116,9 @@
<rsc_op id="29" operation="start" operation_key="httpd-bundle-2_start_0" on_node="rh74-test" on_node_uuid="3232287163"/>
</trigger>
<trigger>
+ <rsc_op id="35" operation="start" operation_key="httpd:0_start_0" on_node="httpd-bundle-0" on_node_uuid="httpd-bundle-0" router_node="rh74-test"/>
+ </trigger>
+ <trigger>
<rsc_op id="37" operation="start" operation_key="httpd:1_start_0" on_node="httpd-bundle-1" on_node_uuid="httpd-bundle-1" router_node="rh74-test"/>
</trigger>
<trigger>
diff --git a/cts/scheduler/exp/cancel-behind-moving-remote.exp b/cts/scheduler/exp/cancel-behind-moving-remote.exp
index 17759cb..91651ba 100644
--- a/cts/scheduler/exp/cancel-behind-moving-remote.exp
+++ b/cts/scheduler/exp/cancel-behind-moving-remote.exp
@@ -1,46 +1,46 @@
<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
<synapse id="0" priority="1000000">
<action_set>
- <pseudo_event id="146" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-post_notify_running_0">
+ <pseudo_event id="142" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-post_notify_running_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="confirmed-post" CRM_meta_notify_operation="start" CRM_meta_notify_type="post" CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="145" operation="notify" operation_key="rabbitmq-bundle-clone_post_notify_running_0"/>
+ <pseudo_event id="141" operation="notify" operation_key="rabbitmq-bundle-clone_post_notify_running_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="1" priority="1000000">
<action_set>
- <pseudo_event id="145" operation="notify" operation_key="rabbitmq-bundle-clone_post_notify_running_0">
+ <pseudo_event id="141" operation="notify" operation_key="rabbitmq-bundle-clone_post_notify_running_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="post" CRM_meta_notify_operation="start" CRM_meta_notify_type="post" CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="140" operation="running" operation_key="rabbitmq-bundle-clone_running_0"/>
+ <pseudo_event id="136" operation="running" operation_key="rabbitmq-bundle-clone_running_0"/>
</trigger>
<trigger>
- <pseudo_event id="144" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-pre_notify_start_0"/>
+ <pseudo_event id="140" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-pre_notify_start_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="2">
<action_set>
- <pseudo_event id="144" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-pre_notify_start_0">
+ <pseudo_event id="140" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-pre_notify_start_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_key_operation="start" CRM_meta_notify_key_type="confirmed-pre" CRM_meta_notify_operation="start" CRM_meta_notify_type="pre" CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="143" operation="notify" operation_key="rabbitmq-bundle-clone_pre_notify_start_0"/>
+ <pseudo_event id="139" operation="notify" operation_key="rabbitmq-bundle-clone_pre_notify_start_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="3">
<action_set>
- <pseudo_event id="143" operation="notify" operation_key="rabbitmq-bundle-clone_pre_notify_start_0">
+ <pseudo_event id="139" operation="notify" operation_key="rabbitmq-bundle-clone_pre_notify_start_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_notify_key_operation="start" CRM_meta_notify_key_type="pre" CRM_meta_notify_operation="start" CRM_meta_notify_type="pre" CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
@@ -48,824 +48,529 @@
</synapse>
<synapse id="4" priority="1000000">
<action_set>
- <pseudo_event id="140" operation="running" operation_key="rabbitmq-bundle-clone_running_0">
+ <pseudo_event id="136" operation="running" operation_key="rabbitmq-bundle-clone_running_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="139" operation="start" operation_key="rabbitmq-bundle-clone_start_0"/>
+ <pseudo_event id="135" operation="start" operation_key="rabbitmq-bundle-clone_start_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="5">
<action_set>
- <pseudo_event id="139" operation="start" operation_key="rabbitmq-bundle-clone_start_0">
+ <pseudo_event id="135" operation="start" operation_key="rabbitmq-bundle-clone_start_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="true" CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="129" operation="start" operation_key="rabbitmq-bundle_start_0"/>
+ <pseudo_event id="125" operation="start" operation_key="rabbitmq-bundle_start_0"/>
</trigger>
<trigger>
- <pseudo_event id="144" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-pre_notify_start_0"/>
+ <pseudo_event id="140" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-pre_notify_start_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="6" priority="1000000">
<action_set>
- <rsc_op id="384" operation="notify" operation_key="ovndb_servers:0_post_notify_promote_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-2">
- <primitive id="ovndb_servers" long-id="ovndb_servers:0" class="ocf" provider="ovn" type="ovndb-servers"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0 ovndb_servers:1" CRM_meta_notify_start_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1" CRM_meta_notify_stop_resource="ovndb_servers:1" CRM_meta_notify_stop_uname="ovn-dbs-bundle-1" CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-0" CRM_meta_on_node_uuid="ovn-dbs-bundle-0" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
+ <rsc_op id="381" operation="notify" operation_key="ovndb_servers_post_notify_promote_0" internal_operation_key="ovndb_servers:2_post_notify_promote_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:2" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="ovndb_servers:2" CRM_meta_notify_promote_uname="ovn-dbs-bundle-2" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0" CRM_meta_notify_start_uname="ovn-dbs-bundle-0" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-2" CRM_meta_on_node_uuid="ovn-dbs-bundle-2" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="266" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_promoted_0"/>
+ <pseudo_event id="261" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_promoted_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="7">
<action_set>
- <rsc_op id="383" operation="notify" operation_key="ovndb_servers:0_pre_notify_promote_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-2">
- <primitive id="ovndb_servers" long-id="ovndb_servers:0" class="ocf" provider="ovn" type="ovndb-servers"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0 ovndb_servers:1" CRM_meta_notify_start_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1" CRM_meta_notify_stop_resource="ovndb_servers:1" CRM_meta_notify_stop_uname="ovn-dbs-bundle-1" CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-0" CRM_meta_on_node_uuid="ovn-dbs-bundle-0" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
+ <rsc_op id="380" operation="notify" operation_key="ovndb_servers_pre_notify_promote_0" internal_operation_key="ovndb_servers:2_pre_notify_promote_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:2" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="ovndb_servers:2" CRM_meta_notify_promote_uname="ovn-dbs-bundle-2" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0" CRM_meta_notify_start_uname="ovn-dbs-bundle-0" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-2" CRM_meta_on_node_uuid="ovn-dbs-bundle-2" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="264" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_promote_0"/>
+ <pseudo_event id="259" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_promote_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="8" priority="1000000">
<action_set>
- <rsc_op id="376" operation="notify" operation_key="ovndb_servers:0_post_notify_start_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-2">
- <primitive id="ovndb_servers" long-id="ovndb_servers:0" class="ocf" provider="ovn" type="ovndb-servers"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="start" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0 ovndb_servers:1" CRM_meta_notify_start_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1" CRM_meta_notify_stop_resource="ovndb_servers:1" CRM_meta_notify_stop_uname="ovn-dbs-bundle-1" CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-0" CRM_meta_on_node_uuid="ovn-dbs-bundle-0" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
+ <rsc_op id="372" operation="notify" operation_key="ovndb_servers_post_notify_start_0" internal_operation_key="ovndb_servers:2_post_notify_start_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:2" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="start" CRM_meta_notify_promote_resource="ovndb_servers:2" CRM_meta_notify_promote_uname="ovn-dbs-bundle-2" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0" CRM_meta_notify_start_uname="ovn-dbs-bundle-0" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-2" CRM_meta_on_node_uuid="ovn-dbs-bundle-2" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="256" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_running_0"/>
+ <pseudo_event id="251" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_running_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="9">
<action_set>
- <rsc_op id="249" operation="monitor" operation_key="ovndb_servers:0_monitor_30000" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-2">
- <primitive id="ovndb_servers" long-id="ovndb_servers:0" class="ocf" provider="ovn" type="ovndb-servers"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="30000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="true" CRM_meta_on_node="ovn-dbs-bundle-0" CRM_meta_on_node_uuid="ovn-dbs-bundle-0" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="60000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
+ <rsc_op id="371" operation="notify" operation_key="ovndb_servers_pre_notify_start_0" internal_operation_key="ovndb_servers:2_pre_notify_start_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:2" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="start" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="start" CRM_meta_notify_promote_resource="ovndb_servers:2" CRM_meta_notify_promote_uname="ovn-dbs-bundle-2" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0" CRM_meta_notify_start_uname="ovn-dbs-bundle-0" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-2" CRM_meta_on_node_uuid="ovn-dbs-bundle-2" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="227" operation="start" operation_key="ovn-dbs-bundle-0_start_0" on_node="controller-2" on_node_uuid="3"/>
- </trigger>
- <trigger>
- <rsc_op id="248" operation="start" operation_key="ovndb_servers:0_start_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-2"/>
- </trigger>
- <trigger>
- <pseudo_event id="257" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_running_0"/>
- </trigger>
- <trigger>
- <pseudo_event id="267" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_promoted_0"/>
+ <pseudo_event id="249" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_start_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="10">
<action_set>
- <rsc_op id="248" operation="start" operation_key="ovndb_servers:0_start_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-2">
- <primitive id="ovndb_servers" long-id="ovndb_servers:0" class="ocf" provider="ovn" type="ovndb-servers"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="start" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0 ovndb_servers:1" CRM_meta_notify_start_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1" CRM_meta_notify_stop_resource="ovndb_servers:1" CRM_meta_notify_stop_uname="ovn-dbs-bundle-1" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-0" CRM_meta_on_node_uuid="ovn-dbs-bundle-0" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="200000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
+ <rsc_op id="240" operation="monitor" operation_key="ovndb_servers_monitor_10000" internal_operation_key="ovndb_servers:2_monitor_10000" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:2" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="true" CRM_meta_on_node="ovn-dbs-bundle-2" CRM_meta_on_node_uuid="ovn-dbs-bundle-2" CRM_meta_op_target_rc="8" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="60000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="225" operation="start" operation_key="ovn-dbs-bundle-podman-0_start_0" on_node="controller-2" on_node_uuid="3"/>
+ <rsc_op id="239" operation="promote" operation_key="ovndb_servers_promote_0" internal_operation_key="ovndb_servers:2_promote_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1"/>
</trigger>
<trigger>
- <rsc_op id="227" operation="start" operation_key="ovn-dbs-bundle-0_start_0" on_node="controller-2" on_node_uuid="3"/>
+ <pseudo_event id="252" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_running_0"/>
</trigger>
<trigger>
- <pseudo_event id="250" operation="start" operation_key="ovn-dbs-bundle-master_start_0"/>
+ <pseudo_event id="262" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_promoted_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="11" priority="1000000">
+ <synapse id="11">
<action_set>
- <rsc_op id="386" operation="notify" operation_key="ovndb_servers_post_notify_promote_0" internal_operation_key="ovndb_servers:1_post_notify_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-0">
- <primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0 ovndb_servers:1" CRM_meta_notify_start_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1" CRM_meta_notify_stop_resource="ovndb_servers:1" CRM_meta_notify_stop_uname="ovn-dbs-bundle-1" CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
+ <rsc_op id="239" operation="promote" operation_key="ovndb_servers_promote_0" internal_operation_key="ovndb_servers:2_promote_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:2" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="promote" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource="ovndb_servers:2" CRM_meta_notify_promote_uname="ovn-dbs-bundle-2" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0" CRM_meta_notify_start_uname="ovn-dbs-bundle-0" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-2" CRM_meta_on_node_uuid="ovn-dbs-bundle-2" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="50000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="266" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_promoted_0"/>
+ <rsc_op id="67" operation="cancel" operation_key="ovndb_servers_monitor_30000" internal_operation_key="ovndb_servers:2_monitor_30000" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="257" operation="promote" operation_key="ovn-dbs-bundle-master_promote_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="12">
<action_set>
- <rsc_op id="385" operation="notify" operation_key="ovndb_servers_pre_notify_promote_0" internal_operation_key="ovndb_servers:1_pre_notify_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-0">
- <primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0 ovndb_servers:1" CRM_meta_notify_start_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1" CRM_meta_notify_stop_resource="ovndb_servers:1" CRM_meta_notify_stop_uname="ovn-dbs-bundle-1" CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
+ <rsc_op id="67" operation="cancel" operation_key="ovndb_servers_monitor_30000" internal_operation_key="ovndb_servers:2_monitor_30000" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:2" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="30000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="true" CRM_meta_on_node="ovn-dbs-bundle-2" CRM_meta_on_node_uuid="ovn-dbs-bundle-2" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="60000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
</rsc_op>
</action_set>
- <inputs>
- <trigger>
- <pseudo_event id="264" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_promote_0"/>
- </trigger>
- </inputs>
+ <inputs/>
</synapse>
- <synapse id="13">
+ <synapse id="13" priority="1000000">
<action_set>
- <rsc_op id="380" operation="notify" operation_key="ovndb_servers_pre_notify_stop_0" internal_operation_key="ovndb_servers:1_pre_notify_stop_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-2">
- <primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0 ovndb_servers:1" CRM_meta_notify_start_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1" CRM_meta_notify_stop_resource="ovndb_servers:1" CRM_meta_notify_stop_uname="ovn-dbs-bundle-1" CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
+ <rsc_op id="383" operation="notify" operation_key="ovndb_servers:0_post_notify_promote_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:0" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="ovndb_servers:2" CRM_meta_notify_promote_uname="ovn-dbs-bundle-2" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0" CRM_meta_notify_start_uname="ovn-dbs-bundle-0" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-0" CRM_meta_on_node_uuid="ovn-dbs-bundle-0" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="258" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_stop_0"/>
+ <pseudo_event id="261" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_promoted_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="14" priority="1000000">
+ <synapse id="14">
<action_set>
- <rsc_op id="377" operation="notify" operation_key="ovndb_servers_post_notify_start_0" internal_operation_key="ovndb_servers:1_post_notify_start_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-0">
- <primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="start" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0 ovndb_servers:1" CRM_meta_notify_start_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1" CRM_meta_notify_stop_resource="ovndb_servers:1" CRM_meta_notify_stop_uname="ovn-dbs-bundle-1" CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
+ <rsc_op id="382" operation="notify" operation_key="ovndb_servers:0_pre_notify_promote_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:0" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="ovndb_servers:2" CRM_meta_notify_promote_uname="ovn-dbs-bundle-2" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0" CRM_meta_notify_start_uname="ovn-dbs-bundle-0" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-0" CRM_meta_on_node_uuid="ovn-dbs-bundle-0" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="256" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_running_0"/>
+ <pseudo_event id="259" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_promote_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="15">
+ <synapse id="15" priority="1000000">
<action_set>
- <rsc_op id="245" operation="monitor" operation_key="ovndb_servers_monitor_10000" internal_operation_key="ovndb_servers:1_monitor_10000" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-0">
- <primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="true" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_op_target_rc="8" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="60000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
+ <rsc_op id="373" operation="notify" operation_key="ovndb_servers:0_post_notify_start_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:0" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="start" CRM_meta_notify_promote_resource="ovndb_servers:2" CRM_meta_notify_promote_uname="ovn-dbs-bundle-2" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0" CRM_meta_notify_start_uname="ovn-dbs-bundle-0" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-0" CRM_meta_on_node_uuid="ovn-dbs-bundle-0" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="72" operation="start" operation_key="ovn-dbs-bundle-1_start_0" on_node="controller-0" on_node_uuid="1"/>
- </trigger>
- <trigger>
- <rsc_op id="242" operation="start" operation_key="ovndb_servers_start_0" internal_operation_key="ovndb_servers:1_start_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-0"/>
- </trigger>
- <trigger>
- <rsc_op id="244" operation="promote" operation_key="ovndb_servers_promote_0" internal_operation_key="ovndb_servers:1_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-0"/>
- </trigger>
- <trigger>
- <pseudo_event id="257" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_running_0"/>
- </trigger>
- <trigger>
- <pseudo_event id="267" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_promoted_0"/>
+ <pseudo_event id="251" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_running_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="16">
<action_set>
- <rsc_op id="244" operation="promote" operation_key="ovndb_servers_promote_0" internal_operation_key="ovndb_servers:1_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-0">
- <primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="promote" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0 ovndb_servers:1" CRM_meta_notify_start_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1" CRM_meta_notify_stop_resource="ovndb_servers:1" CRM_meta_notify_stop_uname="ovn-dbs-bundle-1" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="50000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
+ <rsc_op id="242" operation="monitor" operation_key="ovndb_servers:0_monitor_30000" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:0" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="30000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="true" CRM_meta_on_node="ovn-dbs-bundle-0" CRM_meta_on_node_uuid="ovn-dbs-bundle-0" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="60000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="68" operation="cancel" operation_key="ovndb_servers_monitor_30000" internal_operation_key="ovndb_servers:1_monitor_30000" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-2"/>
- </trigger>
- <trigger>
- <rsc_op id="72" operation="start" operation_key="ovn-dbs-bundle-1_start_0" on_node="controller-0" on_node_uuid="1"/>
+ <rsc_op id="223" operation="start" operation_key="ovn-dbs-bundle-0_start_0" on_node="controller-0" on_node_uuid="1"/>
</trigger>
<trigger>
- <rsc_op id="230" operation="start" operation_key="ovn-dbs-bundle-podman-1_start_0" on_node="controller-0" on_node_uuid="1"/>
+ <rsc_op id="241" operation="start" operation_key="ovndb_servers:0_start_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0"/>
</trigger>
<trigger>
- <rsc_op id="242" operation="start" operation_key="ovndb_servers_start_0" internal_operation_key="ovndb_servers:1_start_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-0"/>
+ <pseudo_event id="252" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_running_0"/>
</trigger>
<trigger>
- <pseudo_event id="262" operation="promote" operation_key="ovn-dbs-bundle-master_promote_0"/>
+ <pseudo_event id="262" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_promoted_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="17">
<action_set>
- <rsc_op id="243" operation="stop" operation_key="ovndb_servers_stop_0" internal_operation_key="ovndb_servers:1_stop_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-2">
- <primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="stop" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0 ovndb_servers:1" CRM_meta_notify_start_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1" CRM_meta_notify_stop_resource="ovndb_servers:1" CRM_meta_notify_stop_uname="ovn-dbs-bundle-1" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="200000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
- </rsc_op>
- </action_set>
- <inputs>
- <trigger>
- <pseudo_event id="240" operation="stop" operation_key="ovn-dbs-bundle_stop_0"/>
- </trigger>
- <trigger>
- <pseudo_event id="252" operation="stop" operation_key="ovn-dbs-bundle-master_stop_0"/>
- </trigger>
- </inputs>
- </synapse>
- <synapse id="18">
- <action_set>
- <rsc_op id="242" operation="start" operation_key="ovndb_servers_start_0" internal_operation_key="ovndb_servers:1_start_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-0">
- <primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="start" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0 ovndb_servers:1" CRM_meta_notify_start_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1" CRM_meta_notify_stop_resource="ovndb_servers:1" CRM_meta_notify_stop_uname="ovn-dbs-bundle-1" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="200000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
+ <rsc_op id="241" operation="start" operation_key="ovndb_servers:0_start_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:0" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="start" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource="ovndb_servers:2" CRM_meta_notify_promote_uname="ovn-dbs-bundle-2" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0" CRM_meta_notify_start_uname="ovn-dbs-bundle-0" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-0" CRM_meta_on_node_uuid="ovn-dbs-bundle-0" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="200000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="72" operation="start" operation_key="ovn-dbs-bundle-1_start_0" on_node="controller-0" on_node_uuid="1"/>
- </trigger>
- <trigger>
- <rsc_op id="230" operation="start" operation_key="ovn-dbs-bundle-podman-1_start_0" on_node="controller-0" on_node_uuid="1"/>
+ <rsc_op id="221" operation="start" operation_key="ovn-dbs-bundle-podman-0_start_0" on_node="controller-0" on_node_uuid="1"/>
</trigger>
<trigger>
- <rsc_op id="243" operation="stop" operation_key="ovndb_servers_stop_0" internal_operation_key="ovndb_servers:1_stop_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-2"/>
+ <rsc_op id="223" operation="start" operation_key="ovn-dbs-bundle-0_start_0" on_node="controller-0" on_node_uuid="1"/>
</trigger>
<trigger>
- <rsc_op id="248" operation="start" operation_key="ovndb_servers:0_start_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-2"/>
- </trigger>
- <trigger>
- <pseudo_event id="250" operation="start" operation_key="ovn-dbs-bundle-master_start_0"/>
+ <pseudo_event id="245" operation="start" operation_key="ovn-dbs-bundle-master_start_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="19">
+ <synapse id="18" priority="1000000">
<action_set>
- <rsc_op id="68" operation="cancel" operation_key="ovndb_servers_monitor_30000" internal_operation_key="ovndb_servers:1_monitor_30000" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-2">
+ <rsc_op id="385" operation="notify" operation_key="ovndb_servers_post_notify_promote_0" internal_operation_key="ovndb_servers:1_post_notify_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-2">
<primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="30000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="true" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="60000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
- </rsc_op>
- </action_set>
- <inputs/>
- </synapse>
- <synapse id="20" priority="1000000">
- <action_set>
- <rsc_op id="388" operation="notify" operation_key="ovndb_servers_post_notify_promote_0" internal_operation_key="ovndb_servers:2_post_notify_promote_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1">
- <primitive id="ovndb_servers" long-id="ovndb_servers:2" class="ocf" provider="ovn" type="ovndb-servers"/>
- <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0 ovndb_servers:1" CRM_meta_notify_start_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1" CRM_meta_notify_stop_resource="ovndb_servers:1" CRM_meta_notify_stop_uname="ovn-dbs-bundle-1" CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-2" CRM_meta_on_node_uuid="ovn-dbs-bundle-2" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
- </rsc_op>
- </action_set>
- <inputs>
- <trigger>
- <pseudo_event id="266" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_promoted_0"/>
- </trigger>
- </inputs>
- </synapse>
- <synapse id="21">
- <action_set>
- <rsc_op id="387" operation="notify" operation_key="ovndb_servers_pre_notify_promote_0" internal_operation_key="ovndb_servers:2_pre_notify_promote_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1">
- <primitive id="ovndb_servers" long-id="ovndb_servers:2" class="ocf" provider="ovn" type="ovndb-servers"/>
- <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0 ovndb_servers:1" CRM_meta_notify_start_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1" CRM_meta_notify_stop_resource="ovndb_servers:1" CRM_meta_notify_stop_uname="ovn-dbs-bundle-1" CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-2" CRM_meta_on_node_uuid="ovn-dbs-bundle-2" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="ovndb_servers:2" CRM_meta_notify_promote_uname="ovn-dbs-bundle-2" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0" CRM_meta_notify_start_uname="ovn-dbs-bundle-0" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="264" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_promote_0"/>
+ <pseudo_event id="261" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_promoted_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="22" priority="1000000">
- <action_set>
- <rsc_op id="382" operation="notify" operation_key="ovndb_servers_post_notify_stop_0" internal_operation_key="ovndb_servers:2_post_notify_stop_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1">
- <primitive id="ovndb_servers" long-id="ovndb_servers:2" class="ocf" provider="ovn" type="ovndb-servers"/>
- <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="stopped" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0 ovndb_servers:1" CRM_meta_notify_start_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1" CRM_meta_notify_stop_resource="ovndb_servers:1" CRM_meta_notify_stop_uname="ovn-dbs-bundle-1" CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-2" CRM_meta_on_node_uuid="ovn-dbs-bundle-2" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
- </rsc_op>
- </action_set>
- <inputs>
- <trigger>
- <pseudo_event id="260" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_stopped_0"/>
- </trigger>
- </inputs>
- </synapse>
- <synapse id="23">
+ <synapse id="19">
<action_set>
- <rsc_op id="381" operation="notify" operation_key="ovndb_servers_pre_notify_stop_0" internal_operation_key="ovndb_servers:2_pre_notify_stop_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1">
- <primitive id="ovndb_servers" long-id="ovndb_servers:2" class="ocf" provider="ovn" type="ovndb-servers"/>
- <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0 ovndb_servers:1" CRM_meta_notify_start_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1" CRM_meta_notify_stop_resource="ovndb_servers:1" CRM_meta_notify_stop_uname="ovn-dbs-bundle-1" CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-2" CRM_meta_on_node_uuid="ovn-dbs-bundle-2" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
+ <rsc_op id="384" operation="notify" operation_key="ovndb_servers_pre_notify_promote_0" internal_operation_key="ovndb_servers:1_pre_notify_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-2">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="ovndb_servers:2" CRM_meta_notify_promote_uname="ovn-dbs-bundle-2" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0" CRM_meta_notify_start_uname="ovn-dbs-bundle-0" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="258" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_stop_0"/>
+ <pseudo_event id="259" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_promote_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="24" priority="1000000">
+ <synapse id="20" priority="1000000">
<action_set>
- <rsc_op id="379" operation="notify" operation_key="ovndb_servers_post_notify_start_0" internal_operation_key="ovndb_servers:2_post_notify_start_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1">
- <primitive id="ovndb_servers" long-id="ovndb_servers:2" class="ocf" provider="ovn" type="ovndb-servers"/>
- <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="start" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0 ovndb_servers:1" CRM_meta_notify_start_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1" CRM_meta_notify_stop_resource="ovndb_servers:1" CRM_meta_notify_stop_uname="ovn-dbs-bundle-1" CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-2" CRM_meta_on_node_uuid="ovn-dbs-bundle-2" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
+ <rsc_op id="375" operation="notify" operation_key="ovndb_servers_post_notify_start_0" internal_operation_key="ovndb_servers:1_post_notify_start_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-2">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="start" CRM_meta_notify_promote_resource="ovndb_servers:2" CRM_meta_notify_promote_uname="ovn-dbs-bundle-2" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0" CRM_meta_notify_start_uname="ovn-dbs-bundle-0" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="256" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_running_0"/>
+ <pseudo_event id="251" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_running_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="25">
+ <synapse id="21">
<action_set>
- <rsc_op id="378" operation="notify" operation_key="ovndb_servers_pre_notify_start_0" internal_operation_key="ovndb_servers:2_pre_notify_start_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1">
- <primitive id="ovndb_servers" long-id="ovndb_servers:2" class="ocf" provider="ovn" type="ovndb-servers"/>
- <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="start" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="start" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0 ovndb_servers:1" CRM_meta_notify_start_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1" CRM_meta_notify_stop_resource="ovndb_servers:1" CRM_meta_notify_stop_uname="ovn-dbs-bundle-1" CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-2" CRM_meta_on_node_uuid="ovn-dbs-bundle-2" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
+ <rsc_op id="374" operation="notify" operation_key="ovndb_servers_pre_notify_start_0" internal_operation_key="ovndb_servers:1_pre_notify_start_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-2">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 controller-2 controller-1 messaging-0 rabbitmq-bundle-1 messaging-2 controller-2 controller-0 controller-1" CRM_meta_notify_all_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="compute-0 compute-1 controller-0 controller-1 controller-2 database-0 database-1 database-2 messaging-0 messaging-1 messaging-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="ovndb_servers:0" CRM_meta_notify_key_operation="start" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="start" CRM_meta_notify_promote_resource="ovndb_servers:2" CRM_meta_notify_promote_uname="ovn-dbs-bundle-2" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource="ovndb_servers:0" CRM_meta_notify_start_uname="ovn-dbs-bundle-0" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource="ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="yes" manage_northd="yes" master_ip="172.17.1.87" nb_master_port="6641" nb_master_protocol="ssl" ovn_nb_db_cacert="/etc/ipa/ca.crt" ovn_nb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_nb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" ovn_sb_db_cacert="/etc/ipa/ca.crt" ovn_sb_db_cert="/etc/pki/tls/certs/ovn_dbs.crt" ovn_sb_db_privkey="/etc/pki/tls/private/ovn_dbs.key" sb_master_port="6642" sb_master_protocol="ssl"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="254" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_start_0"/>
+ <pseudo_event id="249" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_start_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="26" priority="1000000">
+ <synapse id="22" priority="1000000">
<action_set>
- <pseudo_event id="267" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_promoted_0">
+ <pseudo_event id="262" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_promoted_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="confirmed-post" CRM_meta_notify_operation="promote" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="266" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_promoted_0"/>
+ <pseudo_event id="261" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_promoted_0"/>
</trigger>
<trigger>
- <rsc_op id="384" operation="notify" operation_key="ovndb_servers:0_post_notify_promote_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-2"/>
+ <rsc_op id="381" operation="notify" operation_key="ovndb_servers_post_notify_promote_0" internal_operation_key="ovndb_servers:2_post_notify_promote_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1"/>
</trigger>
<trigger>
- <rsc_op id="386" operation="notify" operation_key="ovndb_servers_post_notify_promote_0" internal_operation_key="ovndb_servers:1_post_notify_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-0"/>
+ <rsc_op id="383" operation="notify" operation_key="ovndb_servers:0_post_notify_promote_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0"/>
</trigger>
<trigger>
- <rsc_op id="388" operation="notify" operation_key="ovndb_servers_post_notify_promote_0" internal_operation_key="ovndb_servers:2_post_notify_promote_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1"/>
+ <rsc_op id="385" operation="notify" operation_key="ovndb_servers_post_notify_promote_0" internal_operation_key="ovndb_servers:1_post_notify_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-2"/>
</trigger>
</inputs>
</synapse>
- <synapse id="27" priority="1000000">
+ <synapse id="23" priority="1000000">
<action_set>
- <pseudo_event id="266" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_promoted_0">
+ <pseudo_event id="261" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_promoted_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_operation="promote" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="263" operation="promoted" operation_key="ovn-dbs-bundle-master_promoted_0"/>
+ <pseudo_event id="258" operation="promoted" operation_key="ovn-dbs-bundle-master_promoted_0"/>
</trigger>
<trigger>
- <pseudo_event id="265" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_promote_0"/>
+ <pseudo_event id="260" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_promote_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="28">
+ <synapse id="24">
<action_set>
- <pseudo_event id="265" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_promote_0">
+ <pseudo_event id="260" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_promote_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="confirmed-pre" CRM_meta_notify_operation="promote" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="264" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_promote_0"/>
+ <pseudo_event id="259" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_promote_0"/>
</trigger>
<trigger>
- <rsc_op id="383" operation="notify" operation_key="ovndb_servers:0_pre_notify_promote_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-2"/>
+ <rsc_op id="380" operation="notify" operation_key="ovndb_servers_pre_notify_promote_0" internal_operation_key="ovndb_servers:2_pre_notify_promote_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1"/>
</trigger>
<trigger>
- <rsc_op id="385" operation="notify" operation_key="ovndb_servers_pre_notify_promote_0" internal_operation_key="ovndb_servers:1_pre_notify_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-0"/>
+ <rsc_op id="382" operation="notify" operation_key="ovndb_servers:0_pre_notify_promote_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0"/>
</trigger>
<trigger>
- <rsc_op id="387" operation="notify" operation_key="ovndb_servers_pre_notify_promote_0" internal_operation_key="ovndb_servers:2_pre_notify_promote_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1"/>
+ <rsc_op id="384" operation="notify" operation_key="ovndb_servers_pre_notify_promote_0" internal_operation_key="ovndb_servers:1_pre_notify_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-2"/>
</trigger>
</inputs>
</synapse>
- <synapse id="29">
+ <synapse id="25">
<action_set>
- <pseudo_event id="264" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_promote_0">
+ <pseudo_event id="259" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_promote_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_operation="promote" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="257" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_running_0"/>
- </trigger>
- <trigger>
- <pseudo_event id="261" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_stopped_0"/>
+ <pseudo_event id="252" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_running_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="30" priority="1000000">
+ <synapse id="26" priority="1000000">
<action_set>
- <pseudo_event id="263" operation="promoted" operation_key="ovn-dbs-bundle-master_promoted_0">
+ <pseudo_event id="258" operation="promoted" operation_key="ovn-dbs-bundle-master_promoted_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <rsc_op id="244" operation="promote" operation_key="ovndb_servers_promote_0" internal_operation_key="ovndb_servers:1_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-0"/>
+ <rsc_op id="239" operation="promote" operation_key="ovndb_servers_promote_0" internal_operation_key="ovndb_servers:2_promote_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1"/>
</trigger>
</inputs>
</synapse>
- <synapse id="31">
+ <synapse id="27">
<action_set>
- <pseudo_event id="262" operation="promote" operation_key="ovn-dbs-bundle-master_promote_0">
+ <pseudo_event id="257" operation="promote" operation_key="ovn-dbs-bundle-master_promote_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="251" operation="running" operation_key="ovn-dbs-bundle-master_running_0"/>
- </trigger>
- <trigger>
- <pseudo_event id="253" operation="stopped" operation_key="ovn-dbs-bundle-master_stopped_0"/>
- </trigger>
- <trigger>
- <pseudo_event id="265" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_promote_0"/>
- </trigger>
- <trigger>
- <pseudo_event id="274" operation="promote" operation_key="ovn-dbs-bundle_promote_0"/>
+ <pseudo_event id="246" operation="running" operation_key="ovn-dbs-bundle-master_running_0"/>
</trigger>
- </inputs>
- </synapse>
- <synapse id="32" priority="1000000">
- <action_set>
- <pseudo_event id="261" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_stopped_0">
- <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="stopped" CRM_meta_notify_key_type="confirmed-post" CRM_meta_notify_operation="stop" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
- </pseudo_event>
- </action_set>
- <inputs>
<trigger>
- <pseudo_event id="260" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_stopped_0"/>
+ <pseudo_event id="260" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_promote_0"/>
</trigger>
<trigger>
- <rsc_op id="382" operation="notify" operation_key="ovndb_servers_post_notify_stop_0" internal_operation_key="ovndb_servers:2_post_notify_stop_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1"/>
+ <pseudo_event id="269" operation="promote" operation_key="ovn-dbs-bundle_promote_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="33" priority="1000000">
+ <synapse id="28" priority="1000000">
<action_set>
- <pseudo_event id="260" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_stopped_0">
- <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="stopped" CRM_meta_notify_key_type="post" CRM_meta_notify_operation="stop" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
- </pseudo_event>
- </action_set>
- <inputs>
- <trigger>
- <pseudo_event id="253" operation="stopped" operation_key="ovn-dbs-bundle-master_stopped_0"/>
- </trigger>
- <trigger>
- <pseudo_event id="259" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_stop_0"/>
- </trigger>
- </inputs>
- </synapse>
- <synapse id="34">
- <action_set>
- <pseudo_event id="259" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_stop_0">
- <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="confirmed-pre" CRM_meta_notify_operation="stop" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
- </pseudo_event>
- </action_set>
- <inputs>
- <trigger>
- <pseudo_event id="258" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_stop_0"/>
- </trigger>
- <trigger>
- <rsc_op id="380" operation="notify" operation_key="ovndb_servers_pre_notify_stop_0" internal_operation_key="ovndb_servers:1_pre_notify_stop_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-2"/>
- </trigger>
- <trigger>
- <rsc_op id="381" operation="notify" operation_key="ovndb_servers_pre_notify_stop_0" internal_operation_key="ovndb_servers:2_pre_notify_stop_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1"/>
- </trigger>
- </inputs>
- </synapse>
- <synapse id="35">
- <action_set>
- <pseudo_event id="258" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_stop_0">
- <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="pre" CRM_meta_notify_operation="stop" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
- </pseudo_event>
- </action_set>
- <inputs/>
- </synapse>
- <synapse id="36" priority="1000000">
- <action_set>
- <pseudo_event id="257" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_running_0">
+ <pseudo_event id="252" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_running_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="confirmed-post" CRM_meta_notify_operation="start" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="256" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_running_0"/>
+ <pseudo_event id="251" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_running_0"/>
</trigger>
<trigger>
- <rsc_op id="376" operation="notify" operation_key="ovndb_servers:0_post_notify_start_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-2"/>
+ <rsc_op id="372" operation="notify" operation_key="ovndb_servers_post_notify_start_0" internal_operation_key="ovndb_servers:2_post_notify_start_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1"/>
</trigger>
<trigger>
- <rsc_op id="377" operation="notify" operation_key="ovndb_servers_post_notify_start_0" internal_operation_key="ovndb_servers:1_post_notify_start_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-0"/>
+ <rsc_op id="373" operation="notify" operation_key="ovndb_servers:0_post_notify_start_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0"/>
</trigger>
<trigger>
- <rsc_op id="379" operation="notify" operation_key="ovndb_servers_post_notify_start_0" internal_operation_key="ovndb_servers:2_post_notify_start_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1"/>
+ <rsc_op id="375" operation="notify" operation_key="ovndb_servers_post_notify_start_0" internal_operation_key="ovndb_servers:1_post_notify_start_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-2"/>
</trigger>
</inputs>
</synapse>
- <synapse id="37" priority="1000000">
+ <synapse id="29" priority="1000000">
<action_set>
- <pseudo_event id="256" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_running_0">
+ <pseudo_event id="251" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_running_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="post" CRM_meta_notify_operation="start" CRM_meta_notify_type="post" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="251" operation="running" operation_key="ovn-dbs-bundle-master_running_0"/>
+ <pseudo_event id="246" operation="running" operation_key="ovn-dbs-bundle-master_running_0"/>
</trigger>
<trigger>
- <pseudo_event id="255" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_start_0"/>
+ <pseudo_event id="250" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_start_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="38">
+ <synapse id="30">
<action_set>
- <pseudo_event id="255" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_start_0">
+ <pseudo_event id="250" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_start_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="start" CRM_meta_notify_key_type="confirmed-pre" CRM_meta_notify_operation="start" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="254" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_start_0"/>
+ <pseudo_event id="249" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_start_0"/>
</trigger>
<trigger>
- <rsc_op id="378" operation="notify" operation_key="ovndb_servers_pre_notify_start_0" internal_operation_key="ovndb_servers:2_pre_notify_start_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1"/>
+ <rsc_op id="371" operation="notify" operation_key="ovndb_servers_pre_notify_start_0" internal_operation_key="ovndb_servers:2_pre_notify_start_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-1"/>
</trigger>
- </inputs>
- </synapse>
- <synapse id="39">
- <action_set>
- <pseudo_event id="254" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_start_0">
- <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="start" CRM_meta_notify_key_type="pre" CRM_meta_notify_operation="start" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
- </pseudo_event>
- </action_set>
- <inputs>
<trigger>
- <pseudo_event id="261" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_stopped_0"/>
+ <rsc_op id="374" operation="notify" operation_key="ovndb_servers_pre_notify_start_0" internal_operation_key="ovndb_servers:1_pre_notify_start_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-2"/>
</trigger>
</inputs>
</synapse>
- <synapse id="40" priority="1000000">
- <action_set>
- <pseudo_event id="253" operation="stopped" operation_key="ovn-dbs-bundle-master_stopped_0">
- <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
- </pseudo_event>
- </action_set>
- <inputs>
- <trigger>
- <rsc_op id="243" operation="stop" operation_key="ovndb_servers_stop_0" internal_operation_key="ovndb_servers:1_stop_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-2"/>
- </trigger>
- <trigger>
- <pseudo_event id="252" operation="stop" operation_key="ovn-dbs-bundle-master_stop_0"/>
- </trigger>
- </inputs>
- </synapse>
- <synapse id="41">
+ <synapse id="31">
<action_set>
- <pseudo_event id="252" operation="stop" operation_key="ovn-dbs-bundle-master_stop_0">
- <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
+ <pseudo_event id="249" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_start_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_key_operation="start" CRM_meta_notify_key_type="pre" CRM_meta_notify_operation="start" CRM_meta_notify_type="pre" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
- <inputs>
- <trigger>
- <pseudo_event id="240" operation="stop" operation_key="ovn-dbs-bundle_stop_0"/>
- </trigger>
- <trigger>
- <pseudo_event id="259" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_stop_0"/>
- </trigger>
- </inputs>
+ <inputs/>
</synapse>
- <synapse id="42" priority="1000000">
+ <synapse id="32" priority="1000000">
<action_set>
- <pseudo_event id="251" operation="running" operation_key="ovn-dbs-bundle-master_running_0">
+ <pseudo_event id="246" operation="running" operation_key="ovn-dbs-bundle-master_running_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <rsc_op id="242" operation="start" operation_key="ovndb_servers_start_0" internal_operation_key="ovndb_servers:1_start_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-0"/>
+ <rsc_op id="241" operation="start" operation_key="ovndb_servers:0_start_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0"/>
</trigger>
<trigger>
- <rsc_op id="248" operation="start" operation_key="ovndb_servers:0_start_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-2"/>
- </trigger>
- <trigger>
- <pseudo_event id="250" operation="start" operation_key="ovn-dbs-bundle-master_start_0"/>
+ <pseudo_event id="245" operation="start" operation_key="ovn-dbs-bundle-master_start_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="43">
+ <synapse id="33">
<action_set>
- <pseudo_event id="250" operation="start" operation_key="ovn-dbs-bundle-master_start_0">
+ <pseudo_event id="245" operation="start" operation_key="ovn-dbs-bundle-master_start_0">
<attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="238" operation="start" operation_key="ovn-dbs-bundle_start_0"/>
+ <pseudo_event id="233" operation="start" operation_key="ovn-dbs-bundle_start_0"/>
</trigger>
<trigger>
- <pseudo_event id="253" operation="stopped" operation_key="ovn-dbs-bundle-master_stopped_0"/>
- </trigger>
- <trigger>
- <pseudo_event id="255" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_start_0"/>
+ <pseudo_event id="250" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-pre_notify_start_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="44">
+ <synapse id="34">
<action_set>
- <rsc_op id="226" operation="monitor" operation_key="ovn-dbs-bundle-podman-0_monitor_60000" on_node="controller-2" on_node_uuid="3">
+ <rsc_op id="222" operation="monitor" operation_key="ovn-dbs-bundle-podman-0_monitor_60000" on_node="controller-0" on_node_uuid="1">
<primitive id="ovn-dbs-bundle-podman-0" class="ocf" provider="heartbeat" type="podman"/>
- <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="controller-2" CRM_meta_on_node_uuid="3" CRM_meta_timeout="120000" allow_pull="true" force_kill="false" image="cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/ovn-dbs-bundle-0" reuse="false" run_cmd="/bin/bash /usr/local/bin/kolla_start" run_opts=" -e PCMK_stderr=1 --net=host -e PCMK_remote_port=3125 -v /var/lib/kolla/config_files/ovn_dbs.json:/var/lib/kolla/config_files/config.json:ro -v /lib/modules:/lib/modules:ro -v /var/lib/openvswitch/ovn:/run/openvswitch:rw -v /var/lib/openvswitch/ovn:/run/ovn:rw -v /var/log/containers/openvswitch:/var/log/openvswitch:rw -v /var/log/containers/openvswitch:/var/log/ovn:rw -v /var/lib/openvswitch/ovn:/etc/openvswitch:rw -v /var/lib/openvswitch/ovn:/etc/ovn:rw -v /etc/pki/tls/private/ovn_dbs.key:/etc/pki/tls/private/ovn_dbs.key:ro -v /etc/pki/tls/certs/ovn_dbs.crt:/etc/pki/tls/certs/ovn_dbs.crt:ro -v /etc/ipa/ca.crt:/etc/ipa/ca.crt:ro -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/ovn-dbs-bundle-0:/var/log --log-driver=k8s-file --log-opt path=/var/log/containers/stdouts/ovn-dbs-bundle.log -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS "/>
+ <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="controller-0" CRM_meta_on_node_uuid="1" CRM_meta_timeout="120000" allow_pull="true" force_kill="false" image="cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/ovn-dbs-bundle-0" reuse="false" run_cmd="/bin/bash /usr/local/bin/kolla_start" run_opts=" -e PCMK_stderr=1 --net=host -e PCMK_remote_port=3125 -v /var/lib/kolla/config_files/ovn_dbs.json:/var/lib/kolla/config_files/config.json:ro -v /lib/modules:/lib/modules:ro -v /var/lib/openvswitch/ovn:/run/openvswitch:rw -v /var/lib/openvswitch/ovn:/run/ovn:rw -v /var/log/containers/openvswitch:/var/log/openvswitch:rw -v /var/log/containers/openvswitch:/var/log/ovn:rw -v /var/lib/openvswitch/ovn:/etc/openvswitch:rw -v /var/lib/openvswitch/ovn:/etc/ovn:rw -v /etc/pki/tls/private/ovn_dbs.key:/etc/pki/tls/private/ovn_dbs.key:ro -v /etc/pki/tls/certs/ovn_dbs.crt:/etc/pki/tls/certs/ovn_dbs.crt:ro -v /etc/ipa/ca.crt:/etc/ipa/ca.crt:ro -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/ovn-dbs-bundle-0:/var/log --log-driver=k8s-file --log-opt path=/var/log/containers/stdouts/ovn-dbs-bundle.log -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS "/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="225" operation="start" operation_key="ovn-dbs-bundle-podman-0_start_0" on_node="controller-2" on_node_uuid="3"/>
+ <rsc_op id="221" operation="start" operation_key="ovn-dbs-bundle-podman-0_start_0" on_node="controller-0" on_node_uuid="1"/>
</trigger>
</inputs>
</synapse>
- <synapse id="45">
+ <synapse id="35">
<action_set>
- <rsc_op id="225" operation="start" operation_key="ovn-dbs-bundle-podman-0_start_0" on_node="controller-2" on_node_uuid="3">
+ <rsc_op id="221" operation="start" operation_key="ovn-dbs-bundle-podman-0_start_0" on_node="controller-0" on_node_uuid="1">
<primitive id="ovn-dbs-bundle-podman-0" class="ocf" provider="heartbeat" type="podman"/>
- <attributes CRM_meta_on_node="controller-2" CRM_meta_on_node_uuid="3" CRM_meta_timeout="120000" allow_pull="true" force_kill="false" image="cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/ovn-dbs-bundle-0" reuse="false" run_cmd="/bin/bash /usr/local/bin/kolla_start" run_opts=" -e PCMK_stderr=1 --net=host -e PCMK_remote_port=3125 -v /var/lib/kolla/config_files/ovn_dbs.json:/var/lib/kolla/config_files/config.json:ro -v /lib/modules:/lib/modules:ro -v /var/lib/openvswitch/ovn:/run/openvswitch:rw -v /var/lib/openvswitch/ovn:/run/ovn:rw -v /var/log/containers/openvswitch:/var/log/openvswitch:rw -v /var/log/containers/openvswitch:/var/log/ovn:rw -v /var/lib/openvswitch/ovn:/etc/openvswitch:rw -v /var/lib/openvswitch/ovn:/etc/ovn:rw -v /etc/pki/tls/private/ovn_dbs.key:/etc/pki/tls/private/ovn_dbs.key:ro -v /etc/pki/tls/certs/ovn_dbs.crt:/etc/pki/tls/certs/ovn_dbs.crt:ro -v /etc/ipa/ca.crt:/etc/ipa/ca.crt:ro -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/ovn-dbs-bundle-0:/var/log --log-driver=k8s-file --log-opt path=/var/log/containers/stdouts/ovn-dbs-bundle.log -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS "/>
+ <attributes CRM_meta_on_node="controller-0" CRM_meta_on_node_uuid="1" CRM_meta_timeout="120000" allow_pull="true" force_kill="false" image="cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/ovn-dbs-bundle-0" reuse="false" run_cmd="/bin/bash /usr/local/bin/kolla_start" run_opts=" -e PCMK_stderr=1 --net=host -e PCMK_remote_port=3125 -v /var/lib/kolla/config_files/ovn_dbs.json:/var/lib/kolla/config_files/config.json:ro -v /lib/modules:/lib/modules:ro -v /var/lib/openvswitch/ovn:/run/openvswitch:rw -v /var/lib/openvswitch/ovn:/run/ovn:rw -v /var/log/containers/openvswitch:/var/log/openvswitch:rw -v /var/log/containers/openvswitch:/var/log/ovn:rw -v /var/lib/openvswitch/ovn:/etc/openvswitch:rw -v /var/lib/openvswitch/ovn:/etc/ovn:rw -v /etc/pki/tls/private/ovn_dbs.key:/etc/pki/tls/private/ovn_dbs.key:ro -v /etc/pki/tls/certs/ovn_dbs.crt:/etc/pki/tls/certs/ovn_dbs.crt:ro -v /etc/ipa/ca.crt:/etc/ipa/ca.crt:ro -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/ovn-dbs-bundle-0:/var/log --log-driver=k8s-file --log-opt path=/var/log/containers/stdouts/ovn-dbs-bundle.log -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS "/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="238" operation="start" operation_key="ovn-dbs-bundle_start_0"/>
+ <pseudo_event id="233" operation="start" operation_key="ovn-dbs-bundle_start_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="46">
+ <synapse id="36">
<action_set>
- <rsc_op id="228" operation="monitor" operation_key="ovn-dbs-bundle-0_monitor_30000" on_node="controller-2" on_node_uuid="3">
+ <rsc_op id="224" operation="monitor" operation_key="ovn-dbs-bundle-0_monitor_30000" on_node="controller-0" on_node_uuid="1">
<primitive id="ovn-dbs-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
- <attributes CRM_meta_container="ovn-dbs-bundle-podman-0" CRM_meta_interval="30000" CRM_meta_name="monitor" CRM_meta_on_node="controller-2" CRM_meta_on_node_uuid="3" CRM_meta_timeout="30000" addr="controller-2" port="3125"/>
+ <attributes CRM_meta_container="ovn-dbs-bundle-podman-0" CRM_meta_interval="30000" CRM_meta_name="monitor" CRM_meta_on_node="controller-0" CRM_meta_on_node_uuid="1" CRM_meta_timeout="30000" addr="controller-0" port="3125"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="227" operation="start" operation_key="ovn-dbs-bundle-0_start_0" on_node="controller-2" on_node_uuid="3"/>
+ <rsc_op id="223" operation="start" operation_key="ovn-dbs-bundle-0_start_0" on_node="controller-0" on_node_uuid="1"/>
</trigger>
</inputs>
</synapse>
- <synapse id="47">
+ <synapse id="37">
<action_set>
- <rsc_op id="227" operation="start" operation_key="ovn-dbs-bundle-0_start_0" on_node="controller-2" on_node_uuid="3">
+ <rsc_op id="223" operation="start" operation_key="ovn-dbs-bundle-0_start_0" on_node="controller-0" on_node_uuid="1">
<primitive id="ovn-dbs-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
- <attributes CRM_meta_container="ovn-dbs-bundle-podman-0" CRM_meta_on_node="controller-2" CRM_meta_on_node_uuid="3" CRM_meta_timeout="120000" addr="controller-2" port="3125"/>
- </rsc_op>
- </action_set>
- <inputs>
- <trigger>
- <crm_event id="73" operation="clear_failcount" operation_key="ovn-dbs-bundle-0_clear_failcount_0" on_node="controller-0" on_node_uuid="1"/>
- </trigger>
- <trigger>
- <rsc_op id="225" operation="start" operation_key="ovn-dbs-bundle-podman-0_start_0" on_node="controller-2" on_node_uuid="3"/>
- </trigger>
- </inputs>
- </synapse>
- <synapse id="48">
- <action_set>
- <crm_event id="73" operation="clear_failcount" operation_key="ovn-dbs-bundle-0_clear_failcount_0" on_node="controller-0" on_node_uuid="1">
- <primitive id="ovn-dbs-bundle-0" class="ocf" provider="pacemaker" type="remote"/>
- <attributes CRM_meta_container="ovn-dbs-bundle-podman-0" CRM_meta_on_node="controller-0" CRM_meta_on_node_uuid="1" CRM_meta_op_no_wait="true" CRM_meta_timeout="120000" addr="controller-2" port="3125"/>
- </crm_event>
- </action_set>
- <inputs/>
- </synapse>
- <synapse id="49">
- <action_set>
- <rsc_op id="231" operation="monitor" operation_key="ovn-dbs-bundle-podman-1_monitor_60000" on_node="controller-0" on_node_uuid="1">
- <primitive id="ovn-dbs-bundle-podman-1" class="ocf" provider="heartbeat" type="podman"/>
- <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="controller-0" CRM_meta_on_node_uuid="1" CRM_meta_timeout="120000" allow_pull="true" force_kill="false" image="cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/ovn-dbs-bundle-1" reuse="false" run_cmd="/bin/bash /usr/local/bin/kolla_start" run_opts=" -e PCMK_stderr=1 --net=host -e PCMK_remote_port=3125 -v /var/lib/kolla/config_files/ovn_dbs.json:/var/lib/kolla/config_files/config.json:ro -v /lib/modules:/lib/modules:ro -v /var/lib/openvswitch/ovn:/run/openvswitch:rw -v /var/lib/openvswitch/ovn:/run/ovn:rw -v /var/log/containers/openvswitch:/var/log/openvswitch:rw -v /var/log/containers/openvswitch:/var/log/ovn:rw -v /var/lib/openvswitch/ovn:/etc/openvswitch:rw -v /var/lib/openvswitch/ovn:/etc/ovn:rw -v /etc/pki/tls/private/ovn_dbs.key:/etc/pki/tls/private/ovn_dbs.key:ro -v /etc/pki/tls/certs/ovn_dbs.crt:/etc/pki/tls/certs/ovn_dbs.crt:ro -v /etc/ipa/ca.crt:/etc/ipa/ca.crt:ro -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/ovn-dbs-bundle-1:/var/log --log-driver=k8s-file --log-opt path=/var/log/containers/stdouts/ovn-dbs-bundle.log -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS "/>
- </rsc_op>
- </action_set>
- <inputs>
- <trigger>
- <rsc_op id="230" operation="start" operation_key="ovn-dbs-bundle-podman-1_start_0" on_node="controller-0" on_node_uuid="1"/>
- </trigger>
- </inputs>
- </synapse>
- <synapse id="50">
- <action_set>
- <rsc_op id="230" operation="start" operation_key="ovn-dbs-bundle-podman-1_start_0" on_node="controller-0" on_node_uuid="1">
- <primitive id="ovn-dbs-bundle-podman-1" class="ocf" provider="heartbeat" type="podman"/>
- <attributes CRM_meta_on_node="controller-0" CRM_meta_on_node_uuid="1" CRM_meta_timeout="120000" allow_pull="true" force_kill="false" image="cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/ovn-dbs-bundle-1" reuse="false" run_cmd="/bin/bash /usr/local/bin/kolla_start" run_opts=" -e PCMK_stderr=1 --net=host -e PCMK_remote_port=3125 -v /var/lib/kolla/config_files/ovn_dbs.json:/var/lib/kolla/config_files/config.json:ro -v /lib/modules:/lib/modules:ro -v /var/lib/openvswitch/ovn:/run/openvswitch:rw -v /var/lib/openvswitch/ovn:/run/ovn:rw -v /var/log/containers/openvswitch:/var/log/openvswitch:rw -v /var/log/containers/openvswitch:/var/log/ovn:rw -v /var/lib/openvswitch/ovn:/etc/openvswitch:rw -v /var/lib/openvswitch/ovn:/etc/ovn:rw -v /etc/pki/tls/private/ovn_dbs.key:/etc/pki/tls/private/ovn_dbs.key:ro -v /etc/pki/tls/certs/ovn_dbs.crt:/etc/pki/tls/certs/ovn_dbs.crt:ro -v /etc/ipa/ca.crt:/etc/ipa/ca.crt:ro -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/ovn-dbs-bundle-1:/var/log --log-driver=k8s-file --log-opt path=/var/log/containers/stdouts/ovn-dbs-bundle.log -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS "/>
- </rsc_op>
- </action_set>
- <inputs>
- <trigger>
- <rsc_op id="229" operation="stop" operation_key="ovn-dbs-bundle-podman-1_stop_0" on_node="controller-2" on_node_uuid="3"/>
- </trigger>
- <trigger>
- <pseudo_event id="238" operation="start" operation_key="ovn-dbs-bundle_start_0"/>
- </trigger>
- </inputs>
- </synapse>
- <synapse id="51">
- <action_set>
- <rsc_op id="229" operation="stop" operation_key="ovn-dbs-bundle-podman-1_stop_0" on_node="controller-2" on_node_uuid="3">
- <primitive id="ovn-dbs-bundle-podman-1" class="ocf" provider="heartbeat" type="podman"/>
- <attributes CRM_meta_on_node="controller-2" CRM_meta_on_node_uuid="3" CRM_meta_timeout="120000" allow_pull="true" force_kill="false" image="cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/ovn-dbs-bundle-1" reuse="false" run_cmd="/bin/bash /usr/local/bin/kolla_start" run_opts=" -e PCMK_stderr=1 --net=host -e PCMK_remote_port=3125 -v /var/lib/kolla/config_files/ovn_dbs.json:/var/lib/kolla/config_files/config.json:ro -v /lib/modules:/lib/modules:ro -v /var/lib/openvswitch/ovn:/run/openvswitch:rw -v /var/lib/openvswitch/ovn:/run/ovn:rw -v /var/log/containers/openvswitch:/var/log/openvswitch:rw -v /var/log/containers/openvswitch:/var/log/ovn:rw -v /var/lib/openvswitch/ovn:/etc/openvswitch:rw -v /var/lib/openvswitch/ovn:/etc/ovn:rw -v /etc/pki/tls/private/ovn_dbs.key:/etc/pki/tls/private/ovn_dbs.key:ro -v /etc/pki/tls/certs/ovn_dbs.crt:/etc/pki/tls/certs/ovn_dbs.crt:ro -v /etc/ipa/ca.crt:/etc/ipa/ca.crt:ro -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/ovn-dbs-bundle-1:/var/log --log-driver=k8s-file --log-opt path=/var/log/containers/stdouts/ovn-dbs-bundle.log -e KOLLA_CONFIG_STRATEGY=COPY_ALWAYS "/>
- </rsc_op>
- </action_set>
- <inputs>
- <trigger>
- <rsc_op id="232" operation="stop" operation_key="ovn-dbs-bundle-1_stop_0" on_node="controller-2" on_node_uuid="3"/>
- </trigger>
- <trigger>
- <pseudo_event id="240" operation="stop" operation_key="ovn-dbs-bundle_stop_0"/>
- </trigger>
- </inputs>
- </synapse>
- <synapse id="52">
- <action_set>
- <rsc_op id="233" operation="monitor" operation_key="ovn-dbs-bundle-1_monitor_30000" on_node="controller-0" on_node_uuid="1">
- <primitive id="ovn-dbs-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
- <attributes CRM_meta_container="ovn-dbs-bundle-podman-1" CRM_meta_interval="30000" CRM_meta_name="monitor" CRM_meta_on_node="controller-0" CRM_meta_on_node_uuid="1" CRM_meta_timeout="30000" addr="controller-0" port="3125"/>
- </rsc_op>
- </action_set>
- <inputs>
- <trigger>
- <rsc_op id="72" operation="start" operation_key="ovn-dbs-bundle-1_start_0" on_node="controller-0" on_node_uuid="1"/>
- </trigger>
- </inputs>
- </synapse>
- <synapse id="53">
- <action_set>
- <rsc_op id="232" operation="stop" operation_key="ovn-dbs-bundle-1_stop_0" on_node="controller-2" on_node_uuid="3">
- <primitive id="ovn-dbs-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
- <attributes CRM_meta_container="ovn-dbs-bundle-podman-1" CRM_meta_on_node="controller-2" CRM_meta_on_node_uuid="3" CRM_meta_timeout="120000" addr="controller-0" port="3125"/>
- <downed>
- <node id="ovn-dbs-bundle-1"/>
- </downed>
- </rsc_op>
- </action_set>
- <inputs>
- <trigger>
- <rsc_op id="243" operation="stop" operation_key="ovndb_servers_stop_0" internal_operation_key="ovndb_servers:1_stop_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-2"/>
- </trigger>
- </inputs>
- </synapse>
- <synapse id="54">
- <action_set>
- <crm_event id="74" operation="clear_failcount" operation_key="ovn-dbs-bundle-1_clear_failcount_0" on_node="controller-2" on_node_uuid="3">
- <primitive id="ovn-dbs-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
- <attributes CRM_meta_container="ovn-dbs-bundle-podman-1" CRM_meta_on_node="controller-2" CRM_meta_on_node_uuid="3" CRM_meta_op_no_wait="true" CRM_meta_timeout="120000" addr="controller-0" port="3125"/>
- </crm_event>
- </action_set>
- <inputs/>
- </synapse>
- <synapse id="55">
- <action_set>
- <rsc_op id="72" operation="start" operation_key="ovn-dbs-bundle-1_start_0" on_node="controller-0" on_node_uuid="1">
- <primitive id="ovn-dbs-bundle-1" class="ocf" provider="pacemaker" type="remote"/>
- <attributes CRM_meta_container="ovn-dbs-bundle-podman-1" CRM_meta_on_node="controller-0" CRM_meta_on_node_uuid="1" CRM_meta_timeout="120000" addr="controller-0" port="3125"/>
+ <attributes CRM_meta_container="ovn-dbs-bundle-podman-0" CRM_meta_on_node="controller-0" CRM_meta_on_node_uuid="1" CRM_meta_timeout="120000" addr="controller-0" port="3125"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <crm_event id="74" operation="clear_failcount" operation_key="ovn-dbs-bundle-1_clear_failcount_0" on_node="controller-2" on_node_uuid="3"/>
- </trigger>
- <trigger>
- <rsc_op id="230" operation="start" operation_key="ovn-dbs-bundle-podman-1_start_0" on_node="controller-0" on_node_uuid="1"/>
- </trigger>
- <trigger>
- <rsc_op id="232" operation="stop" operation_key="ovn-dbs-bundle-1_stop_0" on_node="controller-2" on_node_uuid="3"/>
+ <rsc_op id="221" operation="start" operation_key="ovn-dbs-bundle-podman-0_start_0" on_node="controller-0" on_node_uuid="1"/>
</trigger>
</inputs>
</synapse>
- <synapse id="56">
+ <synapse id="38">
<action_set>
- <rsc_op id="279" operation="monitor" operation_key="ip-172.17.1.87_monitor_10000" on_node="controller-0" on_node_uuid="1">
+ <rsc_op id="274" operation="monitor" operation_key="ip-172.17.1.87_monitor_10000" on_node="controller-1" on_node_uuid="2">
<primitive id="ip-172.17.1.87" class="ocf" provider="heartbeat" type="IPaddr2"/>
- <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="controller-0" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" cidr_netmask="32" ip="172.17.1.87"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="controller-1" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" cidr_netmask="32" ip="172.17.1.87"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="278" operation="start" operation_key="ip-172.17.1.87_start_0" on_node="controller-0" on_node_uuid="1"/>
+ <rsc_op id="273" operation="start" operation_key="ip-172.17.1.87_start_0" on_node="controller-1" on_node_uuid="2"/>
</trigger>
</inputs>
</synapse>
- <synapse id="57">
+ <synapse id="39">
<action_set>
- <rsc_op id="278" operation="start" operation_key="ip-172.17.1.87_start_0" on_node="controller-0" on_node_uuid="1">
+ <rsc_op id="273" operation="start" operation_key="ip-172.17.1.87_start_0" on_node="controller-1" on_node_uuid="2">
<primitive id="ip-172.17.1.87" class="ocf" provider="heartbeat" type="IPaddr2"/>
- <attributes CRM_meta_name="start" CRM_meta_on_node="controller-0" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" cidr_netmask="32" ip="172.17.1.87"/>
+ <attributes CRM_meta_name="start" CRM_meta_on_node="controller-1" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" cidr_netmask="32" ip="172.17.1.87"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="274" operation="promote" operation_key="ovn-dbs-bundle_promote_0"/>
+ <pseudo_event id="269" operation="promote" operation_key="ovn-dbs-bundle_promote_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="58">
+ <synapse id="40">
<action_set>
<crm_event id="14" operation="clear_failcount" operation_key="stonith-fence_compute-fence-nova_clear_failcount_0" on_node="messaging-0" on_node_uuid="7">
<primitive id="stonith-fence_compute-fence-nova" class="stonith" type="fence_compute"/>
@@ -874,7 +579,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="59">
+ <synapse id="41">
<action_set>
<crm_event id="16" operation="clear_failcount" operation_key="nova-evacuate_clear_failcount_0" on_node="messaging-0" on_node_uuid="7">
<primitive id="nova-evacuate" class="ocf" provider="openstack" type="NovaEvacuate"/>
@@ -883,7 +588,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="60">
+ <synapse id="42">
<action_set>
<crm_event id="8" operation="clear_failcount" operation_key="stonith-fence_ipmilan-525400aa1373_clear_failcount_0" on_node="database-0" on_node_uuid="4">
<primitive id="stonith-fence_ipmilan-525400aa1373" class="stonith" type="fence_ipmilan"/>
@@ -892,7 +597,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="61">
+ <synapse id="43">
<action_set>
<crm_event id="52" operation="clear_failcount" operation_key="stonith-fence_ipmilan-525400dc23e0_clear_failcount_0" on_node="database-2" on_node_uuid="6">
<primitive id="stonith-fence_ipmilan-525400dc23e0" class="stonith" type="fence_ipmilan"/>
@@ -901,42 +606,42 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="62">
+ <synapse id="44">
<action_set>
- <rsc_op id="302" operation="monitor" operation_key="stonith-fence_ipmilan-52540040bb56_monitor_60000" on_node="database-0" on_node_uuid="4">
+ <rsc_op id="297" operation="monitor" operation_key="stonith-fence_ipmilan-52540040bb56_monitor_60000" on_node="database-0" on_node_uuid="4">
<primitive id="stonith-fence_ipmilan-52540040bb56" class="stonith" type="fence_ipmilan"/>
<attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="database-0" CRM_meta_on_node_uuid="4" CRM_meta_timeout="120000" delay="20" ipaddr="172.16.0.44" ipport="6231" lanplus="true" login="admin" passwd="****" pcmk_host_list="compute-1"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="300" operation="start" operation_key="stonith-fence_ipmilan-52540040bb56_start_0" on_node="database-0" on_node_uuid="4"/>
+ <rsc_op id="295" operation="start" operation_key="stonith-fence_ipmilan-52540040bb56_start_0" on_node="database-0" on_node_uuid="4"/>
</trigger>
</inputs>
</synapse>
- <synapse id="63">
+ <synapse id="45">
<action_set>
- <rsc_op id="300" operation="start" operation_key="stonith-fence_ipmilan-52540040bb56_start_0" on_node="database-0" on_node_uuid="4">
+ <rsc_op id="295" operation="start" operation_key="stonith-fence_ipmilan-52540040bb56_start_0" on_node="database-0" on_node_uuid="4">
<primitive id="stonith-fence_ipmilan-52540040bb56" class="stonith" type="fence_ipmilan"/>
<attributes CRM_meta_on_node="database-0" CRM_meta_on_node_uuid="4" CRM_meta_timeout="120000" delay="20" ipaddr="172.16.0.44" ipport="6231" lanplus="true" login="admin" passwd="****" pcmk_host_list="compute-1"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="299" operation="stop" operation_key="stonith-fence_ipmilan-52540040bb56_stop_0" on_node="messaging-2" on_node_uuid="9"/>
+ <rsc_op id="294" operation="stop" operation_key="stonith-fence_ipmilan-52540040bb56_stop_0" on_node="messaging-2" on_node_uuid="9"/>
</trigger>
</inputs>
</synapse>
- <synapse id="64">
+ <synapse id="46">
<action_set>
- <rsc_op id="299" operation="stop" operation_key="stonith-fence_ipmilan-52540040bb56_stop_0" on_node="messaging-2" on_node_uuid="9">
+ <rsc_op id="294" operation="stop" operation_key="stonith-fence_ipmilan-52540040bb56_stop_0" on_node="messaging-2" on_node_uuid="9">
<primitive id="stonith-fence_ipmilan-52540040bb56" class="stonith" type="fence_ipmilan"/>
<attributes CRM_meta_on_node="messaging-2" CRM_meta_on_node_uuid="9" CRM_meta_timeout="120000" delay="20" ipaddr="172.16.0.44" ipport="6231" lanplus="true" login="admin" passwd="****" pcmk_host_list="compute-1"/>
</rsc_op>
</action_set>
<inputs/>
</synapse>
- <synapse id="65">
+ <synapse id="47">
<action_set>
<crm_event id="44" operation="clear_failcount" operation_key="stonith-fence_ipmilan-52540078fb07_clear_failcount_0" on_node="messaging-2" on_node_uuid="9">
<primitive id="stonith-fence_ipmilan-52540078fb07" class="stonith" type="fence_ipmilan"/>
@@ -945,7 +650,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="66">
+ <synapse id="48">
<action_set>
<crm_event id="10" operation="clear_failcount" operation_key="stonith-fence_ipmilan-525400ea59b0_clear_failcount_0" on_node="database-0" on_node_uuid="4">
<primitive id="stonith-fence_ipmilan-525400ea59b0" class="stonith" type="fence_ipmilan"/>
@@ -954,7 +659,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="67">
+ <synapse id="49">
<action_set>
<crm_event id="45" operation="clear_failcount" operation_key="stonith-fence_ipmilan-525400066e50_clear_failcount_0" on_node="messaging-2" on_node_uuid="9">
<primitive id="stonith-fence_ipmilan-525400066e50" class="stonith" type="fence_ipmilan"/>
@@ -963,42 +668,42 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="68">
+ <synapse id="50">
<action_set>
- <rsc_op id="314" operation="monitor" operation_key="stonith-fence_ipmilan-525400e1534e_monitor_60000" on_node="messaging-2" on_node_uuid="9">
+ <rsc_op id="309" operation="monitor" operation_key="stonith-fence_ipmilan-525400e1534e_monitor_60000" on_node="messaging-2" on_node_uuid="9">
<primitive id="stonith-fence_ipmilan-525400e1534e" class="stonith" type="fence_ipmilan"/>
<attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="messaging-2" CRM_meta_on_node_uuid="9" CRM_meta_timeout="120000" delay="20" ipaddr="172.16.0.44" ipport="6240" lanplus="true" login="admin" passwd="****" pcmk_host_list="messaging-0"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="313" operation="start" operation_key="stonith-fence_ipmilan-525400e1534e_start_0" on_node="messaging-2" on_node_uuid="9"/>
+ <rsc_op id="308" operation="start" operation_key="stonith-fence_ipmilan-525400e1534e_start_0" on_node="messaging-2" on_node_uuid="9"/>
</trigger>
</inputs>
</synapse>
- <synapse id="69">
+ <synapse id="51">
<action_set>
- <rsc_op id="313" operation="start" operation_key="stonith-fence_ipmilan-525400e1534e_start_0" on_node="messaging-2" on_node_uuid="9">
+ <rsc_op id="308" operation="start" operation_key="stonith-fence_ipmilan-525400e1534e_start_0" on_node="messaging-2" on_node_uuid="9">
<primitive id="stonith-fence_ipmilan-525400e1534e" class="stonith" type="fence_ipmilan"/>
<attributes CRM_meta_on_node="messaging-2" CRM_meta_on_node_uuid="9" CRM_meta_timeout="120000" delay="20" ipaddr="172.16.0.44" ipport="6240" lanplus="true" login="admin" passwd="****" pcmk_host_list="messaging-0"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="312" operation="stop" operation_key="stonith-fence_ipmilan-525400e1534e_stop_0" on_node="database-1" on_node_uuid="5"/>
+ <rsc_op id="307" operation="stop" operation_key="stonith-fence_ipmilan-525400e1534e_stop_0" on_node="database-1" on_node_uuid="5"/>
</trigger>
</inputs>
</synapse>
- <synapse id="70">
+ <synapse id="52">
<action_set>
- <rsc_op id="312" operation="stop" operation_key="stonith-fence_ipmilan-525400e1534e_stop_0" on_node="database-1" on_node_uuid="5">
+ <rsc_op id="307" operation="stop" operation_key="stonith-fence_ipmilan-525400e1534e_stop_0" on_node="database-1" on_node_uuid="5">
<primitive id="stonith-fence_ipmilan-525400e1534e" class="stonith" type="fence_ipmilan"/>
<attributes CRM_meta_on_node="database-1" CRM_meta_on_node_uuid="5" CRM_meta_timeout="120000" delay="20" ipaddr="172.16.0.44" ipport="6240" lanplus="true" login="admin" passwd="****" pcmk_host_list="messaging-0"/>
</rsc_op>
</action_set>
<inputs/>
</synapse>
- <synapse id="71">
+ <synapse id="53">
<action_set>
<crm_event id="54" operation="clear_failcount" operation_key="stonith-fence_ipmilan-525400e1534e_clear_failcount_0" on_node="database-2" on_node_uuid="6">
<primitive id="stonith-fence_ipmilan-525400e1534e" class="stonith" type="fence_ipmilan"/>
@@ -1007,7 +712,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="72">
+ <synapse id="54">
<action_set>
<crm_event id="19" operation="clear_failcount" operation_key="stonith-fence_ipmilan-52540060dbba_clear_failcount_0" on_node="messaging-0" on_node_uuid="7">
<primitive id="stonith-fence_ipmilan-52540060dbba" class="stonith" type="fence_ipmilan"/>
@@ -1016,7 +721,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="73">
+ <synapse id="55">
<action_set>
<crm_event id="11" operation="clear_failcount" operation_key="stonith-fence_ipmilan-525400e018b6_clear_failcount_0" on_node="database-0" on_node_uuid="4">
<primitive id="stonith-fence_ipmilan-525400e018b6" class="stonith" type="fence_ipmilan"/>
@@ -1025,7 +730,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="74">
+ <synapse id="56">
<action_set>
<crm_event id="56" operation="clear_failcount" operation_key="stonith-fence_ipmilan-525400c87cdb_clear_failcount_0" on_node="database-2" on_node_uuid="6">
<primitive id="stonith-fence_ipmilan-525400c87cdb" class="stonith" type="fence_ipmilan"/>
@@ -1034,101 +739,68 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="75" priority="1000000">
+ <synapse id="57" priority="1000000">
<action_set>
- <pseudo_event id="275" operation="promoted" operation_key="ovn-dbs-bundle_promoted_0">
+ <pseudo_event id="270" operation="promoted" operation_key="ovn-dbs-bundle_promoted_0">
<attributes CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="267" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_promoted_0"/>
+ <pseudo_event id="262" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_promoted_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="76">
+ <synapse id="58">
<action_set>
- <pseudo_event id="274" operation="promote" operation_key="ovn-dbs-bundle_promote_0">
+ <pseudo_event id="269" operation="promote" operation_key="ovn-dbs-bundle_promote_0">
<attributes CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="239" operation="running" operation_key="ovn-dbs-bundle_running_0"/>
- </trigger>
- <trigger>
- <pseudo_event id="241" operation="stopped" operation_key="ovn-dbs-bundle_stopped_0"/>
+ <pseudo_event id="234" operation="running" operation_key="ovn-dbs-bundle_running_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="77" priority="1000000">
+ <synapse id="59" priority="1000000">
<action_set>
- <pseudo_event id="241" operation="stopped" operation_key="ovn-dbs-bundle_stopped_0">
+ <pseudo_event id="234" operation="running" operation_key="ovn-dbs-bundle_running_0">
<attributes CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <rsc_op id="229" operation="stop" operation_key="ovn-dbs-bundle-podman-1_stop_0" on_node="controller-2" on_node_uuid="3"/>
+ <rsc_op id="221" operation="start" operation_key="ovn-dbs-bundle-podman-0_start_0" on_node="controller-0" on_node_uuid="1"/>
</trigger>
<trigger>
- <pseudo_event id="261" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_stopped_0"/>
+ <pseudo_event id="252" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_running_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="78">
+ <synapse id="60">
<action_set>
- <pseudo_event id="240" operation="stop" operation_key="ovn-dbs-bundle_stop_0">
+ <pseudo_event id="233" operation="start" operation_key="ovn-dbs-bundle_start_0">
<attributes CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
<inputs/>
</synapse>
- <synapse id="79" priority="1000000">
+ <synapse id="61" priority="1000000">
<action_set>
- <pseudo_event id="239" operation="running" operation_key="ovn-dbs-bundle_running_0">
+ <pseudo_event id="126" operation="running" operation_key="rabbitmq-bundle_running_0">
<attributes CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <rsc_op id="225" operation="start" operation_key="ovn-dbs-bundle-podman-0_start_0" on_node="controller-2" on_node_uuid="3"/>
- </trigger>
- <trigger>
- <rsc_op id="230" operation="start" operation_key="ovn-dbs-bundle-podman-1_start_0" on_node="controller-0" on_node_uuid="1"/>
- </trigger>
- <trigger>
- <pseudo_event id="257" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_running_0"/>
+ <pseudo_event id="142" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-post_notify_running_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="80">
- <action_set>
- <pseudo_event id="238" operation="start" operation_key="ovn-dbs-bundle_start_0">
- <attributes CRM_meta_timeout="120000" />
- </pseudo_event>
- </action_set>
- <inputs>
- <trigger>
- <pseudo_event id="241" operation="stopped" operation_key="ovn-dbs-bundle_stopped_0"/>
- </trigger>
- </inputs>
- </synapse>
- <synapse id="81" priority="1000000">
- <action_set>
- <pseudo_event id="130" operation="running" operation_key="rabbitmq-bundle_running_0">
- <attributes CRM_meta_timeout="120000" />
- </pseudo_event>
- </action_set>
- <inputs>
- <trigger>
- <pseudo_event id="146" operation="notified" operation_key="rabbitmq-bundle-clone_confirmed-post_notify_running_0"/>
- </trigger>
- </inputs>
- </synapse>
- <synapse id="82">
+ <synapse id="62">
<action_set>
- <pseudo_event id="129" operation="start" operation_key="rabbitmq-bundle_start_0">
+ <pseudo_event id="125" operation="start" operation_key="rabbitmq-bundle_start_0">
<attributes CRM_meta_timeout="120000" />
</pseudo_event>
</action_set>
diff --git a/cts/scheduler/exp/clone-anon-failcount.exp b/cts/scheduler/exp/clone-anon-failcount.exp
index 05312c2..a48f69b 100644
--- a/cts/scheduler/exp/clone-anon-failcount.exp
+++ b/cts/scheduler/exp/clone-anon-failcount.exp
@@ -186,7 +186,7 @@
<action_set>
<rsc_op id="37" operation="stop" operation_key="UmDummy01_stop_0" on_node="srv01" on_node_uuid="srv01">
<primitive id="UmDummy01" class="ocf" provider="pacemaker" type="Dummy"/>
- <attributes CRM_meta_name="stop" CRM_meta_on_fail="stop" CRM_meta_on_node="srv01" CRM_meta_on_node_uuid="srv01" CRM_meta_timeout="60000" />
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="srv01" CRM_meta_on_node_uuid="srv01" CRM_meta_timeout="60000" />
</rsc_op>
</action_set>
<inputs>
diff --git a/cts/scheduler/exp/clone-order-16instances.exp b/cts/scheduler/exp/clone-order-16instances.exp
index 9d20ae1..b06826b 100644
--- a/cts/scheduler/exp/clone-order-16instances.exp
+++ b/cts/scheduler/exp/clone-order-16instances.exp
@@ -76,6 +76,9 @@
</action_set>
<inputs>
<trigger>
+ <rsc_op id="10" operation="start" operation_key="dlm_start_0" internal_operation_key="dlm:2_start_0" on_node="virt-009.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
<rsc_op id="12" operation="start" operation_key="dlm:3_start_0" on_node="virt-013.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="4"/>
</trigger>
<trigger>
@@ -105,6 +108,12 @@
</action_set>
<inputs>
<trigger>
+ <rsc_op id="10" operation="start" operation_key="dlm_start_0" internal_operation_key="dlm:2_start_0" on_node="virt-009.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="12" operation="start" operation_key="dlm:3_start_0" on_node="virt-013.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
<rsc_op id="14" operation="start" operation_key="dlm:4_start_0" on_node="virt-014.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="5"/>
</trigger>
<trigger>
@@ -134,6 +143,15 @@
</action_set>
<inputs>
<trigger>
+ <rsc_op id="10" operation="start" operation_key="dlm_start_0" internal_operation_key="dlm:2_start_0" on_node="virt-009.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="12" operation="start" operation_key="dlm:3_start_0" on_node="virt-013.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="14" operation="start" operation_key="dlm:4_start_0" on_node="virt-014.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="5"/>
+ </trigger>
+ <trigger>
<rsc_op id="16" operation="start" operation_key="dlm:5_start_0" on_node="virt-015.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="6"/>
</trigger>
<trigger>
@@ -163,6 +181,18 @@
</action_set>
<inputs>
<trigger>
+ <rsc_op id="10" operation="start" operation_key="dlm_start_0" internal_operation_key="dlm:2_start_0" on_node="virt-009.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="12" operation="start" operation_key="dlm:3_start_0" on_node="virt-013.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="14" operation="start" operation_key="dlm:4_start_0" on_node="virt-014.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="5"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="16" operation="start" operation_key="dlm:5_start_0" on_node="virt-015.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="6"/>
+ </trigger>
+ <trigger>
<rsc_op id="18" operation="start" operation_key="dlm:6_start_0" on_node="virt-016.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="7"/>
</trigger>
<trigger>
@@ -192,6 +222,21 @@
</action_set>
<inputs>
<trigger>
+ <rsc_op id="10" operation="start" operation_key="dlm_start_0" internal_operation_key="dlm:2_start_0" on_node="virt-009.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="12" operation="start" operation_key="dlm:3_start_0" on_node="virt-013.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="14" operation="start" operation_key="dlm:4_start_0" on_node="virt-014.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="5"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="16" operation="start" operation_key="dlm:5_start_0" on_node="virt-015.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="6"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="18" operation="start" operation_key="dlm:6_start_0" on_node="virt-016.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="7"/>
+ </trigger>
+ <trigger>
<rsc_op id="20" operation="start" operation_key="dlm:7_start_0" on_node="virt-020.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="8"/>
</trigger>
<trigger>
@@ -221,6 +266,24 @@
</action_set>
<inputs>
<trigger>
+ <rsc_op id="10" operation="start" operation_key="dlm_start_0" internal_operation_key="dlm:2_start_0" on_node="virt-009.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="12" operation="start" operation_key="dlm:3_start_0" on_node="virt-013.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="14" operation="start" operation_key="dlm:4_start_0" on_node="virt-014.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="5"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="16" operation="start" operation_key="dlm:5_start_0" on_node="virt-015.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="6"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="18" operation="start" operation_key="dlm:6_start_0" on_node="virt-016.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="7"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="20" operation="start" operation_key="dlm:7_start_0" on_node="virt-020.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="8"/>
+ </trigger>
+ <trigger>
<rsc_op id="22" operation="start" operation_key="dlm:8_start_0" on_node="virt-027.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="9"/>
</trigger>
<trigger>
@@ -250,6 +313,27 @@
</action_set>
<inputs>
<trigger>
+ <rsc_op id="10" operation="start" operation_key="dlm_start_0" internal_operation_key="dlm:2_start_0" on_node="virt-009.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="12" operation="start" operation_key="dlm:3_start_0" on_node="virt-013.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="14" operation="start" operation_key="dlm:4_start_0" on_node="virt-014.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="5"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="16" operation="start" operation_key="dlm:5_start_0" on_node="virt-015.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="6"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="18" operation="start" operation_key="dlm:6_start_0" on_node="virt-016.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="7"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="20" operation="start" operation_key="dlm:7_start_0" on_node="virt-020.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="8"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="22" operation="start" operation_key="dlm:8_start_0" on_node="virt-027.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="9"/>
+ </trigger>
+ <trigger>
<rsc_op id="24" operation="start" operation_key="dlm:9_start_0" on_node="virt-028.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="10"/>
</trigger>
<trigger>
@@ -279,6 +363,30 @@
</action_set>
<inputs>
<trigger>
+ <rsc_op id="10" operation="start" operation_key="dlm_start_0" internal_operation_key="dlm:2_start_0" on_node="virt-009.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="12" operation="start" operation_key="dlm:3_start_0" on_node="virt-013.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="14" operation="start" operation_key="dlm:4_start_0" on_node="virt-014.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="5"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="16" operation="start" operation_key="dlm:5_start_0" on_node="virt-015.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="6"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="18" operation="start" operation_key="dlm:6_start_0" on_node="virt-016.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="7"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="20" operation="start" operation_key="dlm:7_start_0" on_node="virt-020.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="8"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="22" operation="start" operation_key="dlm:8_start_0" on_node="virt-027.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="9"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="24" operation="start" operation_key="dlm:9_start_0" on_node="virt-028.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="10"/>
+ </trigger>
+ <trigger>
<rsc_op id="26" operation="start" operation_key="dlm:10_start_0" on_node="virt-029.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="11"/>
</trigger>
<trigger>
@@ -308,6 +416,33 @@
</action_set>
<inputs>
<trigger>
+ <rsc_op id="10" operation="start" operation_key="dlm_start_0" internal_operation_key="dlm:2_start_0" on_node="virt-009.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="12" operation="start" operation_key="dlm:3_start_0" on_node="virt-013.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="14" operation="start" operation_key="dlm:4_start_0" on_node="virt-014.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="5"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="16" operation="start" operation_key="dlm:5_start_0" on_node="virt-015.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="6"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="18" operation="start" operation_key="dlm:6_start_0" on_node="virt-016.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="7"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="20" operation="start" operation_key="dlm:7_start_0" on_node="virt-020.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="8"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="22" operation="start" operation_key="dlm:8_start_0" on_node="virt-027.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="9"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="24" operation="start" operation_key="dlm:9_start_0" on_node="virt-028.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="10"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="26" operation="start" operation_key="dlm:10_start_0" on_node="virt-029.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="11"/>
+ </trigger>
+ <trigger>
<rsc_op id="28" operation="start" operation_key="dlm:11_start_0" on_node="virt-030.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="12"/>
</trigger>
<trigger>
@@ -337,6 +472,36 @@
</action_set>
<inputs>
<trigger>
+ <rsc_op id="10" operation="start" operation_key="dlm_start_0" internal_operation_key="dlm:2_start_0" on_node="virt-009.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="12" operation="start" operation_key="dlm:3_start_0" on_node="virt-013.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="14" operation="start" operation_key="dlm:4_start_0" on_node="virt-014.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="5"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="16" operation="start" operation_key="dlm:5_start_0" on_node="virt-015.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="6"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="18" operation="start" operation_key="dlm:6_start_0" on_node="virt-016.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="7"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="20" operation="start" operation_key="dlm:7_start_0" on_node="virt-020.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="8"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="22" operation="start" operation_key="dlm:8_start_0" on_node="virt-027.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="9"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="24" operation="start" operation_key="dlm:9_start_0" on_node="virt-028.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="10"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="26" operation="start" operation_key="dlm:10_start_0" on_node="virt-029.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="11"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="28" operation="start" operation_key="dlm:11_start_0" on_node="virt-030.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="12"/>
+ </trigger>
+ <trigger>
<rsc_op id="30" operation="start" operation_key="dlm:12_start_0" on_node="virt-031.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="13"/>
</trigger>
<trigger>
@@ -366,6 +531,39 @@
</action_set>
<inputs>
<trigger>
+ <rsc_op id="10" operation="start" operation_key="dlm_start_0" internal_operation_key="dlm:2_start_0" on_node="virt-009.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="12" operation="start" operation_key="dlm:3_start_0" on_node="virt-013.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="14" operation="start" operation_key="dlm:4_start_0" on_node="virt-014.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="5"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="16" operation="start" operation_key="dlm:5_start_0" on_node="virt-015.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="6"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="18" operation="start" operation_key="dlm:6_start_0" on_node="virt-016.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="7"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="20" operation="start" operation_key="dlm:7_start_0" on_node="virt-020.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="8"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="22" operation="start" operation_key="dlm:8_start_0" on_node="virt-027.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="9"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="24" operation="start" operation_key="dlm:9_start_0" on_node="virt-028.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="10"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="26" operation="start" operation_key="dlm:10_start_0" on_node="virt-029.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="11"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="28" operation="start" operation_key="dlm:11_start_0" on_node="virt-030.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="12"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="30" operation="start" operation_key="dlm:12_start_0" on_node="virt-031.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="13"/>
+ </trigger>
+ <trigger>
<rsc_op id="32" operation="start" operation_key="dlm:13_start_0" on_node="virt-032.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="14"/>
</trigger>
<trigger>
@@ -395,6 +593,42 @@
</action_set>
<inputs>
<trigger>
+ <rsc_op id="10" operation="start" operation_key="dlm_start_0" internal_operation_key="dlm:2_start_0" on_node="virt-009.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="12" operation="start" operation_key="dlm:3_start_0" on_node="virt-013.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="14" operation="start" operation_key="dlm:4_start_0" on_node="virt-014.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="5"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="16" operation="start" operation_key="dlm:5_start_0" on_node="virt-015.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="6"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="18" operation="start" operation_key="dlm:6_start_0" on_node="virt-016.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="7"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="20" operation="start" operation_key="dlm:7_start_0" on_node="virt-020.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="8"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="22" operation="start" operation_key="dlm:8_start_0" on_node="virt-027.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="9"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="24" operation="start" operation_key="dlm:9_start_0" on_node="virt-028.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="10"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="26" operation="start" operation_key="dlm:10_start_0" on_node="virt-029.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="11"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="28" operation="start" operation_key="dlm:11_start_0" on_node="virt-030.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="12"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="30" operation="start" operation_key="dlm:12_start_0" on_node="virt-031.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="13"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="32" operation="start" operation_key="dlm:13_start_0" on_node="virt-032.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="14"/>
+ </trigger>
+ <trigger>
<rsc_op id="34" operation="start" operation_key="dlm:14_start_0" on_node="virt-033.cluster-qe.lab.eng.brq.redhat.com" on_node_uuid="15"/>
</trigger>
<trigger>
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-1.exp b/cts/scheduler/exp/clone-recover-no-shuffle-1.exp
new file mode 100644
index 0000000..670a823
--- /dev/null
+++ b/cts/scheduler/exp/clone-recover-no-shuffle-1.exp
@@ -0,0 +1,51 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="11" operation="monitor" operation_key="dummy:2_monitor_10000" on_node="node1" on_node_uuid="1">
+ <primitive id="dummy" long-id="dummy:2" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="10" operation="start" operation_key="dummy:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="10" operation="start" operation_key="dummy:2_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="dummy" long-id="dummy:2" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="12" operation="start" operation_key="dummy-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2" priority="1000000">
+ <action_set>
+ <pseudo_event id="13" operation="running" operation_key="dummy-clone_running_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="10" operation="start" operation_key="dummy:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="12" operation="start" operation_key="dummy-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <pseudo_event id="12" operation="start" operation_key="dummy-clone_start_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-10.exp b/cts/scheduler/exp/clone-recover-no-shuffle-10.exp
new file mode 100644
index 0000000..27b8b70
--- /dev/null
+++ b/cts/scheduler/exp/clone-recover-no-shuffle-10.exp
@@ -0,0 +1,51 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="13" operation="monitor" operation_key="dummy:2_monitor_11000" on_node="node1" on_node_uuid="1">
+ <primitive id="dummy" long-id="dummy:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="11000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="12" operation="start" operation_key="dummy:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="12" operation="start" operation_key="dummy:2_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="dummy" long-id="dummy:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="14" operation="start" operation_key="dummy-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2" priority="1000000">
+ <action_set>
+ <pseudo_event id="15" operation="running" operation_key="dummy-clone_running_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="12" operation="start" operation_key="dummy:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="14" operation="start" operation_key="dummy-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <pseudo_event id="14" operation="start" operation_key="dummy-clone_start_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-11.exp b/cts/scheduler/exp/clone-recover-no-shuffle-11.exp
new file mode 100644
index 0000000..40cf1f6
--- /dev/null
+++ b/cts/scheduler/exp/clone-recover-no-shuffle-11.exp
@@ -0,0 +1,110 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <pseudo_event id="41" operation="running" operation_key="grp:2_running_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="36" operation="start" operation_key="rsc1:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="38" operation="start" operation_key="rsc2:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="40" operation="start" operation_key="grp:2_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <pseudo_event id="40" operation="start" operation_key="grp:2_start_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="48" operation="start" operation_key="grp-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="37" operation="monitor" operation_key="rsc1:2_monitor_11000" on_node="node1" on_node_uuid="1">
+ <primitive id="rsc1" long-id="rsc1:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="11000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="36" operation="start" operation_key="rsc1:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="36" operation="start" operation_key="rsc1:2_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="rsc1" long-id="rsc1:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="40" operation="start" operation_key="grp:2_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="39" operation="monitor" operation_key="rsc2:2_monitor_11000" on_node="node1" on_node_uuid="1">
+ <primitive id="rsc2" long-id="rsc2:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="11000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="38" operation="start" operation_key="rsc2:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="38" operation="start" operation_key="rsc2:2_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="rsc2" long-id="rsc2:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="36" operation="start" operation_key="rsc1:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="40" operation="start" operation_key="grp:2_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="6" priority="1000000">
+ <action_set>
+ <pseudo_event id="49" operation="running" operation_key="grp-clone_running_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="41" operation="running" operation_key="grp:2_running_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="48" operation="start" operation_key="grp-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <pseudo_event id="48" operation="start" operation_key="grp-clone_start_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-12.exp b/cts/scheduler/exp/clone-recover-no-shuffle-12.exp
new file mode 100644
index 0000000..919e6b2
--- /dev/null
+++ b/cts/scheduler/exp/clone-recover-no-shuffle-12.exp
@@ -0,0 +1,187 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="34" operation="monitor" operation_key="base:2_monitor_16000" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node1">
+ <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_physical_host="node1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="23" operation="start" operation_key="base-bundle-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="33" operation="start" operation_key="base:2_start_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="33" operation="start" operation_key="base:2_start_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node1">
+ <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_physical_host="node1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="21" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="23" operation="start" operation_key="base-bundle-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="37" operation="start" operation_key="base-bundle-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2" priority="1000000">
+ <action_set>
+ <pseudo_event id="38" operation="running" operation_key="base-bundle-clone_running_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="33" operation="start" operation_key="base:2_start_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="37" operation="start" operation_key="base-bundle-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <pseudo_event id="37" operation="start" operation_key="base-bundle-clone_start_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="25" operation="start" operation_key="base-bundle_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="22" operation="monitor" operation_key="base-bundle-podman-2_monitor_60000" on_node="node1" on_node_uuid="1">
+ <primitive id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman"/>
+ <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="localhost/pcmktest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/base-bundle-2" reuse="false" run_cmd="/usr/sbin/pacemaker-remoted" run_opts=" -e PCMK_stderr=1 -e PCMK_remote_port=3121 -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/base-bundle-2:/var/log -p 3121:3121 "/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="21" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="21" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman"/>
+ <attributes CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="localhost/pcmktest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/base-bundle-2" reuse="false" run_cmd="/usr/sbin/pacemaker-remoted" run_opts=" -e PCMK_stderr=1 -e PCMK_remote_port=3121 -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/base-bundle-2:/var/log -p 3121:3121 "/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="25" operation="start" operation_key="base-bundle_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="6">
+ <action_set>
+ <rsc_op id="24" operation="monitor" operation_key="base-bundle-2_monitor_30000" on_node="node1" on_node_uuid="1">
+ <primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="base-bundle-podman-2" CRM_meta_interval="30000" CRM_meta_name="monitor" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="30000" addr="node1" port="3121"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="23" operation="start" operation_key="base-bundle-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <rsc_op id="23" operation="start" operation_key="base-bundle-2_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="base-bundle-podman-2" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" addr="node1" port="3121"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="8" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="9" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="10" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node3" on_node_uuid="3"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="21" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8">
+ <action_set>
+ <rsc_op id="10" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node3" on_node_uuid="3">
+ <primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="base-bundle-podman-2" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="node1" port="3121"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="21" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <rsc_op id="9" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node2" on_node_uuid="2">
+ <primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="base-bundle-podman-2" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="node1" port="3121"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="21" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="10">
+ <action_set>
+ <rsc_op id="8" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node1" on_node_uuid="1">
+ <primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="base-bundle-podman-2" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="node1" port="3121"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="21" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="11" priority="1000000">
+ <action_set>
+ <pseudo_event id="26" operation="running" operation_key="base-bundle_running_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="21" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="38" operation="running" operation_key="base-bundle-clone_running_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="12">
+ <action_set>
+ <pseudo_event id="25" operation="start" operation_key="base-bundle_start_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-2.exp b/cts/scheduler/exp/clone-recover-no-shuffle-2.exp
new file mode 100644
index 0000000..84b1e1b
--- /dev/null
+++ b/cts/scheduler/exp/clone-recover-no-shuffle-2.exp
@@ -0,0 +1,110 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <pseudo_event id="29" operation="running" operation_key="grp:2_running_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="24" operation="start" operation_key="rsc1:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="26" operation="start" operation_key="rsc2:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="28" operation="start" operation_key="grp:2_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <pseudo_event id="28" operation="start" operation_key="grp:2_start_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="32" operation="start" operation_key="grp-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="25" operation="monitor" operation_key="rsc1:2_monitor_10000" on_node="node1" on_node_uuid="1">
+ <primitive id="rsc1" long-id="rsc1:2" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="24" operation="start" operation_key="rsc1:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="24" operation="start" operation_key="rsc1:2_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="rsc1" long-id="rsc1:2" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="28" operation="start" operation_key="grp:2_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="27" operation="monitor" operation_key="rsc2:2_monitor_10000" on_node="node1" on_node_uuid="1">
+ <primitive id="rsc2" long-id="rsc2:2" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="26" operation="start" operation_key="rsc2:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="26" operation="start" operation_key="rsc2:2_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="rsc2" long-id="rsc2:2" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="24" operation="start" operation_key="rsc1:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="28" operation="start" operation_key="grp:2_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="6" priority="1000000">
+ <action_set>
+ <pseudo_event id="33" operation="running" operation_key="grp-clone_running_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="29" operation="running" operation_key="grp:2_running_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="32" operation="start" operation_key="grp-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <pseudo_event id="32" operation="start" operation_key="grp-clone_start_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-3.exp b/cts/scheduler/exp/clone-recover-no-shuffle-3.exp
new file mode 100644
index 0000000..6b6ed07
--- /dev/null
+++ b/cts/scheduler/exp/clone-recover-no-shuffle-3.exp
@@ -0,0 +1,171 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="31" operation="start" operation_key="base:2_start_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node1">
+ <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_physical_host="node1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="19" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="21" operation="start" operation_key="base-bundle-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="32" operation="start" operation_key="base-bundle-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1" priority="1000000">
+ <action_set>
+ <pseudo_event id="33" operation="running" operation_key="base-bundle-clone_running_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="31" operation="start" operation_key="base:2_start_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="32" operation="start" operation_key="base-bundle-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <pseudo_event id="32" operation="start" operation_key="base-bundle-clone_start_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="23" operation="start" operation_key="base-bundle_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="20" operation="monitor" operation_key="base-bundle-podman-2_monitor_60000" on_node="node1" on_node_uuid="1">
+ <primitive id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman"/>
+ <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="localhost/pcmktest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/base-bundle-2" reuse="false" run_cmd="/usr/sbin/pacemaker-remoted" run_opts=" -e PCMK_stderr=1 -e PCMK_remote_port=3121 -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/base-bundle-2:/var/log -p 3121:3121 "/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="19" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="19" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman"/>
+ <attributes CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="localhost/pcmktest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/base-bundle-2" reuse="false" run_cmd="/usr/sbin/pacemaker-remoted" run_opts=" -e PCMK_stderr=1 -e PCMK_remote_port=3121 -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/base-bundle-2:/var/log -p 3121:3121 "/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="23" operation="start" operation_key="base-bundle_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="22" operation="monitor" operation_key="base-bundle-2_monitor_30000" on_node="node1" on_node_uuid="1">
+ <primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="base-bundle-podman-2" CRM_meta_interval="30000" CRM_meta_name="monitor" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="30000" addr="node1" port="3121"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="21" operation="start" operation_key="base-bundle-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="6">
+ <action_set>
+ <rsc_op id="21" operation="start" operation_key="base-bundle-2_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="base-bundle-podman-2" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" addr="node1" port="3121"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="6" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="7" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="8" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node3" on_node_uuid="3"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="19" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <rsc_op id="8" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node3" on_node_uuid="3">
+ <primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="base-bundle-podman-2" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="node1" port="3121"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="19" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8">
+ <action_set>
+ <rsc_op id="7" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node2" on_node_uuid="2">
+ <primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="base-bundle-podman-2" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="node1" port="3121"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="19" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <rsc_op id="6" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node1" on_node_uuid="1">
+ <primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="base-bundle-podman-2" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="node1" port="3121"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="19" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="10" priority="1000000">
+ <action_set>
+ <pseudo_event id="24" operation="running" operation_key="base-bundle_running_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="19" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="33" operation="running" operation_key="base-bundle-clone_running_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="11">
+ <action_set>
+ <pseudo_event id="23" operation="start" operation_key="base-bundle_start_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-4.exp b/cts/scheduler/exp/clone-recover-no-shuffle-4.exp
new file mode 100644
index 0000000..670a823
--- /dev/null
+++ b/cts/scheduler/exp/clone-recover-no-shuffle-4.exp
@@ -0,0 +1,51 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="11" operation="monitor" operation_key="dummy:2_monitor_10000" on_node="node1" on_node_uuid="1">
+ <primitive id="dummy" long-id="dummy:2" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="10" operation="start" operation_key="dummy:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="10" operation="start" operation_key="dummy:2_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="dummy" long-id="dummy:2" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="12" operation="start" operation_key="dummy-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2" priority="1000000">
+ <action_set>
+ <pseudo_event id="13" operation="running" operation_key="dummy-clone_running_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="10" operation="start" operation_key="dummy:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="12" operation="start" operation_key="dummy-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <pseudo_event id="12" operation="start" operation_key="dummy-clone_start_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-5.exp b/cts/scheduler/exp/clone-recover-no-shuffle-5.exp
new file mode 100644
index 0000000..84b1e1b
--- /dev/null
+++ b/cts/scheduler/exp/clone-recover-no-shuffle-5.exp
@@ -0,0 +1,110 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <pseudo_event id="29" operation="running" operation_key="grp:2_running_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="24" operation="start" operation_key="rsc1:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="26" operation="start" operation_key="rsc2:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="28" operation="start" operation_key="grp:2_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <pseudo_event id="28" operation="start" operation_key="grp:2_start_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="32" operation="start" operation_key="grp-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="25" operation="monitor" operation_key="rsc1:2_monitor_10000" on_node="node1" on_node_uuid="1">
+ <primitive id="rsc1" long-id="rsc1:2" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="24" operation="start" operation_key="rsc1:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="24" operation="start" operation_key="rsc1:2_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="rsc1" long-id="rsc1:2" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="28" operation="start" operation_key="grp:2_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="27" operation="monitor" operation_key="rsc2:2_monitor_10000" on_node="node1" on_node_uuid="1">
+ <primitive id="rsc2" long-id="rsc2:2" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="26" operation="start" operation_key="rsc2:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="26" operation="start" operation_key="rsc2:2_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="rsc2" long-id="rsc2:2" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="24" operation="start" operation_key="rsc1:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="28" operation="start" operation_key="grp:2_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="6" priority="1000000">
+ <action_set>
+ <pseudo_event id="33" operation="running" operation_key="grp-clone_running_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="29" operation="running" operation_key="grp:2_running_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="32" operation="start" operation_key="grp-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <pseudo_event id="32" operation="start" operation_key="grp-clone_start_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-6.exp b/cts/scheduler/exp/clone-recover-no-shuffle-6.exp
new file mode 100644
index 0000000..6b6ed07
--- /dev/null
+++ b/cts/scheduler/exp/clone-recover-no-shuffle-6.exp
@@ -0,0 +1,171 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="31" operation="start" operation_key="base:2_start_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node1">
+ <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_physical_host="node1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="19" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="21" operation="start" operation_key="base-bundle-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="32" operation="start" operation_key="base-bundle-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1" priority="1000000">
+ <action_set>
+ <pseudo_event id="33" operation="running" operation_key="base-bundle-clone_running_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="31" operation="start" operation_key="base:2_start_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="32" operation="start" operation_key="base-bundle-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <pseudo_event id="32" operation="start" operation_key="base-bundle-clone_start_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="23" operation="start" operation_key="base-bundle_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="20" operation="monitor" operation_key="base-bundle-podman-2_monitor_60000" on_node="node1" on_node_uuid="1">
+ <primitive id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman"/>
+ <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="localhost/pcmktest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/base-bundle-2" reuse="false" run_cmd="/usr/sbin/pacemaker-remoted" run_opts=" -e PCMK_stderr=1 -e PCMK_remote_port=3121 -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/base-bundle-2:/var/log -p 3121:3121 "/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="19" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="19" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman"/>
+ <attributes CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="localhost/pcmktest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/base-bundle-2" reuse="false" run_cmd="/usr/sbin/pacemaker-remoted" run_opts=" -e PCMK_stderr=1 -e PCMK_remote_port=3121 -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/base-bundle-2:/var/log -p 3121:3121 "/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="23" operation="start" operation_key="base-bundle_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="22" operation="monitor" operation_key="base-bundle-2_monitor_30000" on_node="node1" on_node_uuid="1">
+ <primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="base-bundle-podman-2" CRM_meta_interval="30000" CRM_meta_name="monitor" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="30000" addr="node1" port="3121"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="21" operation="start" operation_key="base-bundle-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="6">
+ <action_set>
+ <rsc_op id="21" operation="start" operation_key="base-bundle-2_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="base-bundle-podman-2" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" addr="node1" port="3121"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="6" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="7" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="8" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node3" on_node_uuid="3"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="19" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <rsc_op id="8" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node3" on_node_uuid="3">
+ <primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="base-bundle-podman-2" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="node1" port="3121"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="19" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8">
+ <action_set>
+ <rsc_op id="7" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node2" on_node_uuid="2">
+ <primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="base-bundle-podman-2" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="node1" port="3121"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="19" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <rsc_op id="6" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node1" on_node_uuid="1">
+ <primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="base-bundle-podman-2" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="node1" port="3121"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="19" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="10" priority="1000000">
+ <action_set>
+ <pseudo_event id="24" operation="running" operation_key="base-bundle_running_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="19" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="33" operation="running" operation_key="base-bundle-clone_running_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="11">
+ <action_set>
+ <pseudo_event id="23" operation="start" operation_key="base-bundle_start_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-7.exp b/cts/scheduler/exp/clone-recover-no-shuffle-7.exp
new file mode 100644
index 0000000..870ed54
--- /dev/null
+++ b/cts/scheduler/exp/clone-recover-no-shuffle-7.exp
@@ -0,0 +1,162 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="11" operation="monitor" operation_key="dummy_monitor_11000" internal_operation_key="dummy:1_monitor_11000" on_node="node2" on_node_uuid="2">
+ <primitive id="dummy" long-id="dummy:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="11000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="9" operation="demote" operation_key="dummy_demote_0" internal_operation_key="dummy:1_demote_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="9" operation="demote" operation_key="dummy_demote_0" internal_operation_key="dummy:1_demote_0" on_node="node2" on_node_uuid="2">
+ <primitive id="dummy" long-id="dummy:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="demote" CRM_meta_notify="false" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="10000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="3" operation="cancel" operation_key="dummy_monitor_10000" internal_operation_key="dummy:1_monitor_10000" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="21" operation="demote" operation_key="dummy-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="3" operation="cancel" operation_key="dummy_monitor_10000" internal_operation_key="dummy:1_monitor_10000" on_node="node2" on_node_uuid="2">
+ <primitive id="dummy" long-id="dummy:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="14" operation="monitor" operation_key="dummy:2_monitor_10000" on_node="node1" on_node_uuid="1">
+ <primitive id="dummy" long-id="dummy:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="8" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="12" operation="start" operation_key="dummy:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="13" operation="promote" operation_key="dummy:2_promote_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="13" operation="promote" operation_key="dummy:2_promote_0" on_node="node1" on_node_uuid="1">
+ <primitive id="dummy" long-id="dummy:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="promote" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="10000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="12" operation="start" operation_key="dummy:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="19" operation="promote" operation_key="dummy-clone_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="12" operation="start" operation_key="dummy:2_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="dummy" long-id="dummy:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="15" operation="start" operation_key="dummy-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="6" priority="1000000">
+ <action_set>
+ <pseudo_event id="22" operation="demoted" operation_key="dummy-clone_demoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="9" operation="demote" operation_key="dummy_demote_0" internal_operation_key="dummy:1_demote_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="21" operation="demote" operation_key="dummy-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <pseudo_event id="21" operation="demote" operation_key="dummy-clone_demote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="8" priority="1000000">
+ <action_set>
+ <pseudo_event id="20" operation="promoted" operation_key="dummy-clone_promoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="13" operation="promote" operation_key="dummy:2_promote_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <pseudo_event id="19" operation="promote" operation_key="dummy-clone_promote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="16" operation="running" operation_key="dummy-clone_running_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="22" operation="demoted" operation_key="dummy-clone_demoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="10" priority="1000000">
+ <action_set>
+ <pseudo_event id="16" operation="running" operation_key="dummy-clone_running_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="12" operation="start" operation_key="dummy:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="15" operation="start" operation_key="dummy-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="11">
+ <action_set>
+ <pseudo_event id="15" operation="start" operation_key="dummy-clone_start_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="22" operation="demoted" operation_key="dummy-clone_demoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-8.exp b/cts/scheduler/exp/clone-recover-no-shuffle-8.exp
new file mode 100644
index 0000000..763a2f0
--- /dev/null
+++ b/cts/scheduler/exp/clone-recover-no-shuffle-8.exp
@@ -0,0 +1,338 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <pseudo_event id="33" operation="demoted" operation_key="grp:1_demoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="21" operation="demote" operation_key="rsc1_demote_0" internal_operation_key="rsc1:1_demote_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="25" operation="demote" operation_key="rsc2_demote_0" internal_operation_key="rsc2:1_demote_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <pseudo_event id="32" operation="demote" operation_key="grp:1_demote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="56" operation="demote" operation_key="grp-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="23" operation="monitor" operation_key="rsc1_monitor_11000" internal_operation_key="rsc1:1_monitor_11000" on_node="node2" on_node_uuid="2">
+ <primitive id="rsc1" long-id="rsc1:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="11000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="21" operation="demote" operation_key="rsc1_demote_0" internal_operation_key="rsc1:1_demote_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="21" operation="demote" operation_key="rsc1_demote_0" internal_operation_key="rsc1:1_demote_0" on_node="node2" on_node_uuid="2">
+ <primitive id="rsc1" long-id="rsc1:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="demote" CRM_meta_notify="false" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="10000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="4" operation="cancel" operation_key="rsc1_monitor_10000" internal_operation_key="rsc1:1_monitor_10000" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="25" operation="demote" operation_key="rsc2_demote_0" internal_operation_key="rsc2:1_demote_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="32" operation="demote" operation_key="grp:1_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="4" operation="cancel" operation_key="rsc1_monitor_10000" internal_operation_key="rsc1:1_monitor_10000" on_node="node2" on_node_uuid="2">
+ <primitive id="rsc1" long-id="rsc1:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="27" operation="monitor" operation_key="rsc2_monitor_11000" internal_operation_key="rsc2:1_monitor_11000" on_node="node2" on_node_uuid="2">
+ <primitive id="rsc2" long-id="rsc2:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="11000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="25" operation="demote" operation_key="rsc2_demote_0" internal_operation_key="rsc2:1_demote_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="6">
+ <action_set>
+ <rsc_op id="25" operation="demote" operation_key="rsc2_demote_0" internal_operation_key="rsc2:1_demote_0" on_node="node2" on_node_uuid="2">
+ <primitive id="rsc2" long-id="rsc2:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="demote" CRM_meta_notify="false" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="10000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="5" operation="cancel" operation_key="rsc2_monitor_10000" internal_operation_key="rsc2:1_monitor_10000" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="32" operation="demote" operation_key="grp:1_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <rsc_op id="5" operation="cancel" operation_key="rsc2_monitor_10000" internal_operation_key="rsc2:1_monitor_10000" on_node="node2" on_node_uuid="2">
+ <primitive id="rsc2" long-id="rsc2:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="8">
+ <action_set>
+ <pseudo_event id="49" operation="promoted" operation_key="grp:2_promoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="37" operation="promote" operation_key="rsc1:2_promote_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="40" operation="promote" operation_key="rsc2:2_promote_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <pseudo_event id="48" operation="promote" operation_key="grp:2_promote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="54" operation="promote" operation_key="grp-clone_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="10">
+ <action_set>
+ <pseudo_event id="43" operation="running" operation_key="grp:2_running_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="36" operation="start" operation_key="rsc1:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="39" operation="start" operation_key="rsc2:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="42" operation="start" operation_key="grp:2_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="11">
+ <action_set>
+ <pseudo_event id="42" operation="start" operation_key="grp:2_start_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="50" operation="start" operation_key="grp-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="12">
+ <action_set>
+ <rsc_op id="38" operation="monitor" operation_key="rsc1:2_monitor_10000" on_node="node1" on_node_uuid="1">
+ <primitive id="rsc1" long-id="rsc1:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="8" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="36" operation="start" operation_key="rsc1:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="37" operation="promote" operation_key="rsc1:2_promote_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="13">
+ <action_set>
+ <rsc_op id="37" operation="promote" operation_key="rsc1:2_promote_0" on_node="node1" on_node_uuid="1">
+ <primitive id="rsc1" long-id="rsc1:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="promote" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="10000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="36" operation="start" operation_key="rsc1:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="48" operation="promote" operation_key="grp:2_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="14">
+ <action_set>
+ <rsc_op id="36" operation="start" operation_key="rsc1:2_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="rsc1" long-id="rsc1:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="42" operation="start" operation_key="grp:2_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="15">
+ <action_set>
+ <rsc_op id="41" operation="monitor" operation_key="rsc2:2_monitor_10000" on_node="node1" on_node_uuid="1">
+ <primitive id="rsc2" long-id="rsc2:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="8" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="39" operation="start" operation_key="rsc2:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="40" operation="promote" operation_key="rsc2:2_promote_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="16">
+ <action_set>
+ <rsc_op id="40" operation="promote" operation_key="rsc2:2_promote_0" on_node="node1" on_node_uuid="1">
+ <primitive id="rsc2" long-id="rsc2:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="promote" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="10000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="37" operation="promote" operation_key="rsc1:2_promote_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="39" operation="start" operation_key="rsc2:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="48" operation="promote" operation_key="grp:2_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="17">
+ <action_set>
+ <rsc_op id="39" operation="start" operation_key="rsc2:2_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="rsc2" long-id="rsc2:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="36" operation="start" operation_key="rsc1:2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="42" operation="start" operation_key="grp:2_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="18" priority="1000000">
+ <action_set>
+ <pseudo_event id="57" operation="demoted" operation_key="grp-clone_demoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="33" operation="demoted" operation_key="grp:1_demoted_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="56" operation="demote" operation_key="grp-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="19">
+ <action_set>
+ <pseudo_event id="56" operation="demote" operation_key="grp-clone_demote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="20" priority="1000000">
+ <action_set>
+ <pseudo_event id="55" operation="promoted" operation_key="grp-clone_promoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="49" operation="promoted" operation_key="grp:2_promoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="21">
+ <action_set>
+ <pseudo_event id="54" operation="promote" operation_key="grp-clone_promote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="51" operation="running" operation_key="grp-clone_running_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="57" operation="demoted" operation_key="grp-clone_demoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="22" priority="1000000">
+ <action_set>
+ <pseudo_event id="51" operation="running" operation_key="grp-clone_running_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="43" operation="running" operation_key="grp:2_running_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="50" operation="start" operation_key="grp-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="23">
+ <action_set>
+ <pseudo_event id="50" operation="start" operation_key="grp-clone_start_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="57" operation="demoted" operation_key="grp-clone_demoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-9.exp b/cts/scheduler/exp/clone-recover-no-shuffle-9.exp
new file mode 100644
index 0000000..e249bc7
--- /dev/null
+++ b/cts/scheduler/exp/clone-recover-no-shuffle-9.exp
@@ -0,0 +1,364 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="31" operation="monitor" operation_key="base:2_monitor_15000" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node1">
+ <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_op_target_rc="8" CRM_meta_physical_host="node1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="15000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="23" operation="start" operation_key="base-bundle-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="29" operation="start" operation_key="base:2_start_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="30" operation="promote" operation_key="base:2_promote_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="30" operation="promote" operation_key="base:2_promote_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node1">
+ <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_physical_host="node1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="21" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="23" operation="start" operation_key="base-bundle-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="29" operation="start" operation_key="base:2_start_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="42" operation="promote" operation_key="base-bundle-clone_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="29" operation="start" operation_key="base:2_start_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node1">
+ <primitive id="base" long-id="base:2" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-2" CRM_meta_on_node_uuid="base-bundle-2" CRM_meta_physical_host="node1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="21" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="23" operation="start" operation_key="base-bundle-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="38" operation="start" operation_key="base-bundle-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="35" operation="monitor" operation_key="base_monitor_16000" internal_operation_key="base:1_monitor_16000" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2">
+ <primitive id="base" long-id="base:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="16000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-1" CRM_meta_on_node_uuid="base-bundle-1" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Unpromoted" CRM_meta_timeout="16000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="33" operation="demote" operation_key="base_demote_0" internal_operation_key="base:1_demote_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="33" operation="demote" operation_key="base_demote_0" internal_operation_key="base:1_demote_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2">
+ <primitive id="base" long-id="base:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-1" CRM_meta_on_node_uuid="base-bundle-1" CRM_meta_physical_host="node2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="6" operation="cancel" operation_key="base_monitor_15000" internal_operation_key="base:1_monitor_15000" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="44" operation="demote" operation_key="base-bundle-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="6" operation="cancel" operation_key="base_monitor_15000" internal_operation_key="base:1_monitor_15000" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2">
+ <primitive id="base" long-id="base:1" class="ocf" provider="pacemaker" type="Stateful"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="15000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="base-bundle-1" CRM_meta_on_node_uuid="base-bundle-1" CRM_meta_operation="monitor" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="15000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="6" priority="1000000">
+ <action_set>
+ <pseudo_event id="45" operation="demoted" operation_key="base-bundle-clone_demoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="33" operation="demote" operation_key="base_demote_0" internal_operation_key="base:1_demote_0" on_node="base-bundle-1" on_node_uuid="base-bundle-1" router_node="node2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="44" operation="demote" operation_key="base-bundle-clone_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <pseudo_event id="44" operation="demote" operation_key="base-bundle-clone_demote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="48" operation="demote" operation_key="base-bundle_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8" priority="1000000">
+ <action_set>
+ <pseudo_event id="43" operation="promoted" operation_key="base-bundle-clone_promoted_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="30" operation="promote" operation_key="base:2_promote_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <pseudo_event id="42" operation="promote" operation_key="base-bundle-clone_promote_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="39" operation="running" operation_key="base-bundle-clone_running_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="45" operation="demoted" operation_key="base-bundle-clone_demoted_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="46" operation="promote" operation_key="base-bundle_promote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="10" priority="1000000">
+ <action_set>
+ <pseudo_event id="39" operation="running" operation_key="base-bundle-clone_running_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="29" operation="start" operation_key="base:2_start_0" on_node="base-bundle-2" on_node_uuid="base-bundle-2" router_node="node1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="38" operation="start" operation_key="base-bundle-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="11">
+ <action_set>
+ <pseudo_event id="38" operation="start" operation_key="base-bundle-clone_start_0">
+ <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="25" operation="start" operation_key="base-bundle_start_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="45" operation="demoted" operation_key="base-bundle-clone_demoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="12">
+ <action_set>
+ <rsc_op id="22" operation="monitor" operation_key="base-bundle-podman-2_monitor_60000" on_node="node1" on_node_uuid="1">
+ <primitive id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman"/>
+ <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="localhost/pcmktest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/base-bundle-2" reuse="false" run_cmd="/usr/sbin/pacemaker-remoted" run_opts=" -e PCMK_stderr=1 -e PCMK_remote_port=3121 -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/base-bundle-2:/var/log -p 3121:3121 "/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="21" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="13">
+ <action_set>
+ <rsc_op id="21" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman"/>
+ <attributes CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" allow_pull="true" force_kill="false" image="localhost/pcmktest" monitor_cmd="/bin/true" mount_points="/var/log/pacemaker/bundles/base-bundle-2" reuse="false" run_cmd="/usr/sbin/pacemaker-remoted" run_opts=" -e PCMK_stderr=1 -e PCMK_remote_port=3121 -v /etc/pacemaker/authkey:/etc/pacemaker/authkey -v /var/log/pacemaker/bundles/base-bundle-2:/var/log -p 3121:3121 "/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="25" operation="start" operation_key="base-bundle_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="14">
+ <action_set>
+ <rsc_op id="24" operation="monitor" operation_key="base-bundle-2_monitor_30000" on_node="node1" on_node_uuid="1">
+ <primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="base-bundle-podman-2" CRM_meta_interval="30000" CRM_meta_name="monitor" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="30000" addr="node1" port="3121"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="23" operation="start" operation_key="base-bundle-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="15">
+ <action_set>
+ <rsc_op id="23" operation="start" operation_key="base-bundle-2_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="base-bundle-podman-2" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" addr="node1" port="3121"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="8" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="9" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="10" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node3" on_node_uuid="3"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="21" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="16">
+ <action_set>
+ <rsc_op id="10" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node3" on_node_uuid="3">
+ <primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="base-bundle-podman-2" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="node1" port="3121"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="21" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="17">
+ <action_set>
+ <rsc_op id="9" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node2" on_node_uuid="2">
+ <primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="base-bundle-podman-2" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="node1" port="3121"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="21" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="18">
+ <action_set>
+ <rsc_op id="8" operation="monitor" operation_key="base-bundle-2_monitor_0" on_node="node1" on_node_uuid="1">
+ <primitive id="base-bundle-2" class="ocf" provider="pacemaker" type="remote"/>
+ <attributes CRM_meta_container="base-bundle-podman-2" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" addr="node1" port="3121"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="21" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="19" priority="1000000">
+ <action_set>
+ <pseudo_event id="49" operation="demoted" operation_key="base-bundle_demoted_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="45" operation="demoted" operation_key="base-bundle-clone_demoted_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="48" operation="demote" operation_key="base-bundle_demote_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="20">
+ <action_set>
+ <pseudo_event id="48" operation="demote" operation_key="base-bundle_demote_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="21" priority="1000000">
+ <action_set>
+ <pseudo_event id="47" operation="promoted" operation_key="base-bundle_promoted_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="43" operation="promoted" operation_key="base-bundle-clone_promoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="22">
+ <action_set>
+ <pseudo_event id="46" operation="promote" operation_key="base-bundle_promote_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="26" operation="running" operation_key="base-bundle_running_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="49" operation="demoted" operation_key="base-bundle_demoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="23" priority="1000000">
+ <action_set>
+ <pseudo_event id="26" operation="running" operation_key="base-bundle_running_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="21" operation="start" operation_key="base-bundle-podman-2_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="39" operation="running" operation_key="base-bundle-clone_running_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="24">
+ <action_set>
+ <pseudo_event id="25" operation="start" operation_key="base-bundle_start_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="49" operation="demoted" operation_key="base-bundle_demoted_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/coloc-with-inner-group-member.exp b/cts/scheduler/exp/coloc-with-inner-group-member.exp
new file mode 100644
index 0000000..bb8f779
--- /dev/null
+++ b/cts/scheduler/exp/coloc-with-inner-group-member.exp
@@ -0,0 +1,202 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="1" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <pseudo_event id="21" operation="stopped" operation_key="grp_stopped_0">
+ <attributes CRM_meta_timeout="90000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="10" operation="stop" operation_key="foo_stop_0" on_node="rhel8-4" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="13" operation="stop" operation_key="bar_stop_0" on_node="rhel8-4" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="16" operation="stop" operation_key="vip_stop_0" on_node="rhel8-3" on_node_uuid="3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="20" operation="stop" operation_key="grp_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <pseudo_event id="20" operation="stop" operation_key="grp_stop_0">
+ <attributes CRM_meta_timeout="90000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <pseudo_event id="19" operation="running" operation_key="grp_running_0">
+ <attributes CRM_meta_timeout="90000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="11" operation="start" operation_key="foo_start_0" on_node="rhel8-3" on_node_uuid="3"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="14" operation="start" operation_key="bar_start_0" on_node="rhel8-3" on_node_uuid="3"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="17" operation="start" operation_key="vip_start_0" on_node="rhel8-3" on_node_uuid="3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="18" operation="start" operation_key="grp_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <pseudo_event id="18" operation="start" operation_key="grp_start_0">
+ <attributes CRM_meta_timeout="90000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="21" operation="stopped" operation_key="grp_stopped_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="12" operation="monitor" operation_key="foo_monitor_10000" on_node="rhel8-3" on_node_uuid="3">
+ <primitive id="foo" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="rhel8-3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="11" operation="start" operation_key="foo_start_0" on_node="rhel8-3" on_node_uuid="3"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="11" operation="start" operation_key="foo_start_0" on_node="rhel8-3" on_node_uuid="3">
+ <primitive id="foo" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_name="start" CRM_meta_on_node="rhel8-3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="10" operation="stop" operation_key="foo_stop_0" on_node="rhel8-4" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="18" operation="start" operation_key="grp_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="6">
+ <action_set>
+ <rsc_op id="10" operation="stop" operation_key="foo_stop_0" on_node="rhel8-4" on_node_uuid="4">
+ <primitive id="foo" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="rhel8-4" CRM_meta_on_node_uuid="4" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="13" operation="stop" operation_key="bar_stop_0" on_node="rhel8-4" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="20" operation="stop" operation_key="grp_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <rsc_op id="15" operation="monitor" operation_key="bar_monitor_10000" on_node="rhel8-3" on_node_uuid="3">
+ <primitive id="bar" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="rhel8-3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="14" operation="start" operation_key="bar_start_0" on_node="rhel8-3" on_node_uuid="3"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8">
+ <action_set>
+ <rsc_op id="14" operation="start" operation_key="bar_start_0" on_node="rhel8-3" on_node_uuid="3">
+ <primitive id="bar" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_name="start" CRM_meta_on_node="rhel8-3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="11" operation="start" operation_key="foo_start_0" on_node="rhel8-3" on_node_uuid="3"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="13" operation="stop" operation_key="bar_stop_0" on_node="rhel8-4" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="18" operation="start" operation_key="grp_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <rsc_op id="13" operation="stop" operation_key="bar_stop_0" on_node="rhel8-4" on_node_uuid="4">
+ <primitive id="bar" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="rhel8-4" CRM_meta_on_node_uuid="4" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="16" operation="stop" operation_key="vip_stop_0" on_node="rhel8-3" on_node_uuid="3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="20" operation="stop" operation_key="grp_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="10">
+ <action_set>
+ <rsc_op id="17" operation="start" operation_key="vip_start_0" on_node="rhel8-3" on_node_uuid="3">
+ <primitive id="vip" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_name="start" CRM_meta_on_node="rhel8-3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="14" operation="start" operation_key="bar_start_0" on_node="rhel8-3" on_node_uuid="3"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="16" operation="stop" operation_key="vip_stop_0" on_node="rhel8-3" on_node_uuid="3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="18" operation="start" operation_key="grp_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="11">
+ <action_set>
+ <rsc_op id="16" operation="stop" operation_key="vip_stop_0" on_node="rhel8-3" on_node_uuid="3">
+ <primitive id="vip" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="rhel8-3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="20" operation="stop" operation_key="grp_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="12">
+ <action_set>
+ <rsc_op id="3" operation="monitor" operation_key="vip_monitor_10000" on_node="rhel8-3" on_node_uuid="3">
+ <primitive id="vip" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="rhel8-3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="17" operation="start" operation_key="vip_start_0" on_node="rhel8-3" on_node_uuid="3"/>
+ </trigger>
+ </inputs>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/group-anticolocation-2.exp b/cts/scheduler/exp/group-anticolocation-2.exp
new file mode 100644
index 0000000..4e57e18
--- /dev/null
+++ b/cts/scheduler/exp/group-anticolocation-2.exp
@@ -0,0 +1,148 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <pseudo_event id="25" operation="stopped" operation_key="group2_stopped_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="4" operation="stop" operation_key="member2b_stop_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="17" operation="stop" operation_key="member2a_stop_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="24" operation="stop" operation_key="group2_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <pseudo_event id="24" operation="stop" operation_key="group2_stop_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <pseudo_event id="23" operation="running" operation_key="group2_running_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="18" operation="start" operation_key="member2a_start_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="20" operation="start" operation_key="member2b_start_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="22" operation="start" operation_key="group2_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <pseudo_event id="22" operation="start" operation_key="group2_start_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="25" operation="stopped" operation_key="group2_stopped_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="19" operation="monitor" operation_key="member2a_monitor_10000" on_node="node2" on_node_uuid="2">
+ <primitive id="member2a" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="18" operation="start" operation_key="member2a_start_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="18" operation="start" operation_key="member2a_start_0" on_node="node2" on_node_uuid="2">
+ <primitive id="member2a" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_name="start" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="17" operation="stop" operation_key="member2a_stop_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="22" operation="start" operation_key="group2_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="6">
+ <action_set>
+ <rsc_op id="17" operation="stop" operation_key="member2a_stop_0" on_node="node1" on_node_uuid="1">
+ <primitive id="member2a" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="4" operation="stop" operation_key="member2b_stop_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="24" operation="stop" operation_key="group2_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <rsc_op id="21" operation="monitor" operation_key="member2b_monitor_10000" on_node="node2" on_node_uuid="2">
+ <primitive id="member2b" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="20" operation="start" operation_key="member2b_start_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8">
+ <action_set>
+ <rsc_op id="20" operation="start" operation_key="member2b_start_0" on_node="node2" on_node_uuid="2">
+ <primitive id="member2b" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_name="start" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="4" operation="stop" operation_key="member2b_stop_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="18" operation="start" operation_key="member2a_start_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="22" operation="start" operation_key="group2_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <rsc_op id="4" operation="stop" operation_key="member2b_stop_0" on_node="node1" on_node_uuid="1">
+ <primitive id="member2b" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="24" operation="stop" operation_key="group2_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/group-anticolocation-3.exp b/cts/scheduler/exp/group-anticolocation-3.exp
new file mode 100644
index 0000000..066b3bd
--- /dev/null
+++ b/cts/scheduler/exp/group-anticolocation-3.exp
@@ -0,0 +1,38 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <pseudo_event id="22" operation="stopped" operation_key="group2_stopped_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="4" operation="stop" operation_key="member2b_stop_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="21" operation="stop" operation_key="group2_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <pseudo_event id="21" operation="stop" operation_key="group2_stop_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="4" operation="stop" operation_key="member2b_stop_0" on_node="node1" on_node_uuid="1">
+ <primitive id="member2b" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="21" operation="stop" operation_key="group2_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/group-anticolocation-4.exp b/cts/scheduler/exp/group-anticolocation-4.exp
new file mode 100644
index 0000000..4e57e18
--- /dev/null
+++ b/cts/scheduler/exp/group-anticolocation-4.exp
@@ -0,0 +1,148 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <pseudo_event id="25" operation="stopped" operation_key="group2_stopped_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="4" operation="stop" operation_key="member2b_stop_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="17" operation="stop" operation_key="member2a_stop_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="24" operation="stop" operation_key="group2_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <pseudo_event id="24" operation="stop" operation_key="group2_stop_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <pseudo_event id="23" operation="running" operation_key="group2_running_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="18" operation="start" operation_key="member2a_start_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="20" operation="start" operation_key="member2b_start_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="22" operation="start" operation_key="group2_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <pseudo_event id="22" operation="start" operation_key="group2_start_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="25" operation="stopped" operation_key="group2_stopped_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="19" operation="monitor" operation_key="member2a_monitor_10000" on_node="node2" on_node_uuid="2">
+ <primitive id="member2a" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="18" operation="start" operation_key="member2a_start_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="18" operation="start" operation_key="member2a_start_0" on_node="node2" on_node_uuid="2">
+ <primitive id="member2a" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_name="start" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="17" operation="stop" operation_key="member2a_stop_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="22" operation="start" operation_key="group2_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="6">
+ <action_set>
+ <rsc_op id="17" operation="stop" operation_key="member2a_stop_0" on_node="node1" on_node_uuid="1">
+ <primitive id="member2a" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="4" operation="stop" operation_key="member2b_stop_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="24" operation="stop" operation_key="group2_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <rsc_op id="21" operation="monitor" operation_key="member2b_monitor_10000" on_node="node2" on_node_uuid="2">
+ <primitive id="member2b" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="20" operation="start" operation_key="member2b_start_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8">
+ <action_set>
+ <rsc_op id="20" operation="start" operation_key="member2b_start_0" on_node="node2" on_node_uuid="2">
+ <primitive id="member2b" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_name="start" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="4" operation="stop" operation_key="member2b_stop_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="18" operation="start" operation_key="member2a_start_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="22" operation="start" operation_key="group2_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <rsc_op id="4" operation="stop" operation_key="member2b_stop_0" on_node="node1" on_node_uuid="1">
+ <primitive id="member2b" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="24" operation="stop" operation_key="group2_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/group-anticolocation-5.exp b/cts/scheduler/exp/group-anticolocation-5.exp
new file mode 100644
index 0000000..2394b4e
--- /dev/null
+++ b/cts/scheduler/exp/group-anticolocation-5.exp
@@ -0,0 +1,148 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <pseudo_event id="25" operation="stopped" operation_key="group2_stopped_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="4" operation="stop" operation_key="member2b_stop_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="17" operation="stop" operation_key="member2a_stop_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="24" operation="stop" operation_key="group2_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <pseudo_event id="24" operation="stop" operation_key="group2_stop_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <pseudo_event id="23" operation="running" operation_key="group2_running_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="18" operation="start" operation_key="member2a_start_0" on_node="node3" on_node_uuid="3"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="20" operation="start" operation_key="member2b_start_0" on_node="node3" on_node_uuid="3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="22" operation="start" operation_key="group2_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <pseudo_event id="22" operation="start" operation_key="group2_start_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="25" operation="stopped" operation_key="group2_stopped_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="19" operation="monitor" operation_key="member2a_monitor_10000" on_node="node3" on_node_uuid="3">
+ <primitive id="member2a" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="18" operation="start" operation_key="member2a_start_0" on_node="node3" on_node_uuid="3"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="18" operation="start" operation_key="member2a_start_0" on_node="node3" on_node_uuid="3">
+ <primitive id="member2a" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_name="start" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="17" operation="stop" operation_key="member2a_stop_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="22" operation="start" operation_key="group2_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="6">
+ <action_set>
+ <rsc_op id="17" operation="stop" operation_key="member2a_stop_0" on_node="node1" on_node_uuid="1">
+ <primitive id="member2a" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="4" operation="stop" operation_key="member2b_stop_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="24" operation="stop" operation_key="group2_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <rsc_op id="21" operation="monitor" operation_key="member2b_monitor_10000" on_node="node3" on_node_uuid="3">
+ <primitive id="member2b" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="20" operation="start" operation_key="member2b_start_0" on_node="node3" on_node_uuid="3"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8">
+ <action_set>
+ <rsc_op id="20" operation="start" operation_key="member2b_start_0" on_node="node3" on_node_uuid="3">
+ <primitive id="member2b" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_name="start" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="4" operation="stop" operation_key="member2b_stop_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="18" operation="start" operation_key="member2a_start_0" on_node="node3" on_node_uuid="3"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="22" operation="start" operation_key="group2_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <rsc_op id="4" operation="stop" operation_key="member2b_stop_0" on_node="node1" on_node_uuid="1">
+ <primitive id="member2b" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="24" operation="stop" operation_key="group2_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/group-anticolocation.exp b/cts/scheduler/exp/group-anticolocation.exp
index 4e57e18..5a37559 100644
--- a/cts/scheduler/exp/group-anticolocation.exp
+++ b/cts/scheduler/exp/group-anticolocation.exp
@@ -1,25 +1,25 @@
<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
<synapse id="0">
<action_set>
- <pseudo_event id="25" operation="stopped" operation_key="group2_stopped_0">
+ <pseudo_event id="18" operation="stopped" operation_key="group1_stopped_0">
<attributes CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <rsc_op id="4" operation="stop" operation_key="member2b_stop_0" on_node="node1" on_node_uuid="1"/>
+ <rsc_op id="9" operation="stop" operation_key="member1a_stop_0" on_node="node2" on_node_uuid="2"/>
</trigger>
<trigger>
- <rsc_op id="17" operation="stop" operation_key="member2a_stop_0" on_node="node1" on_node_uuid="1"/>
+ <rsc_op id="12" operation="stop" operation_key="member1b_stop_0" on_node="node2" on_node_uuid="2"/>
</trigger>
<trigger>
- <pseudo_event id="24" operation="stop" operation_key="group2_stop_0"/>
+ <pseudo_event id="17" operation="stop" operation_key="group1_stop_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="1">
<action_set>
- <pseudo_event id="24" operation="stop" operation_key="group2_stop_0">
+ <pseudo_event id="17" operation="stop" operation_key="group1_stop_0">
<attributes CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
@@ -27,66 +27,212 @@
</synapse>
<synapse id="2">
<action_set>
- <pseudo_event id="23" operation="running" operation_key="group2_running_0">
+ <pseudo_event id="16" operation="running" operation_key="group1_running_0">
<attributes CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <rsc_op id="18" operation="start" operation_key="member2a_start_0" on_node="node2" on_node_uuid="2"/>
+ <rsc_op id="10" operation="start" operation_key="member1a_start_0" on_node="node1" on_node_uuid="1"/>
</trigger>
<trigger>
- <rsc_op id="20" operation="start" operation_key="member2b_start_0" on_node="node2" on_node_uuid="2"/>
+ <rsc_op id="13" operation="start" operation_key="member1b_start_0" on_node="node1" on_node_uuid="1"/>
</trigger>
<trigger>
- <pseudo_event id="22" operation="start" operation_key="group2_start_0"/>
+ <pseudo_event id="15" operation="start" operation_key="group1_start_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="3">
<action_set>
- <pseudo_event id="22" operation="start" operation_key="group2_start_0">
+ <pseudo_event id="15" operation="start" operation_key="group1_start_0">
<attributes CRM_meta_timeout="20000" />
</pseudo_event>
</action_set>
<inputs>
<trigger>
- <pseudo_event id="25" operation="stopped" operation_key="group2_stopped_0"/>
+ <pseudo_event id="18" operation="stopped" operation_key="group1_stopped_0"/>
</trigger>
</inputs>
</synapse>
<synapse id="4">
<action_set>
- <rsc_op id="19" operation="monitor" operation_key="member2a_monitor_10000" on_node="node2" on_node_uuid="2">
+ <rsc_op id="11" operation="monitor" operation_key="member1a_monitor_10000" on_node="node1" on_node_uuid="1">
+ <primitive id="member1a" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="10" operation="start" operation_key="member1a_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="10" operation="start" operation_key="member1a_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="member1a" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_name="start" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="9" operation="stop" operation_key="member1a_stop_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="15" operation="start" operation_key="group1_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="6">
+ <action_set>
+ <rsc_op id="9" operation="stop" operation_key="member1a_stop_0" on_node="node2" on_node_uuid="2">
+ <primitive id="member1a" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="12" operation="stop" operation_key="member1b_stop_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="17" operation="stop" operation_key="group1_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <rsc_op id="14" operation="monitor" operation_key="member1b_monitor_10000" on_node="node1" on_node_uuid="1">
+ <primitive id="member1b" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="13" operation="start" operation_key="member1b_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8">
+ <action_set>
+ <rsc_op id="13" operation="start" operation_key="member1b_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="member1b" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_name="start" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="10" operation="start" operation_key="member1a_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="12" operation="stop" operation_key="member1b_stop_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="15" operation="start" operation_key="group1_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <rsc_op id="12" operation="stop" operation_key="member1b_stop_0" on_node="node2" on_node_uuid="2">
+ <primitive id="member1b" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_name="stop" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="17" operation="stop" operation_key="group1_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="10">
+ <action_set>
+ <pseudo_event id="27" operation="stopped" operation_key="group2_stopped_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="4" operation="stop" operation_key="member2b_stop_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="19" operation="stop" operation_key="member2a_stop_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="26" operation="stop" operation_key="group2_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="11">
+ <action_set>
+ <pseudo_event id="26" operation="stop" operation_key="group2_stop_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="12">
+ <action_set>
+ <pseudo_event id="25" operation="running" operation_key="group2_running_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="20" operation="start" operation_key="member2a_start_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="22" operation="start" operation_key="member2b_start_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="24" operation="start" operation_key="group2_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="13">
+ <action_set>
+ <pseudo_event id="24" operation="start" operation_key="group2_start_0">
+ <attributes CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="27" operation="stopped" operation_key="group2_stopped_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="14">
+ <action_set>
+ <rsc_op id="21" operation="monitor" operation_key="member2a_monitor_10000" on_node="node2" on_node_uuid="2">
<primitive id="member2a" class="ocf" provider="pacemaker" type="Dummy"/>
<attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="18" operation="start" operation_key="member2a_start_0" on_node="node2" on_node_uuid="2"/>
+ <rsc_op id="20" operation="start" operation_key="member2a_start_0" on_node="node2" on_node_uuid="2"/>
</trigger>
</inputs>
</synapse>
- <synapse id="5">
+ <synapse id="15">
<action_set>
- <rsc_op id="18" operation="start" operation_key="member2a_start_0" on_node="node2" on_node_uuid="2">
+ <rsc_op id="20" operation="start" operation_key="member2a_start_0" on_node="node2" on_node_uuid="2">
<primitive id="member2a" class="ocf" provider="pacemaker" type="Dummy"/>
<attributes CRM_meta_name="start" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="17" operation="stop" operation_key="member2a_stop_0" on_node="node1" on_node_uuid="1"/>
+ <rsc_op id="19" operation="stop" operation_key="member2a_stop_0" on_node="node1" on_node_uuid="1"/>
</trigger>
<trigger>
- <pseudo_event id="22" operation="start" operation_key="group2_start_0"/>
+ <pseudo_event id="24" operation="start" operation_key="group2_start_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="6">
+ <synapse id="16">
<action_set>
- <rsc_op id="17" operation="stop" operation_key="member2a_stop_0" on_node="node1" on_node_uuid="1">
+ <rsc_op id="19" operation="stop" operation_key="member2a_stop_0" on_node="node1" on_node_uuid="1">
<primitive id="member2a" class="ocf" provider="pacemaker" type="Dummy"/>
<attributes CRM_meta_name="stop" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
</rsc_op>
@@ -96,26 +242,26 @@
<rsc_op id="4" operation="stop" operation_key="member2b_stop_0" on_node="node1" on_node_uuid="1"/>
</trigger>
<trigger>
- <pseudo_event id="24" operation="stop" operation_key="group2_stop_0"/>
+ <pseudo_event id="26" operation="stop" operation_key="group2_stop_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="7">
+ <synapse id="17">
<action_set>
- <rsc_op id="21" operation="monitor" operation_key="member2b_monitor_10000" on_node="node2" on_node_uuid="2">
+ <rsc_op id="23" operation="monitor" operation_key="member2b_monitor_10000" on_node="node2" on_node_uuid="2">
<primitive id="member2b" class="ocf" provider="pacemaker" type="Dummy"/>
<attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="20" operation="start" operation_key="member2b_start_0" on_node="node2" on_node_uuid="2"/>
+ <rsc_op id="22" operation="start" operation_key="member2b_start_0" on_node="node2" on_node_uuid="2"/>
</trigger>
</inputs>
</synapse>
- <synapse id="8">
+ <synapse id="18">
<action_set>
- <rsc_op id="20" operation="start" operation_key="member2b_start_0" on_node="node2" on_node_uuid="2">
+ <rsc_op id="22" operation="start" operation_key="member2b_start_0" on_node="node2" on_node_uuid="2">
<primitive id="member2b" class="ocf" provider="pacemaker" type="Dummy"/>
<attributes CRM_meta_name="start" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
</rsc_op>
@@ -125,14 +271,14 @@
<rsc_op id="4" operation="stop" operation_key="member2b_stop_0" on_node="node1" on_node_uuid="1"/>
</trigger>
<trigger>
- <rsc_op id="18" operation="start" operation_key="member2a_start_0" on_node="node2" on_node_uuid="2"/>
+ <rsc_op id="20" operation="start" operation_key="member2a_start_0" on_node="node2" on_node_uuid="2"/>
</trigger>
<trigger>
- <pseudo_event id="22" operation="start" operation_key="group2_start_0"/>
+ <pseudo_event id="24" operation="start" operation_key="group2_start_0"/>
</trigger>
</inputs>
</synapse>
- <synapse id="9">
+ <synapse id="19">
<action_set>
<rsc_op id="4" operation="stop" operation_key="member2b_stop_0" on_node="node1" on_node_uuid="1">
<primitive id="member2b" class="ocf" provider="pacemaker" type="Dummy"/>
@@ -141,7 +287,7 @@
</action_set>
<inputs>
<trigger>
- <pseudo_event id="24" operation="stop" operation_key="group2_stop_0"/>
+ <pseudo_event id="26" operation="stop" operation_key="group2_stop_0"/>
</trigger>
</inputs>
</synapse>
diff --git a/cts/scheduler/exp/inc4.exp b/cts/scheduler/exp/inc4.exp
index add43f8..7b1d121 100644
--- a/cts/scheduler/exp/inc4.exp
+++ b/cts/scheduler/exp/inc4.exp
@@ -45,9 +45,15 @@
<rsc_op id="9" operation="monitor" operation_key="child_rsc1:3_monitor_0" on_node="node2" on_node_uuid="uuid2"/>
</trigger>
<trigger>
+ <rsc_op id="10" operation="monitor" operation_key="child_rsc1:4_monitor_0" on_node="node2" on_node_uuid="uuid2"/>
+ </trigger>
+ <trigger>
<rsc_op id="17" operation="stop" operation_key="child_rsc1:3_stop_0" on_node="node1" on_node_uuid="uuid1"/>
</trigger>
<trigger>
+ <rsc_op id="19" operation="stop" operation_key="child_rsc1:4_stop_0" on_node="node1" on_node_uuid="uuid1"/>
+ </trigger>
+ <trigger>
<pseudo_event id="22" operation="stop" operation_key="rsc1_stop_0"/>
</trigger>
</inputs>
diff --git a/cts/scheduler/exp/no-promote-on-unrunnable-guest.exp b/cts/scheduler/exp/no-promote-on-unrunnable-guest.exp
index 351aec1..5eeb3d4 100644
--- a/cts/scheduler/exp/no-promote-on-unrunnable-guest.exp
+++ b/cts/scheduler/exp/no-promote-on-unrunnable-guest.exp
@@ -1,34 +1,5 @@
<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
- <synapse id="0">
- <action_set>
- <rsc_op id="261" operation="notify" operation_key="ovndb_servers_pre_notify_stop_0" internal_operation_key="ovndb_servers:0_pre_notify_stop_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0">
- <primitive id="ovndb_servers" long-id="ovndb_servers:0" class="ocf" provider="ovn" type="ovndb-servers"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="ovndb_servers:0" CRM_meta_notify_stop_uname="ovn-dbs-bundle-0" CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-0" CRM_meta_on_node_uuid="ovn-dbs-bundle-0" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="no" manage_northd="yes" master_ip="172.17.1.247" nb_master_port="6641" sb_master_port="6642"/>
- </rsc_op>
- </action_set>
- <inputs>
- <trigger>
- <pseudo_event id="200" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_stop_0"/>
- </trigger>
- </inputs>
- </synapse>
- <synapse id="1">
- <action_set>
- <rsc_op id="184" operation="stop" operation_key="ovndb_servers_stop_0" internal_operation_key="ovndb_servers:0_stop_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0">
- <primitive id="ovndb_servers" long-id="ovndb_servers:0" class="ocf" provider="ovn" type="ovndb-servers"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="stop" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="ovndb_servers:0" CRM_meta_notify_stop_uname="ovn-dbs-bundle-0" CRM_meta_notify_unpromoted_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-0" CRM_meta_on_node_uuid="ovn-dbs-bundle-0" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="200000" inactive_probe_interval="180000" listen_on_master_ip_only="no" manage_northd="yes" master_ip="172.17.1.247" nb_master_port="6641" sb_master_port="6642"/>
- </rsc_op>
- </action_set>
- <inputs>
- <trigger>
- <pseudo_event id="181" operation="stop" operation_key="ovn-dbs-bundle_stop_0"/>
- </trigger>
- <trigger>
- <pseudo_event id="194" operation="stop" operation_key="ovn-dbs-bundle-master_stop_0"/>
- </trigger>
- </inputs>
- </synapse>
- <synapse id="2" priority="1000000">
+ <synapse id="0" priority="1000000">
<action_set>
<rsc_op id="267" operation="notify" operation_key="ovndb_servers_post_notify_promote_0" internal_operation_key="ovndb_servers:1_post_notify_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1">
<primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
@@ -41,7 +12,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="3">
+ <synapse id="1">
<action_set>
<rsc_op id="266" operation="notify" operation_key="ovndb_servers_pre_notify_promote_0" internal_operation_key="ovndb_servers:1_pre_notify_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1">
<primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
@@ -54,9 +25,9 @@
</trigger>
</inputs>
</synapse>
- <synapse id="4" priority="1000000">
+ <synapse id="2" priority="1000000">
<action_set>
- <rsc_op id="263" operation="notify" operation_key="ovndb_servers_post_notify_stop_0" internal_operation_key="ovndb_servers:1_post_notify_stop_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1">
+ <rsc_op id="262" operation="notify" operation_key="ovndb_servers_post_notify_stop_0" internal_operation_key="ovndb_servers:1_post_notify_stop_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1">
<primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
<attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stopped" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="ovndb_servers:0" CRM_meta_notify_stop_uname="ovn-dbs-bundle-0" CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="no" manage_northd="yes" master_ip="172.17.1.247" nb_master_port="6641" sb_master_port="6642"/>
</rsc_op>
@@ -67,9 +38,9 @@
</trigger>
</inputs>
</synapse>
- <synapse id="5">
+ <synapse id="3">
<action_set>
- <rsc_op id="262" operation="notify" operation_key="ovndb_servers_pre_notify_stop_0" internal_operation_key="ovndb_servers:1_pre_notify_stop_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1">
+ <rsc_op id="261" operation="notify" operation_key="ovndb_servers_pre_notify_stop_0" internal_operation_key="ovndb_servers:1_pre_notify_stop_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1">
<primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
<attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="ovndb_servers:0" CRM_meta_notify_stop_uname="ovn-dbs-bundle-0" CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="no" manage_northd="yes" master_ip="172.17.1.247" nb_master_port="6641" sb_master_port="6642"/>
</rsc_op>
@@ -80,16 +51,16 @@
</trigger>
</inputs>
</synapse>
- <synapse id="6">
+ <synapse id="4">
<action_set>
- <rsc_op id="189" operation="monitor" operation_key="ovndb_servers_monitor_10000" internal_operation_key="ovndb_servers:1_monitor_10000" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1">
+ <rsc_op id="186" operation="monitor" operation_key="ovndb_servers_monitor_10000" internal_operation_key="ovndb_servers:1_monitor_10000" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1">
<primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
<attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="true" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_op_target_rc="8" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="60000" inactive_probe_interval="180000" listen_on_master_ip_only="no" manage_northd="yes" master_ip="172.17.1.247" nb_master_port="6641" sb_master_port="6642"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="188" operation="promote" operation_key="ovndb_servers_promote_0" internal_operation_key="ovndb_servers:1_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1"/>
+ <rsc_op id="185" operation="promote" operation_key="ovndb_servers_promote_0" internal_operation_key="ovndb_servers:1_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1"/>
</trigger>
<trigger>
<pseudo_event id="203" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_stopped_0"/>
@@ -99,9 +70,9 @@
</trigger>
</inputs>
</synapse>
- <synapse id="7">
+ <synapse id="5">
<action_set>
- <rsc_op id="188" operation="promote" operation_key="ovndb_servers_promote_0" internal_operation_key="ovndb_servers:1_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1">
+ <rsc_op id="185" operation="promote" operation_key="ovndb_servers_promote_0" internal_operation_key="ovndb_servers:1_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1">
<primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
<attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="promote" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="ovndb_servers:0" CRM_meta_notify_stop_uname="ovn-dbs-bundle-0" CRM_meta_notify_unpromoted_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-1" CRM_meta_on_node_uuid="ovn-dbs-bundle-1" CRM_meta_physical_host="controller-1" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="50000" inactive_probe_interval="180000" listen_on_master_ip_only="no" manage_northd="yes" master_ip="172.17.1.247" nb_master_port="6641" sb_master_port="6642"/>
</rsc_op>
@@ -115,7 +86,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="8">
+ <synapse id="6">
<action_set>
<rsc_op id="40" operation="cancel" operation_key="ovndb_servers_monitor_30000" internal_operation_key="ovndb_servers:1_monitor_30000" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1">
<primitive id="ovndb_servers" long-id="ovndb_servers:1" class="ocf" provider="ovn" type="ovndb-servers"/>
@@ -124,7 +95,7 @@
</action_set>
<inputs/>
</synapse>
- <synapse id="9" priority="1000000">
+ <synapse id="7" priority="1000000">
<action_set>
<rsc_op id="269" operation="notify" operation_key="ovndb_servers_post_notify_promote_0" internal_operation_key="ovndb_servers:2_post_notify_promote_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-2">
<primitive id="ovndb_servers" long-id="ovndb_servers:2" class="ocf" provider="ovn" type="ovndb-servers"/>
@@ -137,7 +108,7 @@
</trigger>
</inputs>
</synapse>
- <synapse id="10">
+ <synapse id="8">
<action_set>
<rsc_op id="268" operation="notify" operation_key="ovndb_servers_pre_notify_promote_0" internal_operation_key="ovndb_servers:2_pre_notify_promote_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-2">
<primitive id="ovndb_servers" long-id="ovndb_servers:2" class="ocf" provider="ovn" type="ovndb-servers"/>
@@ -150,9 +121,9 @@
</trigger>
</inputs>
</synapse>
- <synapse id="11" priority="1000000">
+ <synapse id="9" priority="1000000">
<action_set>
- <rsc_op id="265" operation="notify" operation_key="ovndb_servers_post_notify_stop_0" internal_operation_key="ovndb_servers:2_post_notify_stop_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-2">
+ <rsc_op id="264" operation="notify" operation_key="ovndb_servers_post_notify_stop_0" internal_operation_key="ovndb_servers:2_post_notify_stop_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-2">
<primitive id="ovndb_servers" long-id="ovndb_servers:2" class="ocf" provider="ovn" type="ovndb-servers"/>
<attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stopped" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="ovndb_servers:0" CRM_meta_notify_stop_uname="ovn-dbs-bundle-0" CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-2" CRM_meta_on_node_uuid="ovn-dbs-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="no" manage_northd="yes" master_ip="172.17.1.247" nb_master_port="6641" sb_master_port="6642"/>
</rsc_op>
@@ -163,9 +134,9 @@
</trigger>
</inputs>
</synapse>
- <synapse id="12">
+ <synapse id="10">
<action_set>
- <rsc_op id="264" operation="notify" operation_key="ovndb_servers_pre_notify_stop_0" internal_operation_key="ovndb_servers:2_pre_notify_stop_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-2">
+ <rsc_op id="263" operation="notify" operation_key="ovndb_servers_pre_notify_stop_0" internal_operation_key="ovndb_servers:2_pre_notify_stop_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-2">
<primitive id="ovndb_servers" long-id="ovndb_servers:2" class="ocf" provider="ovn" type="ovndb-servers"/>
<attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="ovndb_servers:0" CRM_meta_notify_stop_uname="ovn-dbs-bundle-0" CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-2" CRM_meta_on_node_uuid="ovn-dbs-bundle-2" CRM_meta_physical_host="controller-2" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="no" manage_northd="yes" master_ip="172.17.1.247" nb_master_port="6641" sb_master_port="6642"/>
</rsc_op>
@@ -176,6 +147,35 @@
</trigger>
</inputs>
</synapse>
+ <synapse id="11">
+ <action_set>
+ <rsc_op id="265" operation="notify" operation_key="ovndb_servers_pre_notify_stop_0" internal_operation_key="ovndb_servers:0_pre_notify_stop_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:0" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="notify" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_key_operation="stop" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="stop" CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="ovndb_servers:0" CRM_meta_notify_stop_uname="ovn-dbs-bundle-0" CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-0" CRM_meta_on_node_uuid="ovn-dbs-bundle-0" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" inactive_probe_interval="180000" listen_on_master_ip_only="no" manage_northd="yes" master_ip="172.17.1.247" nb_master_port="6641" sb_master_port="6642"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="200" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="12">
+ <action_set>
+ <rsc_op id="190" operation="stop" operation_key="ovndb_servers_stop_0" internal_operation_key="ovndb_servers:0_stop_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0">
+ <primitive id="ovndb_servers" long-id="ovndb_servers:0" class="ocf" provider="ovn" type="ovndb-servers"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_container_attribute_target="host" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="stop" CRM_meta_notify="true" CRM_meta_notify_active_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_active_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_all_hosts="controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2 controller-0 controller-1 controller-2" CRM_meta_notify_all_uname="controller-0 controller-1 controller-2 galera-bundle-0 galera-bundle-1 galera-bundle-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2 rabbitmq-bundle-0 rabbitmq-bundle-1 rabbitmq-bundle-2 redis-bundle-0 redis-bundle-1 redis-bundle-2" CRM_meta_notify_available_uname="controller-0 controller-1 controller-2 ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource=" " CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource="ovndb_servers:1" CRM_meta_notify_promote_uname="ovn-dbs-bundle-1" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_slave_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="ovndb_servers:0" CRM_meta_notify_stop_uname="ovn-dbs-bundle-0" CRM_meta_notify_unpromoted_resource="ovndb_servers:0 ovndb_servers:1 ovndb_servers:2" CRM_meta_notify_unpromoted_uname="ovn-dbs-bundle-0 ovn-dbs-bundle-1 ovn-dbs-bundle-2" CRM_meta_on_node="ovn-dbs-bundle-0" CRM_meta_on_node_uuid="ovn-dbs-bundle-0" CRM_meta_physical_host="controller-0" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="200000" inactive_probe_interval="180000" listen_on_master_ip_only="no" manage_northd="yes" master_ip="172.17.1.247" nb_master_port="6641" sb_master_port="6642"/>
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="181" operation="stop" operation_key="ovn-dbs-bundle_stop_0"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="194" operation="stop" operation_key="ovn-dbs-bundle-master_stop_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
<synapse id="13" priority="1000000">
<action_set>
<pseudo_event id="209" operation="notified" operation_key="ovn-dbs-bundle-master_confirmed-post_notify_promoted_0">
@@ -250,7 +250,7 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="188" operation="promote" operation_key="ovndb_servers_promote_0" internal_operation_key="ovndb_servers:1_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1"/>
+ <rsc_op id="185" operation="promote" operation_key="ovndb_servers_promote_0" internal_operation_key="ovndb_servers:1_promote_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1"/>
</trigger>
</inputs>
</synapse>
@@ -286,10 +286,10 @@
<pseudo_event id="202" operation="notify" operation_key="ovn-dbs-bundle-master_post_notify_stopped_0"/>
</trigger>
<trigger>
- <rsc_op id="263" operation="notify" operation_key="ovndb_servers_post_notify_stop_0" internal_operation_key="ovndb_servers:1_post_notify_stop_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1"/>
+ <rsc_op id="262" operation="notify" operation_key="ovndb_servers_post_notify_stop_0" internal_operation_key="ovndb_servers:1_post_notify_stop_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1"/>
</trigger>
<trigger>
- <rsc_op id="265" operation="notify" operation_key="ovndb_servers_post_notify_stop_0" internal_operation_key="ovndb_servers:2_post_notify_stop_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-2"/>
+ <rsc_op id="264" operation="notify" operation_key="ovndb_servers_post_notify_stop_0" internal_operation_key="ovndb_servers:2_post_notify_stop_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-2"/>
</trigger>
</inputs>
</synapse>
@@ -319,13 +319,13 @@
<pseudo_event id="200" operation="notify" operation_key="ovn-dbs-bundle-master_pre_notify_stop_0"/>
</trigger>
<trigger>
- <rsc_op id="261" operation="notify" operation_key="ovndb_servers_pre_notify_stop_0" internal_operation_key="ovndb_servers:0_pre_notify_stop_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0"/>
+ <rsc_op id="261" operation="notify" operation_key="ovndb_servers_pre_notify_stop_0" internal_operation_key="ovndb_servers:1_pre_notify_stop_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1"/>
</trigger>
<trigger>
- <rsc_op id="262" operation="notify" operation_key="ovndb_servers_pre_notify_stop_0" internal_operation_key="ovndb_servers:1_pre_notify_stop_0" on_node="ovn-dbs-bundle-1" on_node_uuid="ovn-dbs-bundle-1" router_node="controller-1"/>
+ <rsc_op id="263" operation="notify" operation_key="ovndb_servers_pre_notify_stop_0" internal_operation_key="ovndb_servers:2_pre_notify_stop_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-2"/>
</trigger>
<trigger>
- <rsc_op id="264" operation="notify" operation_key="ovndb_servers_pre_notify_stop_0" internal_operation_key="ovndb_servers:2_pre_notify_stop_0" on_node="ovn-dbs-bundle-2" on_node_uuid="ovn-dbs-bundle-2" router_node="controller-2"/>
+ <rsc_op id="265" operation="notify" operation_key="ovndb_servers_pre_notify_stop_0" internal_operation_key="ovndb_servers:0_pre_notify_stop_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0"/>
</trigger>
</inputs>
</synapse>
@@ -396,7 +396,7 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="184" operation="stop" operation_key="ovndb_servers_stop_0" internal_operation_key="ovndb_servers:0_stop_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0"/>
+ <rsc_op id="190" operation="stop" operation_key="ovndb_servers_stop_0" internal_operation_key="ovndb_servers:0_stop_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0"/>
</trigger>
<trigger>
<pseudo_event id="194" operation="stop" operation_key="ovn-dbs-bundle-master_stop_0"/>
@@ -473,7 +473,7 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="184" operation="stop" operation_key="ovndb_servers_stop_0" internal_operation_key="ovndb_servers:0_stop_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0"/>
+ <rsc_op id="190" operation="stop" operation_key="ovndb_servers_stop_0" internal_operation_key="ovndb_servers:0_stop_0" on_node="ovn-dbs-bundle-0" on_node_uuid="ovn-dbs-bundle-0" router_node="controller-0"/>
</trigger>
</inputs>
</synapse>
diff --git a/cts/scheduler/exp/node-pending-timeout.exp b/cts/scheduler/exp/node-pending-timeout.exp
new file mode 100644
index 0000000..e94812f
--- /dev/null
+++ b/cts/scheduler/exp/node-pending-timeout.exp
@@ -0,0 +1,38 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="3" operation="start" operation_key="st-sbd_start_0" on_node="node-1" on_node_uuid="1">
+ <primitive id="st-sbd" class="stonith" type="external/sbd"/>
+ <attributes CRM_meta_on_node="node-1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <crm_event id="1" operation="stonith" operation_key="stonith-node-2-reboot" on_node="node-2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="2" operation="monitor" operation_key="st-sbd_monitor_0" on_node="node-1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="2" operation="monitor" operation_key="st-sbd_monitor_0" on_node="node-1" on_node_uuid="1">
+ <primitive id="st-sbd" class="stonith" type="external/sbd"/>
+ <attributes CRM_meta_on_node="node-1" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <crm_event id="1" operation="stonith" operation_key="stonith-node-2-reboot" on_node="node-2" on_node_uuid="2">
+ <attributes CRM_meta_on_node="node-2" CRM_meta_on_node_uuid="2" CRM_meta_stonith_action="reboot" />
+ <downed>
+ <node id="2"/>
+ </downed>
+ </crm_event>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/pending-node-no-uname.exp b/cts/scheduler/exp/pending-node-no-uname.exp
new file mode 100644
index 0000000..2c45756
--- /dev/null
+++ b/cts/scheduler/exp/pending-node-no-uname.exp
@@ -0,0 +1,11 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="1" operation="monitor" operation_key="st-sbd_monitor_0" on_node="node-1" on_node_uuid="1">
+ <primitive id="st-sbd" class="stonith" type="external/sbd"/>
+ <attributes CRM_meta_on_node="node-1" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/promoted-failed-demote-2.exp b/cts/scheduler/exp/promoted-failed-demote-2.exp
index 02b9250..81ed8df 100644
--- a/cts/scheduler/exp/promoted-failed-demote-2.exp
+++ b/cts/scheduler/exp/promoted-failed-demote-2.exp
@@ -30,7 +30,7 @@
<action_set>
<rsc_op id="3" operation="stop" operation_key="stateful-1:0_stop_0" on_node="dl380g5b" on_node_uuid="888e539a-c6ef-496d-b79a-77cbf0f9e5e4">
<primitive id="stateful-1:0" class="ocf" provider="heartbeat" type="Stateful"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="true" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="stop" CRM_meta_notify="false" CRM_meta_on_fail="stop" CRM_meta_on_node="dl380g5b" CRM_meta_on_node_uuid="888e539a-c6ef-496d-b79a-77cbf0f9e5e4" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="60000" />
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="true" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="stop" CRM_meta_notify="false" CRM_meta_on_node="dl380g5b" CRM_meta_on_node_uuid="888e539a-c6ef-496d-b79a-77cbf0f9e5e4" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="60000" />
</rsc_op>
</action_set>
<inputs>
diff --git a/cts/scheduler/exp/promoted-failed-demote.exp b/cts/scheduler/exp/promoted-failed-demote.exp
index e4fc706..69e6b39 100644
--- a/cts/scheduler/exp/promoted-failed-demote.exp
+++ b/cts/scheduler/exp/promoted-failed-demote.exp
@@ -43,7 +43,7 @@
<action_set>
<rsc_op id="3" operation="stop" operation_key="stateful-1:0_stop_0" on_node="dl380g5b" on_node_uuid="888e539a-c6ef-496d-b79a-77cbf0f9e5e4">
<primitive id="stateful-1:0" class="ocf" provider="heartbeat" type="Stateful"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="true" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="stop" CRM_meta_notify="true" CRM_meta_notify_active_resource="stateful-1:0 stateful-1:1 stateful-2:1" CRM_meta_notify_active_uname="dl380g5b dl380g5a dl380g5a" CRM_meta_notify_all_uname="dl380g5a dl380g5b" CRM_meta_notify_available_uname="dl380g5a dl380g5b" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="stateful-2:0" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource="stateful-1:1 stateful-2:1" CRM_meta_notify_promote_uname="dl380g5a dl380g5a" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="stateful-1:0 stateful-1:1 stateful-2:1" CRM_meta_notify_slave_uname="dl380g5b dl380g5a dl380g5a" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="stateful-1:0" CRM_meta_notify_stop_uname="dl380g5b" CRM_meta_notify_unpromoted_resource="stateful-1:0 stateful-1:1 stateful-2:1" CRM_meta_notify_unpromoted_uname="dl380g5b dl380g5a dl380g5a" CRM_meta_on_fail="stop" CRM_meta_on_node="dl380g5b" CRM_meta_on_node_uuid="888e539a-c6ef-496d-b79a-77cbf0f9e5e4" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="60000" />
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="true" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="stop" CRM_meta_notify="true" CRM_meta_notify_active_resource="stateful-1:0 stateful-1:1 stateful-2:1" CRM_meta_notify_active_uname="dl380g5b dl380g5a dl380g5a" CRM_meta_notify_all_uname="dl380g5a dl380g5b" CRM_meta_notify_available_uname="dl380g5a dl380g5b" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="stateful-2:0" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource="stateful-1:1 stateful-2:1" CRM_meta_notify_promote_uname="dl380g5a dl380g5a" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource="stateful-1:0 stateful-1:1 stateful-2:1" CRM_meta_notify_slave_uname="dl380g5b dl380g5a dl380g5a" CRM_meta_notify_start_resource=" " CRM_meta_notify_start_uname=" " CRM_meta_notify_stop_resource="stateful-1:0" CRM_meta_notify_stop_uname="dl380g5b" CRM_meta_notify_unpromoted_resource="stateful-1:0 stateful-1:1 stateful-2:1" CRM_meta_notify_unpromoted_uname="dl380g5b dl380g5a dl380g5a" CRM_meta_on_node="dl380g5b" CRM_meta_on_node_uuid="888e539a-c6ef-496d-b79a-77cbf0f9e5e4" CRM_meta_promoted_max="1" CRM_meta_promoted_node_max="1" CRM_meta_timeout="60000" />
</rsc_op>
</action_set>
<inputs>
diff --git a/cts/scheduler/exp/promoted-ordering.exp b/cts/scheduler/exp/promoted-ordering.exp
index 1df608d..430fbe6 100644
--- a/cts/scheduler/exp/promoted-ordering.exp
+++ b/cts/scheduler/exp/promoted-ordering.exp
@@ -89,34 +89,34 @@
</synapse>
<synapse id="8">
<action_set>
- <rsc_op id="23" operation="monitor" operation_key="intip_1_master_monitor_30000" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298">
- <primitive id="intip_1_master" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <rsc_op id="23" operation="monitor" operation_key="intip_1_active_monitor_30000" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298">
+ <primitive id="intip_1_active" class="ocf" provider="heartbeat" type="IPaddr2"/>
<attributes CRM_meta_interval="30000" CRM_meta_name="monitor" CRM_meta_on_node="webcluster01" CRM_meta_on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298" CRM_meta_timeout="30000" ip="192.168.100.201" netmask="24" nic="eth1" target_role="started"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="22" operation="start" operation_key="intip_1_master_start_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298"/>
+ <rsc_op id="22" operation="start" operation_key="intip_1_active_start_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298"/>
</trigger>
</inputs>
</synapse>
<synapse id="9">
<action_set>
- <rsc_op id="22" operation="start" operation_key="intip_1_master_start_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298">
- <primitive id="intip_1_master" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <rsc_op id="22" operation="start" operation_key="intip_1_active_start_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298">
+ <primitive id="intip_1_active" class="ocf" provider="heartbeat" type="IPaddr2"/>
<attributes CRM_meta_on_node="webcluster01" CRM_meta_on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298" CRM_meta_timeout="20000" ip="192.168.100.201" netmask="24" nic="eth1" target_role="started"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="5" operation="monitor" operation_key="intip_1_master_monitor_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298"/>
+ <rsc_op id="5" operation="monitor" operation_key="intip_1_active_monitor_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298"/>
</trigger>
</inputs>
</synapse>
<synapse id="10">
<action_set>
- <rsc_op id="5" operation="monitor" operation_key="intip_1_master_monitor_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298">
- <primitive id="intip_1_master" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <rsc_op id="5" operation="monitor" operation_key="intip_1_active_monitor_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298">
+ <primitive id="intip_1_active" class="ocf" provider="heartbeat" type="IPaddr2"/>
<attributes CRM_meta_on_node="webcluster01" CRM_meta_on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" ip="192.168.100.201" netmask="24" nic="eth1" target_role="started"/>
</rsc_op>
</action_set>
@@ -124,34 +124,34 @@
</synapse>
<synapse id="11">
<action_set>
- <rsc_op id="25" operation="monitor" operation_key="intip_2_slave_monitor_30000" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298">
- <primitive id="intip_2_slave" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <rsc_op id="25" operation="monitor" operation_key="intip_2_passive_monitor_30000" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298">
+ <primitive id="intip_2_passive" class="ocf" provider="heartbeat" type="IPaddr2"/>
<attributes CRM_meta_interval="30000" CRM_meta_name="monitor" CRM_meta_on_node="webcluster01" CRM_meta_on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298" CRM_meta_timeout="30000" ip="192.168.100.202" netmask="24" nic="eth1" target_role="started"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="24" operation="start" operation_key="intip_2_slave_start_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298"/>
+ <rsc_op id="24" operation="start" operation_key="intip_2_passive_start_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298"/>
</trigger>
</inputs>
</synapse>
<synapse id="12">
<action_set>
- <rsc_op id="24" operation="start" operation_key="intip_2_slave_start_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298">
- <primitive id="intip_2_slave" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <rsc_op id="24" operation="start" operation_key="intip_2_passive_start_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298">
+ <primitive id="intip_2_passive" class="ocf" provider="heartbeat" type="IPaddr2"/>
<attributes CRM_meta_on_node="webcluster01" CRM_meta_on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298" CRM_meta_timeout="20000" ip="192.168.100.202" netmask="24" nic="eth1" target_role="started"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="6" operation="monitor" operation_key="intip_2_slave_monitor_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298"/>
+ <rsc_op id="6" operation="monitor" operation_key="intip_2_passive_monitor_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298"/>
</trigger>
</inputs>
</synapse>
<synapse id="13">
<action_set>
- <rsc_op id="6" operation="monitor" operation_key="intip_2_slave_monitor_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298">
- <primitive id="intip_2_slave" class="ocf" provider="heartbeat" type="IPaddr2"/>
+ <rsc_op id="6" operation="monitor" operation_key="intip_2_passive_monitor_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298">
+ <primitive id="intip_2_passive" class="ocf" provider="heartbeat" type="IPaddr2"/>
<attributes CRM_meta_on_node="webcluster01" CRM_meta_on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298" CRM_meta_op_target_rc="7" CRM_meta_timeout="30000" ip="192.168.100.202" netmask="24" nic="eth1" target_role="started"/>
</rsc_op>
</action_set>
@@ -268,10 +268,10 @@
<rsc_op id="7" operation="monitor" operation_key="drbd_www:0_monitor_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298"/>
</trigger>
<trigger>
- <rsc_op id="22" operation="start" operation_key="intip_1_master_start_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298"/>
+ <rsc_op id="22" operation="start" operation_key="intip_1_active_start_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298"/>
</trigger>
<trigger>
- <rsc_op id="24" operation="start" operation_key="intip_2_slave_start_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298"/>
+ <rsc_op id="24" operation="start" operation_key="intip_2_passive_start_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298"/>
</trigger>
<trigger>
<pseudo_event id="32" operation="notified" operation_key="ms_drbd_www_confirmed-pre_notify_start_0"/>
@@ -425,10 +425,10 @@
<rsc_op id="12" operation="monitor" operation_key="drbd_mysql:0_monitor_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298"/>
</trigger>
<trigger>
- <rsc_op id="22" operation="start" operation_key="intip_1_master_start_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298"/>
+ <rsc_op id="22" operation="start" operation_key="intip_1_active_start_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298"/>
</trigger>
<trigger>
- <rsc_op id="24" operation="start" operation_key="intip_2_slave_start_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298"/>
+ <rsc_op id="24" operation="start" operation_key="intip_2_passive_start_0" on_node="webcluster01" on_node_uuid="49e81295-8e2f-4aeb-98f3-a14de6f62298"/>
</trigger>
<trigger>
<pseudo_event id="73" operation="notified" operation_key="ms_drbd_mysql_confirmed-pre_notify_start_0"/>
diff --git a/cts/scheduler/exp/promoted-probed-score.exp b/cts/scheduler/exp/promoted-probed-score.exp
index 3db546c..0952700 100644
--- a/cts/scheduler/exp/promoted-probed-score.exp
+++ b/cts/scheduler/exp/promoted-probed-score.exp
@@ -1,9 +1,9 @@
<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
<synapse id="0" priority="1000000">
<action_set>
- <rsc_op id="243" operation="notify" operation_key="AdminDrbd:0_post_notify_promote_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="243" operation="notify" operation_key="AdminDrbd:0_post_notify_promote_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="AdminDrbd" long-id="AdminDrbd:0" class="ocf" provider="linbit" type="drbd"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource=" " CRM_meta_notify_active_uname=" " CRM_meta_notify_all_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_available_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_promote_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_start_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" drbd_resource="admin"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource=" " CRM_meta_notify_active_uname=" " CRM_meta_notify_all_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_available_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_promote_uname="orestes-corosync.nevis.columbia.edu hypatia-corosync.nevis.columbia.edu" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_start_uname="orestes-corosync.nevis.columbia.edu hypatia-corosync.nevis.columbia.edu" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" drbd_resource="admin"/>
</rsc_op>
</action_set>
<inputs>
@@ -14,9 +14,9 @@
</synapse>
<synapse id="1">
<action_set>
- <rsc_op id="242" operation="notify" operation_key="AdminDrbd:0_pre_notify_promote_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="242" operation="notify" operation_key="AdminDrbd:0_pre_notify_promote_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="AdminDrbd" long-id="AdminDrbd:0" class="ocf" provider="linbit" type="drbd"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource=" " CRM_meta_notify_active_uname=" " CRM_meta_notify_all_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_available_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_promote_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_start_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" drbd_resource="admin"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource=" " CRM_meta_notify_active_uname=" " CRM_meta_notify_all_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_available_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_promote_uname="orestes-corosync.nevis.columbia.edu hypatia-corosync.nevis.columbia.edu" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_start_uname="orestes-corosync.nevis.columbia.edu hypatia-corosync.nevis.columbia.edu" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" drbd_resource="admin"/>
</rsc_op>
</action_set>
<inputs>
@@ -27,9 +27,9 @@
</synapse>
<synapse id="2" priority="1000000">
<action_set>
- <rsc_op id="240" operation="notify" operation_key="AdminDrbd:0_post_notify_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="240" operation="notify" operation_key="AdminDrbd:0_post_notify_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="AdminDrbd" long-id="AdminDrbd:0" class="ocf" provider="linbit" type="drbd"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource=" " CRM_meta_notify_active_uname=" " CRM_meta_notify_all_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_available_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="start" CRM_meta_notify_promote_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_promote_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_start_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" drbd_resource="admin"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource=" " CRM_meta_notify_active_uname=" " CRM_meta_notify_all_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_available_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="start" CRM_meta_notify_promote_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_promote_uname="orestes-corosync.nevis.columbia.edu hypatia-corosync.nevis.columbia.edu" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_start_uname="orestes-corosync.nevis.columbia.edu hypatia-corosync.nevis.columbia.edu" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" drbd_resource="admin"/>
</rsc_op>
</action_set>
<inputs>
@@ -40,17 +40,17 @@
</synapse>
<synapse id="3">
<action_set>
- <rsc_op id="42" operation="monitor" operation_key="AdminDrbd:0_monitor_59000" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="42" operation="monitor" operation_key="AdminDrbd:0_monitor_59000" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="AdminDrbd" long-id="AdminDrbd:0" class="ocf" provider="linbit" type="drbd"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="59000" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="true" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="8" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="30000" drbd_resource="admin"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="59000" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="true" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="8" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="30000" drbd_resource="admin"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="40" operation="start" operation_key="AdminDrbd:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="40" operation="start" operation_key="AdminDrbd:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="41" operation="promote" operation_key="AdminDrbd:0_promote_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="41" operation="promote" operation_key="AdminDrbd:0_promote_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
<pseudo_event id="53" operation="notified" operation_key="AdminClone_confirmed-post_notify_running_0"/>
@@ -62,14 +62,14 @@
</synapse>
<synapse id="4">
<action_set>
- <rsc_op id="41" operation="promote" operation_key="AdminDrbd:0_promote_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="41" operation="promote" operation_key="AdminDrbd:0_promote_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="AdminDrbd" long-id="AdminDrbd:0" class="ocf" provider="linbit" type="drbd"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource=" " CRM_meta_notify_active_uname=" " CRM_meta_notify_all_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_available_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_promote_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_start_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" drbd_resource="admin"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource=" " CRM_meta_notify_active_uname=" " CRM_meta_notify_all_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_available_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_promote_uname="orestes-corosync.nevis.columbia.edu hypatia-corosync.nevis.columbia.edu" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_start_uname="orestes-corosync.nevis.columbia.edu hypatia-corosync.nevis.columbia.edu" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" drbd_resource="admin"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="40" operation="start" operation_key="AdminDrbd:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="40" operation="start" operation_key="AdminDrbd:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
<pseudo_event id="58" operation="promote" operation_key="AdminClone_promote_0"/>
@@ -78,9 +78,9 @@
</synapse>
<synapse id="5">
<action_set>
- <rsc_op id="40" operation="start" operation_key="AdminDrbd:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="40" operation="start" operation_key="AdminDrbd:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="AdminDrbd" long-id="AdminDrbd:0" class="ocf" provider="linbit" type="drbd"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_name="start" CRM_meta_notify="true" CRM_meta_notify_active_resource=" " CRM_meta_notify_active_uname=" " CRM_meta_notify_all_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_available_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_promote_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_start_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_timeout="240000" drbd_resource="admin"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_name="start" CRM_meta_notify="true" CRM_meta_notify_active_resource=" " CRM_meta_notify_active_uname=" " CRM_meta_notify_all_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_available_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_promote_uname="orestes-corosync.nevis.columbia.edu hypatia-corosync.nevis.columbia.edu" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_start_uname="orestes-corosync.nevis.columbia.edu hypatia-corosync.nevis.columbia.edu" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_timeout="240000" drbd_resource="admin"/>
</rsc_op>
</action_set>
<inputs>
@@ -91,9 +91,9 @@
</synapse>
<synapse id="6" priority="1000000">
<action_set>
- <rsc_op id="245" operation="notify" operation_key="AdminDrbd:1_post_notify_promote_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="245" operation="notify" operation_key="AdminDrbd:1_post_notify_promote_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="AdminDrbd" long-id="AdminDrbd:1" class="ocf" provider="linbit" type="drbd"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource=" " CRM_meta_notify_active_uname=" " CRM_meta_notify_all_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_available_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_promote_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_start_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" drbd_resource="admin"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource=" " CRM_meta_notify_active_uname=" " CRM_meta_notify_all_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_available_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_key_operation="promoted" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_promote_uname="orestes-corosync.nevis.columbia.edu hypatia-corosync.nevis.columbia.edu" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_start_uname="orestes-corosync.nevis.columbia.edu hypatia-corosync.nevis.columbia.edu" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" drbd_resource="admin"/>
</rsc_op>
</action_set>
<inputs>
@@ -104,9 +104,9 @@
</synapse>
<synapse id="7">
<action_set>
- <rsc_op id="244" operation="notify" operation_key="AdminDrbd:1_pre_notify_promote_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="244" operation="notify" operation_key="AdminDrbd:1_pre_notify_promote_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="AdminDrbd" long-id="AdminDrbd:1" class="ocf" provider="linbit" type="drbd"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource=" " CRM_meta_notify_active_uname=" " CRM_meta_notify_all_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_available_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_promote_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_start_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" drbd_resource="admin"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource=" " CRM_meta_notify_active_uname=" " CRM_meta_notify_all_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_available_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_key_operation="promote" CRM_meta_notify_key_type="pre" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="promote" CRM_meta_notify_promote_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_promote_uname="orestes-corosync.nevis.columbia.edu hypatia-corosync.nevis.columbia.edu" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_start_uname="orestes-corosync.nevis.columbia.edu hypatia-corosync.nevis.columbia.edu" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="pre" CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" drbd_resource="admin"/>
</rsc_op>
</action_set>
<inputs>
@@ -117,9 +117,9 @@
</synapse>
<synapse id="8" priority="1000000">
<action_set>
- <rsc_op id="241" operation="notify" operation_key="AdminDrbd:1_post_notify_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="241" operation="notify" operation_key="AdminDrbd:1_post_notify_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="AdminDrbd" long-id="AdminDrbd:1" class="ocf" provider="linbit" type="drbd"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource=" " CRM_meta_notify_active_uname=" " CRM_meta_notify_all_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_available_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="start" CRM_meta_notify_promote_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_promote_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_start_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" drbd_resource="admin"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource=" " CRM_meta_notify_active_uname=" " CRM_meta_notify_all_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_available_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_key_operation="running" CRM_meta_notify_key_type="post" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_operation="start" CRM_meta_notify_promote_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_promote_uname="orestes-corosync.nevis.columbia.edu hypatia-corosync.nevis.columbia.edu" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_start_uname="orestes-corosync.nevis.columbia.edu hypatia-corosync.nevis.columbia.edu" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_type="post" CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" drbd_resource="admin"/>
</rsc_op>
</action_set>
<inputs>
@@ -130,17 +130,17 @@
</synapse>
<synapse id="9">
<action_set>
- <rsc_op id="45" operation="monitor" operation_key="AdminDrbd:1_monitor_59000" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="45" operation="monitor" operation_key="AdminDrbd:1_monitor_59000" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="AdminDrbd" long-id="AdminDrbd:1" class="ocf" provider="linbit" type="drbd"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="59000" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="true" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="8" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="30000" drbd_resource="admin"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="59000" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="true" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="8" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_role="Promoted" CRM_meta_timeout="30000" drbd_resource="admin"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="43" operation="start" operation_key="AdminDrbd:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="43" operation="start" operation_key="AdminDrbd:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="44" operation="promote" operation_key="AdminDrbd:1_promote_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="44" operation="promote" operation_key="AdminDrbd:1_promote_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
<pseudo_event id="53" operation="notified" operation_key="AdminClone_confirmed-post_notify_running_0"/>
@@ -152,14 +152,14 @@
</synapse>
<synapse id="10">
<action_set>
- <rsc_op id="44" operation="promote" operation_key="AdminDrbd:1_promote_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="44" operation="promote" operation_key="AdminDrbd:1_promote_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="AdminDrbd" long-id="AdminDrbd:1" class="ocf" provider="linbit" type="drbd"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource=" " CRM_meta_notify_active_uname=" " CRM_meta_notify_all_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_available_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_promote_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_start_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" drbd_resource="admin"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_notify="true" CRM_meta_notify_active_resource=" " CRM_meta_notify_active_uname=" " CRM_meta_notify_all_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_available_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_promote_uname="orestes-corosync.nevis.columbia.edu hypatia-corosync.nevis.columbia.edu" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_start_uname="orestes-corosync.nevis.columbia.edu hypatia-corosync.nevis.columbia.edu" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_timeout="20000" drbd_resource="admin"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="43" operation="start" operation_key="AdminDrbd:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="43" operation="start" operation_key="AdminDrbd:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
<pseudo_event id="58" operation="promote" operation_key="AdminClone_promote_0"/>
@@ -168,9 +168,9 @@
</synapse>
<synapse id="11">
<action_set>
- <rsc_op id="43" operation="start" operation_key="AdminDrbd:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="43" operation="start" operation_key="AdminDrbd:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="AdminDrbd" long-id="AdminDrbd:1" class="ocf" provider="linbit" type="drbd"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_name="start" CRM_meta_notify="true" CRM_meta_notify_active_resource=" " CRM_meta_notify_active_uname=" " CRM_meta_notify_all_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_available_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_promote_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_start_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_timeout="240000" drbd_resource="admin"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_name="start" CRM_meta_notify="true" CRM_meta_notify_active_resource=" " CRM_meta_notify_active_uname=" " CRM_meta_notify_all_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_available_uname="hypatia-corosync.nevis.columbia.edu orestes-corosync.nevis.columbia.edu" CRM_meta_notify_demote_resource=" " CRM_meta_notify_demote_uname=" " CRM_meta_notify_inactive_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_master_resource=" " CRM_meta_notify_master_uname=" " CRM_meta_notify_promote_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_promote_uname="orestes-corosync.nevis.columbia.edu hypatia-corosync.nevis.columbia.edu" CRM_meta_notify_promoted_resource=" " CRM_meta_notify_promoted_uname=" " CRM_meta_notify_slave_resource=" " CRM_meta_notify_slave_uname=" " CRM_meta_notify_start_resource="AdminDrbd:0 AdminDrbd:1" CRM_meta_notify_start_uname="orestes-corosync.nevis.columbia.edu hypatia-corosync.nevis.columbia.edu" CRM_meta_notify_stop_resource=" " CRM_meta_notify_stop_uname=" " CRM_meta_notify_unpromoted_resource=" " CRM_meta_notify_unpromoted_uname=" " CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_promoted_max="2" CRM_meta_promoted_node_max="1" CRM_meta_timeout="240000" drbd_resource="admin"/>
</rsc_op>
</action_set>
<inputs>
@@ -190,10 +190,10 @@
<pseudo_event id="62" operation="notify" operation_key="AdminClone_post_notify_promoted_0"/>
</trigger>
<trigger>
- <rsc_op id="243" operation="notify" operation_key="AdminDrbd:0_post_notify_promote_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="243" operation="notify" operation_key="AdminDrbd:0_post_notify_promote_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="245" operation="notify" operation_key="AdminDrbd:1_post_notify_promote_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="245" operation="notify" operation_key="AdminDrbd:1_post_notify_promote_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
</inputs>
</synapse>
@@ -223,10 +223,10 @@
<pseudo_event id="60" operation="notify" operation_key="AdminClone_pre_notify_promote_0"/>
</trigger>
<trigger>
- <rsc_op id="242" operation="notify" operation_key="AdminDrbd:0_pre_notify_promote_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="242" operation="notify" operation_key="AdminDrbd:0_pre_notify_promote_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="244" operation="notify" operation_key="AdminDrbd:1_pre_notify_promote_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="244" operation="notify" operation_key="AdminDrbd:1_pre_notify_promote_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
</inputs>
</synapse>
@@ -250,10 +250,10 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="41" operation="promote" operation_key="AdminDrbd:0_promote_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="41" operation="promote" operation_key="AdminDrbd:0_promote_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="44" operation="promote" operation_key="AdminDrbd:1_promote_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="44" operation="promote" operation_key="AdminDrbd:1_promote_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
</inputs>
</synapse>
@@ -283,10 +283,10 @@
<pseudo_event id="52" operation="notify" operation_key="AdminClone_post_notify_running_0"/>
</trigger>
<trigger>
- <rsc_op id="240" operation="notify" operation_key="AdminDrbd:0_post_notify_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="240" operation="notify" operation_key="AdminDrbd:0_post_notify_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="241" operation="notify" operation_key="AdminDrbd:1_post_notify_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="241" operation="notify" operation_key="AdminDrbd:1_post_notify_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
</inputs>
</synapse>
@@ -333,10 +333,10 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="40" operation="start" operation_key="AdminDrbd:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="40" operation="start" operation_key="AdminDrbd:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="43" operation="start" operation_key="AdminDrbd:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="43" operation="start" operation_key="AdminDrbd:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
<pseudo_event id="46" operation="start" operation_key="AdminClone_start_0"/>
@@ -527,7 +527,7 @@
<pseudo_event id="104" operation="start" operation_key="CupsClone_start_0"/>
</trigger>
<trigger>
- <pseudo_event id="211" operation="running" operation_key="FilesystemGroup:0_running_0"/>
+ <pseudo_event id="227" operation="running" operation_key="FilesystemGroup:1_running_0"/>
</trigger>
</inputs>
</synapse>
@@ -647,7 +647,7 @@
<pseudo_event id="104" operation="start" operation_key="CupsClone_start_0"/>
</trigger>
<trigger>
- <pseudo_event id="227" operation="running" operation_key="FilesystemGroup:1_running_0"/>
+ <pseudo_event id="211" operation="running" operation_key="FilesystemGroup:0_running_0"/>
</trigger>
</inputs>
</synapse>
@@ -1058,7 +1058,7 @@
<pseudo_event id="148" operation="start" operation_key="LibvirtdClone_start_0"/>
</trigger>
<trigger>
- <pseudo_event id="211" operation="running" operation_key="FilesystemGroup:0_running_0"/>
+ <pseudo_event id="227" operation="running" operation_key="FilesystemGroup:1_running_0"/>
</trigger>
</inputs>
</synapse>
@@ -1173,7 +1173,7 @@
<pseudo_event id="148" operation="start" operation_key="LibvirtdClone_start_0"/>
</trigger>
<trigger>
- <pseudo_event id="227" operation="running" operation_key="FilesystemGroup:1_running_0"/>
+ <pseudo_event id="211" operation="running" operation_key="FilesystemGroup:0_running_0"/>
</trigger>
</inputs>
</synapse>
@@ -1300,7 +1300,7 @@
<pseudo_event id="166" operation="start" operation_key="TftpClone_start_0"/>
</trigger>
<trigger>
- <pseudo_event id="211" operation="running" operation_key="FilesystemGroup:0_running_0"/>
+ <pseudo_event id="227" operation="running" operation_key="FilesystemGroup:1_running_0"/>
</trigger>
</inputs>
</synapse>
@@ -1393,7 +1393,7 @@
<pseudo_event id="166" operation="start" operation_key="TftpClone_start_0"/>
</trigger>
<trigger>
- <pseudo_event id="227" operation="running" operation_key="FilesystemGroup:1_running_0"/>
+ <pseudo_event id="211" operation="running" operation_key="FilesystemGroup:0_running_0"/>
</trigger>
</inputs>
</synapse>
@@ -1546,7 +1546,7 @@
<pseudo_event id="194" operation="start" operation_key="ExportsClone_start_0"/>
</trigger>
<trigger>
- <pseudo_event id="211" operation="running" operation_key="FilesystemGroup:0_running_0"/>
+ <pseudo_event id="227" operation="running" operation_key="FilesystemGroup:1_running_0"/>
</trigger>
</inputs>
</synapse>
@@ -1794,7 +1794,7 @@
<pseudo_event id="194" operation="start" operation_key="ExportsClone_start_0"/>
</trigger>
<trigger>
- <pseudo_event id="227" operation="running" operation_key="FilesystemGroup:1_running_0"/>
+ <pseudo_event id="211" operation="running" operation_key="FilesystemGroup:0_running_0"/>
</trigger>
</inputs>
</synapse>
@@ -2081,22 +2081,22 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="198" operation="start" operation_key="AdminLvm:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="198" operation="start" operation_key="AdminLvm:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="200" operation="start" operation_key="FSUsrNevis:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="200" operation="start" operation_key="FSUsrNevis:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="202" operation="start" operation_key="FSVarNevis:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="202" operation="start" operation_key="FSVarNevis:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="204" operation="start" operation_key="FSVirtualMachines:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="204" operation="start" operation_key="FSVirtualMachines:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="206" operation="start" operation_key="FSMail:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="206" operation="start" operation_key="FSMail:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="208" operation="start" operation_key="FSWork:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="208" operation="start" operation_key="FSWork:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
<pseudo_event id="210" operation="start" operation_key="FilesystemGroup:0_start_0"/>
@@ -2111,7 +2111,7 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="41" operation="promote" operation_key="AdminDrbd:0_promote_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="41" operation="promote" operation_key="AdminDrbd:0_promote_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
<pseudo_event id="230" operation="start" operation_key="FilesystemClone_start_0"/>
@@ -2120,22 +2120,22 @@
</synapse>
<synapse id="144">
<action_set>
- <rsc_op id="199" operation="monitor" operation_key="AdminLvm:0_monitor_30000" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="199" operation="monitor" operation_key="AdminLvm:0_monitor_30000" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="AdminLvm" long-id="AdminLvm:0" class="ocf" provider="heartbeat" type="LVM"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="30000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="100000" depth="0" volgrpname="ADMIN"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="30000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="100000" depth="0" volgrpname="ADMIN"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="198" operation="start" operation_key="AdminLvm:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="198" operation="start" operation_key="AdminLvm:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
</inputs>
</synapse>
<synapse id="145">
<action_set>
- <rsc_op id="198" operation="start" operation_key="AdminLvm:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="198" operation="start" operation_key="AdminLvm:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="AdminLvm" long-id="AdminLvm:0" class="ocf" provider="heartbeat" type="LVM"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="30000" volgrpname="ADMIN"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="30000" volgrpname="ADMIN"/>
</rsc_op>
</action_set>
<inputs>
@@ -2146,36 +2146,36 @@
</synapse>
<synapse id="146">
<action_set>
- <rsc_op id="13" operation="monitor" operation_key="AdminLvm:0_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="32" operation="monitor" operation_key="AdminLvm:0_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="AdminLvm" long-id="AdminLvm:0" class="ocf" provider="heartbeat" type="LVM"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="100000" volgrpname="ADMIN"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="100000" volgrpname="ADMIN"/>
</rsc_op>
</action_set>
<inputs/>
</synapse>
<synapse id="147">
<action_set>
- <rsc_op id="201" operation="monitor" operation_key="FSUsrNevis:0_monitor_20000" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="201" operation="monitor" operation_key="FSUsrNevis:0_monitor_20000" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="FSUsrNevis" long-id="FSUsrNevis:0" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="20000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="40000" depth="0" device="/dev/mapper/ADMIN-usr" directory="/usr/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="20000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="40000" depth="0" device="/dev/mapper/ADMIN-usr" directory="/usr/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="200" operation="start" operation_key="FSUsrNevis:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="200" operation="start" operation_key="FSUsrNevis:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
</inputs>
</synapse>
<synapse id="148">
<action_set>
- <rsc_op id="200" operation="start" operation_key="FSUsrNevis:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="200" operation="start" operation_key="FSUsrNevis:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="FSUsrNevis" long-id="FSUsrNevis:0" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="60000" device="/dev/mapper/ADMIN-usr" directory="/usr/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="60000" device="/dev/mapper/ADMIN-usr" directory="/usr/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="198" operation="start" operation_key="AdminLvm:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="198" operation="start" operation_key="AdminLvm:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
<pseudo_event id="210" operation="start" operation_key="FilesystemGroup:0_start_0"/>
@@ -2184,36 +2184,36 @@
</synapse>
<synapse id="149">
<action_set>
- <rsc_op id="14" operation="monitor" operation_key="FSUsrNevis:0_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="33" operation="monitor" operation_key="FSUsrNevis:0_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="FSUsrNevis" long-id="FSUsrNevis:0" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="40000" device="/dev/mapper/ADMIN-usr" directory="/usr/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="40000" device="/dev/mapper/ADMIN-usr" directory="/usr/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs/>
</synapse>
<synapse id="150">
<action_set>
- <rsc_op id="203" operation="monitor" operation_key="FSVarNevis:0_monitor_20000" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="203" operation="monitor" operation_key="FSVarNevis:0_monitor_20000" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="FSVarNevis" long-id="FSVarNevis:0" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="20000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="40000" depth="0" device="/dev/mapper/ADMIN-var" directory="/var/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="20000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="40000" depth="0" device="/dev/mapper/ADMIN-var" directory="/var/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="202" operation="start" operation_key="FSVarNevis:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="202" operation="start" operation_key="FSVarNevis:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
</inputs>
</synapse>
<synapse id="151">
<action_set>
- <rsc_op id="202" operation="start" operation_key="FSVarNevis:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="202" operation="start" operation_key="FSVarNevis:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="FSVarNevis" long-id="FSVarNevis:0" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="60000" device="/dev/mapper/ADMIN-var" directory="/var/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="60000" device="/dev/mapper/ADMIN-var" directory="/var/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="200" operation="start" operation_key="FSUsrNevis:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="200" operation="start" operation_key="FSUsrNevis:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
<pseudo_event id="210" operation="start" operation_key="FilesystemGroup:0_start_0"/>
@@ -2222,36 +2222,36 @@
</synapse>
<synapse id="152">
<action_set>
- <rsc_op id="15" operation="monitor" operation_key="FSVarNevis:0_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="34" operation="monitor" operation_key="FSVarNevis:0_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="FSVarNevis" long-id="FSVarNevis:0" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="40000" device="/dev/mapper/ADMIN-var" directory="/var/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="40000" device="/dev/mapper/ADMIN-var" directory="/var/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs/>
</synapse>
<synapse id="153">
<action_set>
- <rsc_op id="205" operation="monitor" operation_key="FSVirtualMachines:0_monitor_20000" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="205" operation="monitor" operation_key="FSVirtualMachines:0_monitor_20000" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="FSVirtualMachines" long-id="FSVirtualMachines:0" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="20000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="40000" depth="0" device="/dev/mapper/ADMIN-xen" directory="/xen" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="20000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="40000" depth="0" device="/dev/mapper/ADMIN-xen" directory="/xen" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="204" operation="start" operation_key="FSVirtualMachines:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="204" operation="start" operation_key="FSVirtualMachines:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
</inputs>
</synapse>
<synapse id="154">
<action_set>
- <rsc_op id="204" operation="start" operation_key="FSVirtualMachines:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="204" operation="start" operation_key="FSVirtualMachines:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="FSVirtualMachines" long-id="FSVirtualMachines:0" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="60000" device="/dev/mapper/ADMIN-xen" directory="/xen" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="60000" device="/dev/mapper/ADMIN-xen" directory="/xen" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="202" operation="start" operation_key="FSVarNevis:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="202" operation="start" operation_key="FSVarNevis:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
<pseudo_event id="210" operation="start" operation_key="FilesystemGroup:0_start_0"/>
@@ -2260,36 +2260,36 @@
</synapse>
<synapse id="155">
<action_set>
- <rsc_op id="16" operation="monitor" operation_key="FSVirtualMachines:0_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="35" operation="monitor" operation_key="FSVirtualMachines:0_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="FSVirtualMachines" long-id="FSVirtualMachines:0" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="40000" device="/dev/mapper/ADMIN-xen" directory="/xen" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="40000" device="/dev/mapper/ADMIN-xen" directory="/xen" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs/>
</synapse>
<synapse id="156">
<action_set>
- <rsc_op id="207" operation="monitor" operation_key="FSMail:0_monitor_20000" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="207" operation="monitor" operation_key="FSMail:0_monitor_20000" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="FSMail" long-id="FSMail:0" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="20000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="40000" depth="0" device="/dev/mapper/ADMIN-mail" directory="/mail" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="20000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="40000" depth="0" device="/dev/mapper/ADMIN-mail" directory="/mail" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="206" operation="start" operation_key="FSMail:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="206" operation="start" operation_key="FSMail:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
</inputs>
</synapse>
<synapse id="157">
<action_set>
- <rsc_op id="206" operation="start" operation_key="FSMail:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="206" operation="start" operation_key="FSMail:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="FSMail" long-id="FSMail:0" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="60000" device="/dev/mapper/ADMIN-mail" directory="/mail" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="60000" device="/dev/mapper/ADMIN-mail" directory="/mail" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="204" operation="start" operation_key="FSVirtualMachines:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="204" operation="start" operation_key="FSVirtualMachines:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
<pseudo_event id="210" operation="start" operation_key="FilesystemGroup:0_start_0"/>
@@ -2298,36 +2298,36 @@
</synapse>
<synapse id="158">
<action_set>
- <rsc_op id="17" operation="monitor" operation_key="FSMail:0_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="36" operation="monitor" operation_key="FSMail:0_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="FSMail" long-id="FSMail:0" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="40000" device="/dev/mapper/ADMIN-mail" directory="/mail" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="40000" device="/dev/mapper/ADMIN-mail" directory="/mail" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs/>
</synapse>
<synapse id="159">
<action_set>
- <rsc_op id="209" operation="monitor" operation_key="FSWork:0_monitor_20000" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="209" operation="monitor" operation_key="FSWork:0_monitor_20000" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="FSWork" long-id="FSWork:0" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="20000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="40000" depth="0" device="/dev/mapper/ADMIN-work" directory="/work" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="20000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="40000" depth="0" device="/dev/mapper/ADMIN-work" directory="/work" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="208" operation="start" operation_key="FSWork:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="208" operation="start" operation_key="FSWork:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
</inputs>
</synapse>
<synapse id="160">
<action_set>
- <rsc_op id="208" operation="start" operation_key="FSWork:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="208" operation="start" operation_key="FSWork:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="FSWork" long-id="FSWork:0" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="60000" device="/dev/mapper/ADMIN-work" directory="/work" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="60000" device="/dev/mapper/ADMIN-work" directory="/work" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="206" operation="start" operation_key="FSMail:0_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="206" operation="start" operation_key="FSMail:0_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
<pseudo_event id="210" operation="start" operation_key="FilesystemGroup:0_start_0"/>
@@ -2336,9 +2336,9 @@
</synapse>
<synapse id="161">
<action_set>
- <rsc_op id="18" operation="monitor" operation_key="FSWork:0_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
+ <rsc_op id="37" operation="monitor" operation_key="FSWork:0_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
<primitive id="FSWork" long-id="FSWork:0" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="40000" device="/dev/mapper/ADMIN-work" directory="/work" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="40000" device="/dev/mapper/ADMIN-work" directory="/work" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs/>
@@ -2351,22 +2351,22 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="214" operation="start" operation_key="AdminLvm:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="214" operation="start" operation_key="AdminLvm:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="216" operation="start" operation_key="FSUsrNevis:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="216" operation="start" operation_key="FSUsrNevis:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="218" operation="start" operation_key="FSVarNevis:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="218" operation="start" operation_key="FSVarNevis:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="220" operation="start" operation_key="FSVirtualMachines:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="220" operation="start" operation_key="FSVirtualMachines:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="222" operation="start" operation_key="FSMail:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="222" operation="start" operation_key="FSMail:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="224" operation="start" operation_key="FSWork:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="224" operation="start" operation_key="FSWork:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
<pseudo_event id="226" operation="start" operation_key="FilesystemGroup:1_start_0"/>
@@ -2381,7 +2381,7 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="44" operation="promote" operation_key="AdminDrbd:1_promote_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="44" operation="promote" operation_key="AdminDrbd:1_promote_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
<pseudo_event id="230" operation="start" operation_key="FilesystemClone_start_0"/>
@@ -2390,22 +2390,22 @@
</synapse>
<synapse id="164">
<action_set>
- <rsc_op id="215" operation="monitor" operation_key="AdminLvm:1_monitor_30000" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="215" operation="monitor" operation_key="AdminLvm:1_monitor_30000" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="AdminLvm" long-id="AdminLvm:1" class="ocf" provider="heartbeat" type="LVM"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="30000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="100000" depth="0" volgrpname="ADMIN"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="30000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="100000" depth="0" volgrpname="ADMIN"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="214" operation="start" operation_key="AdminLvm:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="214" operation="start" operation_key="AdminLvm:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
</inputs>
</synapse>
<synapse id="165">
<action_set>
- <rsc_op id="214" operation="start" operation_key="AdminLvm:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="214" operation="start" operation_key="AdminLvm:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="AdminLvm" long-id="AdminLvm:1" class="ocf" provider="heartbeat" type="LVM"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="30000" volgrpname="ADMIN"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="30000" volgrpname="ADMIN"/>
</rsc_op>
</action_set>
<inputs>
@@ -2416,36 +2416,36 @@
</synapse>
<synapse id="166">
<action_set>
- <rsc_op id="32" operation="monitor" operation_key="AdminLvm:1_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="13" operation="monitor" operation_key="AdminLvm:1_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="AdminLvm" long-id="AdminLvm:1" class="ocf" provider="heartbeat" type="LVM"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="100000" volgrpname="ADMIN"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="100000" volgrpname="ADMIN"/>
</rsc_op>
</action_set>
<inputs/>
</synapse>
<synapse id="167">
<action_set>
- <rsc_op id="217" operation="monitor" operation_key="FSUsrNevis:1_monitor_20000" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="217" operation="monitor" operation_key="FSUsrNevis:1_monitor_20000" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="FSUsrNevis" long-id="FSUsrNevis:1" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="20000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="40000" depth="0" device="/dev/mapper/ADMIN-usr" directory="/usr/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="20000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="40000" depth="0" device="/dev/mapper/ADMIN-usr" directory="/usr/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="216" operation="start" operation_key="FSUsrNevis:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="216" operation="start" operation_key="FSUsrNevis:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
</inputs>
</synapse>
<synapse id="168">
<action_set>
- <rsc_op id="216" operation="start" operation_key="FSUsrNevis:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="216" operation="start" operation_key="FSUsrNevis:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="FSUsrNevis" long-id="FSUsrNevis:1" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="60000" device="/dev/mapper/ADMIN-usr" directory="/usr/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="60000" device="/dev/mapper/ADMIN-usr" directory="/usr/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="214" operation="start" operation_key="AdminLvm:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="214" operation="start" operation_key="AdminLvm:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
<pseudo_event id="226" operation="start" operation_key="FilesystemGroup:1_start_0"/>
@@ -2454,36 +2454,36 @@
</synapse>
<synapse id="169">
<action_set>
- <rsc_op id="33" operation="monitor" operation_key="FSUsrNevis:1_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="14" operation="monitor" operation_key="FSUsrNevis:1_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="FSUsrNevis" long-id="FSUsrNevis:1" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="40000" device="/dev/mapper/ADMIN-usr" directory="/usr/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="40000" device="/dev/mapper/ADMIN-usr" directory="/usr/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs/>
</synapse>
<synapse id="170">
<action_set>
- <rsc_op id="219" operation="monitor" operation_key="FSVarNevis:1_monitor_20000" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="219" operation="monitor" operation_key="FSVarNevis:1_monitor_20000" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="FSVarNevis" long-id="FSVarNevis:1" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="20000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="40000" depth="0" device="/dev/mapper/ADMIN-var" directory="/var/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="20000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="40000" depth="0" device="/dev/mapper/ADMIN-var" directory="/var/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="218" operation="start" operation_key="FSVarNevis:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="218" operation="start" operation_key="FSVarNevis:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
</inputs>
</synapse>
<synapse id="171">
<action_set>
- <rsc_op id="218" operation="start" operation_key="FSVarNevis:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="218" operation="start" operation_key="FSVarNevis:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="FSVarNevis" long-id="FSVarNevis:1" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="60000" device="/dev/mapper/ADMIN-var" directory="/var/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="60000" device="/dev/mapper/ADMIN-var" directory="/var/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="216" operation="start" operation_key="FSUsrNevis:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="216" operation="start" operation_key="FSUsrNevis:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
<pseudo_event id="226" operation="start" operation_key="FilesystemGroup:1_start_0"/>
@@ -2492,36 +2492,36 @@
</synapse>
<synapse id="172">
<action_set>
- <rsc_op id="34" operation="monitor" operation_key="FSVarNevis:1_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="15" operation="monitor" operation_key="FSVarNevis:1_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="FSVarNevis" long-id="FSVarNevis:1" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="40000" device="/dev/mapper/ADMIN-var" directory="/var/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="40000" device="/dev/mapper/ADMIN-var" directory="/var/nevis" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs/>
</synapse>
<synapse id="173">
<action_set>
- <rsc_op id="221" operation="monitor" operation_key="FSVirtualMachines:1_monitor_20000" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="221" operation="monitor" operation_key="FSVirtualMachines:1_monitor_20000" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="FSVirtualMachines" long-id="FSVirtualMachines:1" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="20000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="40000" depth="0" device="/dev/mapper/ADMIN-xen" directory="/xen" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="20000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="40000" depth="0" device="/dev/mapper/ADMIN-xen" directory="/xen" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="220" operation="start" operation_key="FSVirtualMachines:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="220" operation="start" operation_key="FSVirtualMachines:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
</inputs>
</synapse>
<synapse id="174">
<action_set>
- <rsc_op id="220" operation="start" operation_key="FSVirtualMachines:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="220" operation="start" operation_key="FSVirtualMachines:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="FSVirtualMachines" long-id="FSVirtualMachines:1" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="60000" device="/dev/mapper/ADMIN-xen" directory="/xen" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="60000" device="/dev/mapper/ADMIN-xen" directory="/xen" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="218" operation="start" operation_key="FSVarNevis:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="218" operation="start" operation_key="FSVarNevis:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
<pseudo_event id="226" operation="start" operation_key="FilesystemGroup:1_start_0"/>
@@ -2530,36 +2530,36 @@
</synapse>
<synapse id="175">
<action_set>
- <rsc_op id="35" operation="monitor" operation_key="FSVirtualMachines:1_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="16" operation="monitor" operation_key="FSVirtualMachines:1_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="FSVirtualMachines" long-id="FSVirtualMachines:1" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="40000" device="/dev/mapper/ADMIN-xen" directory="/xen" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="40000" device="/dev/mapper/ADMIN-xen" directory="/xen" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs/>
</synapse>
<synapse id="176">
<action_set>
- <rsc_op id="223" operation="monitor" operation_key="FSMail:1_monitor_20000" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="223" operation="monitor" operation_key="FSMail:1_monitor_20000" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="FSMail" long-id="FSMail:1" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="20000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="40000" depth="0" device="/dev/mapper/ADMIN-mail" directory="/mail" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="20000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="40000" depth="0" device="/dev/mapper/ADMIN-mail" directory="/mail" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="222" operation="start" operation_key="FSMail:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="222" operation="start" operation_key="FSMail:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
</inputs>
</synapse>
<synapse id="177">
<action_set>
- <rsc_op id="222" operation="start" operation_key="FSMail:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="222" operation="start" operation_key="FSMail:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="FSMail" long-id="FSMail:1" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="60000" device="/dev/mapper/ADMIN-mail" directory="/mail" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="60000" device="/dev/mapper/ADMIN-mail" directory="/mail" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="220" operation="start" operation_key="FSVirtualMachines:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="220" operation="start" operation_key="FSVirtualMachines:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
<pseudo_event id="226" operation="start" operation_key="FilesystemGroup:1_start_0"/>
@@ -2568,36 +2568,36 @@
</synapse>
<synapse id="178">
<action_set>
- <rsc_op id="36" operation="monitor" operation_key="FSMail:1_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="17" operation="monitor" operation_key="FSMail:1_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="FSMail" long-id="FSMail:1" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="40000" device="/dev/mapper/ADMIN-mail" directory="/mail" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="40000" device="/dev/mapper/ADMIN-mail" directory="/mail" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs/>
</synapse>
<synapse id="179">
<action_set>
- <rsc_op id="225" operation="monitor" operation_key="FSWork:1_monitor_20000" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="225" operation="monitor" operation_key="FSWork:1_monitor_20000" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="FSWork" long-id="FSWork:1" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="20000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="40000" depth="0" device="/dev/mapper/ADMIN-work" directory="/work" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="20000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="40000" depth="0" device="/dev/mapper/ADMIN-work" directory="/work" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="224" operation="start" operation_key="FSWork:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="224" operation="start" operation_key="FSWork:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
</inputs>
</synapse>
<synapse id="180">
<action_set>
- <rsc_op id="224" operation="start" operation_key="FSWork:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="224" operation="start" operation_key="FSWork:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="FSWork" long-id="FSWork:1" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_timeout="60000" device="/dev/mapper/ADMIN-work" directory="/work" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_timeout="60000" device="/dev/mapper/ADMIN-work" directory="/work" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs>
<trigger>
- <rsc_op id="222" operation="start" operation_key="FSMail:1_start_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="222" operation="start" operation_key="FSMail:1_start_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
<pseudo_event id="226" operation="start" operation_key="FilesystemGroup:1_start_0"/>
@@ -2606,9 +2606,9 @@
</synapse>
<synapse id="181">
<action_set>
- <rsc_op id="37" operation="monitor" operation_key="FSWork:1_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu">
+ <rsc_op id="18" operation="monitor" operation_key="FSWork:1_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu">
<primitive id="FSWork" long-id="FSWork:1" class="ocf" provider="heartbeat" type="Filesystem"/>
- <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="orestes-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="orestes-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="40000" device="/dev/mapper/ADMIN-work" directory="/work" fstype="gfs2" options="defaults,noatime,nodiratime"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="hypatia-corosync.nevis.columbia.edu" CRM_meta_on_node_uuid="hypatia-corosync.nevis.columbia.edu" CRM_meta_op_target_rc="7" CRM_meta_timeout="40000" device="/dev/mapper/ADMIN-work" directory="/work" fstype="gfs2" options="defaults,noatime,nodiratime"/>
</rsc_op>
</action_set>
<inputs/>
@@ -2639,40 +2639,40 @@
</action_set>
<inputs>
<trigger>
- <rsc_op id="13" operation="monitor" operation_key="AdminLvm:0_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="13" operation="monitor" operation_key="AdminLvm:1_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="14" operation="monitor" operation_key="FSUsrNevis:0_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="14" operation="monitor" operation_key="FSUsrNevis:1_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="15" operation="monitor" operation_key="FSVarNevis:0_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="15" operation="monitor" operation_key="FSVarNevis:1_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="16" operation="monitor" operation_key="FSVirtualMachines:0_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="16" operation="monitor" operation_key="FSVirtualMachines:1_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="17" operation="monitor" operation_key="FSMail:0_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="17" operation="monitor" operation_key="FSMail:1_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="18" operation="monitor" operation_key="FSWork:0_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
+ <rsc_op id="18" operation="monitor" operation_key="FSWork:1_monitor_0" on_node="hypatia-corosync.nevis.columbia.edu" on_node_uuid="hypatia-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="32" operation="monitor" operation_key="AdminLvm:1_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="32" operation="monitor" operation_key="AdminLvm:0_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="33" operation="monitor" operation_key="FSUsrNevis:1_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="33" operation="monitor" operation_key="FSUsrNevis:0_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="34" operation="monitor" operation_key="FSVarNevis:1_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="34" operation="monitor" operation_key="FSVarNevis:0_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="35" operation="monitor" operation_key="FSVirtualMachines:1_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="35" operation="monitor" operation_key="FSVirtualMachines:0_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="36" operation="monitor" operation_key="FSMail:1_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="36" operation="monitor" operation_key="FSMail:0_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
- <rsc_op id="37" operation="monitor" operation_key="FSWork:1_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
+ <rsc_op id="37" operation="monitor" operation_key="FSWork:0_monitor_0" on_node="orestes-corosync.nevis.columbia.edu" on_node_uuid="orestes-corosync.nevis.columbia.edu"/>
</trigger>
<trigger>
<pseudo_event id="63" operation="notified" operation_key="AdminClone_confirmed-post_notify_promoted_0"/>
diff --git a/cts/scheduler/exp/shutdown-lock-expiration.exp b/cts/scheduler/exp/shutdown-lock-expiration.exp
index 465f12b..9941333 100644
--- a/cts/scheduler/exp/shutdown-lock-expiration.exp
+++ b/cts/scheduler/exp/shutdown-lock-expiration.exp
@@ -60,7 +60,7 @@
<action_set>
<crm_event mode="cib" id="1" operation="lrm_delete" operation_key="rsc2_lrm_delete_0" on_node="node2" on_node_uuid="2">
<primitive id="rsc2" class="ocf" provider="pacemaker" type="Dummy"/>
- <attributes CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="90000" />
+ <attributes CRM_meta_on_fail="ignore" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="90000" />
</crm_event>
</action_set>
<inputs/>
diff --git a/cts/scheduler/exp/timeout-by-node.exp b/cts/scheduler/exp/timeout-by-node.exp
new file mode 100644
index 0000000..19d1afc
--- /dev/null
+++ b/cts/scheduler/exp/timeout-by-node.exp
@@ -0,0 +1,228 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" transition_id="0">
+ <synapse id="0">
+ <action_set>
+ <rsc_op id="10" operation="monitor" operation_key="rsc1:0_monitor_10000" on_node="node2" on_node_uuid="2">
+ <primitive id="rsc1" long-id="rsc1:0" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="9" operation="start" operation_key="rsc1:0_start_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="1">
+ <action_set>
+ <rsc_op id="9" operation="start" operation_key="rsc1:0_start_0" on_node="node2" on_node_uuid="2">
+ <primitive id="rsc1" long-id="rsc1:0" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="23000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="19" operation="start" operation_key="rsc1-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="2">
+ <action_set>
+ <rsc_op id="3" operation="monitor" operation_key="rsc1:0_monitor_0" on_node="node2" on_node_uuid="2">
+ <primitive id="rsc1" long-id="rsc1:0" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="0" CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="3">
+ <action_set>
+ <rsc_op id="12" operation="monitor" operation_key="rsc1:1_monitor_10000" on_node="node3" on_node_uuid="3">
+ <primitive id="rsc1" long-id="rsc1:1" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="11" operation="start" operation_key="rsc1:1_start_0" on_node="node3" on_node_uuid="3"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="4">
+ <action_set>
+ <rsc_op id="11" operation="start" operation_key="rsc1:1_start_0" on_node="node3" on_node_uuid="3">
+ <primitive id="rsc1" long-id="rsc1:1" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="23000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="19" operation="start" operation_key="rsc1-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="5">
+ <action_set>
+ <rsc_op id="4" operation="monitor" operation_key="rsc1:1_monitor_0" on_node="node3" on_node_uuid="3">
+ <primitive id="rsc1" long-id="rsc1:1" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="1" CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="6">
+ <action_set>
+ <rsc_op id="14" operation="monitor" operation_key="rsc1:2_monitor_10000" on_node="node4" on_node_uuid="4">
+ <primitive id="rsc1" long-id="rsc1:2" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node4" CRM_meta_on_node_uuid="4" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="13" operation="start" operation_key="rsc1:2_start_0" on_node="node4" on_node_uuid="4"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="7">
+ <action_set>
+ <rsc_op id="13" operation="start" operation_key="rsc1:2_start_0" on_node="node4" on_node_uuid="4">
+ <primitive id="rsc1" long-id="rsc1:2" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node4" CRM_meta_on_node_uuid="4" CRM_meta_timeout="23000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="19" operation="start" operation_key="rsc1-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="8">
+ <action_set>
+ <rsc_op id="5" operation="monitor" operation_key="rsc1:2_monitor_0" on_node="node4" on_node_uuid="4">
+ <primitive id="rsc1" long-id="rsc1:2" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="2" CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="node4" CRM_meta_on_node_uuid="4" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="9">
+ <action_set>
+ <rsc_op id="16" operation="monitor" operation_key="rsc1:3_monitor_10000" on_node="node5" on_node_uuid="5">
+ <primitive id="rsc1" long-id="rsc1:3" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="3" CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node5" CRM_meta_on_node_uuid="5" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="15" operation="start" operation_key="rsc1:3_start_0" on_node="node5" on_node_uuid="5"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="10">
+ <action_set>
+ <rsc_op id="15" operation="start" operation_key="rsc1:3_start_0" on_node="node5" on_node_uuid="5">
+ <primitive id="rsc1" long-id="rsc1:3" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="3" CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node5" CRM_meta_on_node_uuid="5" CRM_meta_timeout="23000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="19" operation="start" operation_key="rsc1-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="11">
+ <action_set>
+ <rsc_op id="6" operation="monitor" operation_key="rsc1:3_monitor_0" on_node="node5" on_node_uuid="5">
+ <primitive id="rsc1" long-id="rsc1:3" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="3" CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="node5" CRM_meta_on_node_uuid="5" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="12">
+ <action_set>
+ <rsc_op id="18" operation="monitor" operation_key="rsc1:4_monitor_10000" on_node="node1" on_node_uuid="1">
+ <primitive id="rsc1" long-id="rsc1:4" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="4" CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="17" operation="start" operation_key="rsc1:4_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="13">
+ <action_set>
+ <rsc_op id="17" operation="start" operation_key="rsc1:4_start_0" on_node="node1" on_node_uuid="1">
+ <primitive id="rsc1" long-id="rsc1:4" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="4" CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="25000" />
+ </rsc_op>
+ </action_set>
+ <inputs>
+ <trigger>
+ <pseudo_event id="19" operation="start" operation_key="rsc1-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="14">
+ <action_set>
+ <rsc_op id="2" operation="monitor" operation_key="rsc1:4_monitor_0" on_node="node1" on_node_uuid="1">
+ <primitive id="rsc1" long-id="rsc1:4" class="ocf" provider="pacemaker" type="Dummy"/>
+ <attributes CRM_meta_clone="4" CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" />
+ </rsc_op>
+ </action_set>
+ <inputs/>
+ </synapse>
+ <synapse id="15" priority="1000000">
+ <action_set>
+ <pseudo_event id="20" operation="running" operation_key="rsc1-clone_running_0">
+ <attributes CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="9" operation="start" operation_key="rsc1:0_start_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="11" operation="start" operation_key="rsc1:1_start_0" on_node="node3" on_node_uuid="3"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="13" operation="start" operation_key="rsc1:2_start_0" on_node="node4" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="15" operation="start" operation_key="rsc1:3_start_0" on_node="node5" on_node_uuid="5"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="17" operation="start" operation_key="rsc1:4_start_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <pseudo_event id="19" operation="start" operation_key="rsc1-clone_start_0"/>
+ </trigger>
+ </inputs>
+ </synapse>
+ <synapse id="16">
+ <action_set>
+ <pseudo_event id="19" operation="start" operation_key="rsc1-clone_start_0">
+ <attributes CRM_meta_clone_max="5" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+ </pseudo_event>
+ </action_set>
+ <inputs>
+ <trigger>
+ <rsc_op id="2" operation="monitor" operation_key="rsc1:4_monitor_0" on_node="node1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="3" operation="monitor" operation_key="rsc1:0_monitor_0" on_node="node2" on_node_uuid="2"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="4" operation="monitor" operation_key="rsc1:1_monitor_0" on_node="node3" on_node_uuid="3"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="5" operation="monitor" operation_key="rsc1:2_monitor_0" on_node="node4" on_node_uuid="4"/>
+ </trigger>
+ <trigger>
+ <rsc_op id="6" operation="monitor" operation_key="rsc1:3_monitor_0" on_node="node5" on_node_uuid="5"/>
+ </trigger>
+ </inputs>
+ </synapse>
+</transition_graph>
diff --git a/cts/scheduler/exp/unfence-definition.exp b/cts/scheduler/exp/unfence-definition.exp
index 6a098ed..308f638 100644
--- a/cts/scheduler/exp/unfence-definition.exp
+++ b/cts/scheduler/exp/unfence-definition.exp
@@ -90,6 +90,9 @@
</action_set>
<inputs>
<trigger>
+ <rsc_op id="9" operation="monitor" operation_key="dlm:2_monitor_0" on_node="virt-3" on_node_uuid="3"/>
+ </trigger>
+ <trigger>
<pseudo_event id="19" operation="stop" operation_key="dlm-clone_stop_0"/>
</trigger>
<trigger>
@@ -109,6 +112,9 @@
<crm_event id="3" operation="stonith" operation_key="stonith-virt-3-on" on_node="virt-3" on_node_uuid="3"/>
</trigger>
<trigger>
+ <rsc_op id="13" operation="start" operation_key="dlm_start_0" internal_operation_key="dlm:0_start_0" on_node="virt-1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
<pseudo_event id="17" operation="start" operation_key="dlm-clone_start_0"/>
</trigger>
<trigger>
@@ -229,6 +235,9 @@
<rsc_op id="7" operation="monitor" operation_key="clvmd:1_monitor_0" on_node="virt-2" on_node_uuid="2"/>
</trigger>
<trigger>
+ <rsc_op id="10" operation="monitor" operation_key="clvmd:2_monitor_0" on_node="virt-3" on_node_uuid="3"/>
+ </trigger>
+ <trigger>
<pseudo_event id="27" operation="stop" operation_key="clvmd-clone_stop_0"/>
</trigger>
</inputs>
@@ -276,6 +285,9 @@
<rsc_op id="16" operation="start" operation_key="dlm:2_start_0" on_node="virt-3" on_node_uuid="3"/>
</trigger>
<trigger>
+ <rsc_op id="22" operation="start" operation_key="clvmd_start_0" internal_operation_key="clvmd:0_start_0" on_node="virt-1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
<rsc_op id="23" operation="start" operation_key="clvmd:1_start_0" on_node="virt-2" on_node_uuid="2"/>
</trigger>
<trigger>
diff --git a/cts/scheduler/exp/unfence-parameters.exp b/cts/scheduler/exp/unfence-parameters.exp
index 268bf00..0b76e26 100644
--- a/cts/scheduler/exp/unfence-parameters.exp
+++ b/cts/scheduler/exp/unfence-parameters.exp
@@ -74,6 +74,9 @@
</action_set>
<inputs>
<trigger>
+ <rsc_op id="8" operation="monitor" operation_key="dlm:2_monitor_0" on_node="virt-3" on_node_uuid="3"/>
+ </trigger>
+ <trigger>
<pseudo_event id="18" operation="stop" operation_key="dlm-clone_stop_0"/>
</trigger>
<trigger>
@@ -93,6 +96,9 @@
<crm_event id="3" operation="stonith" operation_key="stonith-virt-3-on" on_node="virt-3" on_node_uuid="3"/>
</trigger>
<trigger>
+ <rsc_op id="12" operation="start" operation_key="dlm_start_0" internal_operation_key="dlm:0_start_0" on_node="virt-1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
<pseudo_event id="16" operation="start" operation_key="dlm-clone_start_0"/>
</trigger>
<trigger>
@@ -213,6 +219,9 @@
<rsc_op id="6" operation="monitor" operation_key="clvmd:1_monitor_0" on_node="virt-2" on_node_uuid="2"/>
</trigger>
<trigger>
+ <rsc_op id="9" operation="monitor" operation_key="clvmd:2_monitor_0" on_node="virt-3" on_node_uuid="3"/>
+ </trigger>
+ <trigger>
<pseudo_event id="26" operation="stop" operation_key="clvmd-clone_stop_0"/>
</trigger>
</inputs>
@@ -260,6 +269,9 @@
<rsc_op id="15" operation="start" operation_key="dlm:2_start_0" on_node="virt-3" on_node_uuid="3"/>
</trigger>
<trigger>
+ <rsc_op id="21" operation="start" operation_key="clvmd_start_0" internal_operation_key="clvmd:0_start_0" on_node="virt-1" on_node_uuid="1"/>
+ </trigger>
+ <trigger>
<rsc_op id="22" operation="start" operation_key="clvmd:1_start_0" on_node="virt-2" on_node_uuid="2"/>
</trigger>
<trigger>
diff --git a/cts/scheduler/scores/594.scores b/cts/scheduler/scores/594.scores
index 5e99750..96c8f44 100644
--- a/cts/scheduler/scores/594.scores
+++ b/cts/scheduler/scores/594.scores
@@ -21,8 +21,11 @@ pcmk__primitive_assign: child_DoFencing:1 allocation score on hadev1: 1
pcmk__primitive_assign: child_DoFencing:1 allocation score on hadev2: -INFINITY
pcmk__primitive_assign: child_DoFencing:1 allocation score on hadev3: -INFINITY
pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev1: -INFINITY
+pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev1: -INFINITY
+pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev2: -INFINITY
pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev2: -INFINITY
pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev3: -INFINITY
+pcmk__primitive_assign: child_DoFencing:2 allocation score on hadev3: -INFINITY
pcmk__primitive_assign: rsc_hadev1 allocation score on hadev1: 100
pcmk__primitive_assign: rsc_hadev1 allocation score on hadev2: 0
pcmk__primitive_assign: rsc_hadev1 allocation score on hadev3: 0
diff --git a/cts/scheduler/scores/a-promote-then-b-migrate.scores b/cts/scheduler/scores/a-promote-then-b-migrate.scores
index c94077d..02674be 100644
--- a/cts/scheduler/scores/a-promote-then-b-migrate.scores
+++ b/cts/scheduler/scores/a-promote-then-b-migrate.scores
@@ -5,7 +5,9 @@ pcmk__clone_assign: rsc1:0 allocation score on node1: 1
pcmk__clone_assign: rsc1:0 allocation score on node2: 0
pcmk__clone_assign: rsc1:1 allocation score on node1: 0
pcmk__clone_assign: rsc1:1 allocation score on node2: 1
+pcmk__primitive_assign: rsc1:0 allocation score on node1: -INFINITY
pcmk__primitive_assign: rsc1:0 allocation score on node1: 1
+pcmk__primitive_assign: rsc1:0 allocation score on node2: -INFINITY
pcmk__primitive_assign: rsc1:0 allocation score on node2: 0
pcmk__primitive_assign: rsc1:1 allocation score on node1: -INFINITY
pcmk__primitive_assign: rsc1:1 allocation score on node2: 1
diff --git a/cts/scheduler/scores/asymmetric.scores b/cts/scheduler/scores/asymmetric.scores
index 69310bf..93ed82f 100644
--- a/cts/scheduler/scores/asymmetric.scores
+++ b/cts/scheduler/scores/asymmetric.scores
@@ -11,4 +11,3 @@ pcmk__primitive_assign: ebe3fb6e-7778-426e-be58-190ab1ff3dd3:0 allocation score
pcmk__primitive_assign: ebe3fb6e-7778-426e-be58-190ab1ff3dd3:0 allocation score on puma3: -INFINITY
pcmk__primitive_assign: ebe3fb6e-7778-426e-be58-190ab1ff3dd3:1 allocation score on puma1: 0
pcmk__primitive_assign: ebe3fb6e-7778-426e-be58-190ab1ff3dd3:1 allocation score on puma3: 200
-pcmk__primitive_assign: vpool_ip_poolA allocation score on puma3: -INFINITY
diff --git a/cts/scheduler/scores/bug-1822.scores b/cts/scheduler/scores/bug-1822.scores
index 82191d1..0a9056b 100644
--- a/cts/scheduler/scores/bug-1822.scores
+++ b/cts/scheduler/scores/bug-1822.scores
@@ -1,5 +1,5 @@
-ms-sf_group:0 promotion score on process2b: -INFINITY
+ms-sf_group:0 promotion score on process2b: 49
ms-sf_group:1 promotion score on none: 0
pcmk__clone_assign: ms-sf allocation score on process1a: 0
pcmk__clone_assign: ms-sf allocation score on process2b: 0
diff --git a/cts/scheduler/scores/bug-5014-CLONE-A-stop-B-started.scores b/cts/scheduler/scores/bug-5014-CLONE-A-stop-B-started.scores
index e698b14..d79208c 100644
--- a/cts/scheduler/scores/bug-5014-CLONE-A-stop-B-started.scores
+++ b/cts/scheduler/scores/bug-5014-CLONE-A-stop-B-started.scores
@@ -5,3 +5,4 @@ pcmk__clone_assign: clone1 allocation score on fc16-builder: 0
pcmk__clone_assign: clone2 allocation score on fc16-builder: 0
pcmk__primitive_assign: ClusterIP2:0 allocation score on fc16-builder: 1
pcmk__primitive_assign: ClusterIP:0 allocation score on fc16-builder: -INFINITY
+pcmk__primitive_assign: ClusterIP:0 allocation score on fc16-builder: -INFINITY
diff --git a/cts/scheduler/scores/bug-5143-ms-shuffle.scores b/cts/scheduler/scores/bug-5143-ms-shuffle.scores
index 86a1a78..87bb4e4 100644
--- a/cts/scheduler/scores/bug-5143-ms-shuffle.scores
+++ b/cts/scheduler/scores/bug-5143-ms-shuffle.scores
@@ -173,7 +173,7 @@ pcmk__primitive_assign: clvmd:0 allocation score on hex-1: 4000
pcmk__primitive_assign: clvmd:0 allocation score on hex-2: -INFINITY
pcmk__primitive_assign: clvmd:0 allocation score on hex-3: -INFINITY
pcmk__primitive_assign: clvmd:1 allocation score on hex-1: -INFINITY
-pcmk__primitive_assign: clvmd:1 allocation score on hex-2: 4000
+pcmk__primitive_assign: clvmd:1 allocation score on hex-2: 6000
pcmk__primitive_assign: clvmd:1 allocation score on hex-3: -INFINITY
pcmk__primitive_assign: clvmd:2 allocation score on hex-1: -INFINITY
pcmk__primitive_assign: clvmd:2 allocation score on hex-2: -INFINITY
@@ -182,7 +182,7 @@ pcmk__primitive_assign: dlm:0 allocation score on hex-1: 5000
pcmk__primitive_assign: dlm:0 allocation score on hex-2: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-3: 0
pcmk__primitive_assign: dlm:1 allocation score on hex-1: 0
-pcmk__primitive_assign: dlm:1 allocation score on hex-2: 5000
+pcmk__primitive_assign: dlm:1 allocation score on hex-2: 7000
pcmk__primitive_assign: dlm:1 allocation score on hex-3: 0
pcmk__primitive_assign: dlm:2 allocation score on hex-1: -INFINITY
pcmk__primitive_assign: dlm:2 allocation score on hex-2: -INFINITY
@@ -227,12 +227,12 @@ pcmk__primitive_assign: fs-ocfs-1:0 allocation score on hex-1: 1000
pcmk__primitive_assign: fs-ocfs-1:0 allocation score on hex-2: -INFINITY
pcmk__primitive_assign: fs-ocfs-1:0 allocation score on hex-3: -INFINITY
pcmk__primitive_assign: fs-ocfs-1:1 allocation score on hex-1: -INFINITY
-pcmk__primitive_assign: fs-ocfs-1:1 allocation score on hex-2: 1000
+pcmk__primitive_assign: fs-ocfs-1:1 allocation score on hex-2: 3000
pcmk__primitive_assign: fs-ocfs-1:1 allocation score on hex-3: -INFINITY
pcmk__primitive_assign: fs-ocfs-1:2 allocation score on hex-1: -INFINITY
pcmk__primitive_assign: fs-ocfs-1:2 allocation score on hex-2: -INFINITY
pcmk__primitive_assign: fs-ocfs-1:2 allocation score on hex-3: 1000
-pcmk__primitive_assign: fs-ocfs-2:0 allocation score on hex-1: 1000
+pcmk__primitive_assign: fs-ocfs-2:0 allocation score on hex-1: 2000
pcmk__primitive_assign: fs-ocfs-2:0 allocation score on hex-2: -INFINITY
pcmk__primitive_assign: fs-ocfs-2:0 allocation score on hex-3: -INFINITY
pcmk__primitive_assign: fs-ocfs-2:1 allocation score on hex-1: -INFINITY
@@ -254,7 +254,7 @@ pcmk__primitive_assign: o2cb:0 allocation score on hex-1: 3000
pcmk__primitive_assign: o2cb:0 allocation score on hex-2: -INFINITY
pcmk__primitive_assign: o2cb:0 allocation score on hex-3: -INFINITY
pcmk__primitive_assign: o2cb:1 allocation score on hex-1: -INFINITY
-pcmk__primitive_assign: o2cb:1 allocation score on hex-2: 3000
+pcmk__primitive_assign: o2cb:1 allocation score on hex-2: 5000
pcmk__primitive_assign: o2cb:1 allocation score on hex-3: -INFINITY
pcmk__primitive_assign: o2cb:2 allocation score on hex-1: -INFINITY
pcmk__primitive_assign: o2cb:2 allocation score on hex-2: -INFINITY
@@ -266,7 +266,7 @@ pcmk__primitive_assign: vg1:0 allocation score on hex-1: 2000
pcmk__primitive_assign: vg1:0 allocation score on hex-2: -INFINITY
pcmk__primitive_assign: vg1:0 allocation score on hex-3: -INFINITY
pcmk__primitive_assign: vg1:1 allocation score on hex-1: -INFINITY
-pcmk__primitive_assign: vg1:1 allocation score on hex-2: 2000
+pcmk__primitive_assign: vg1:1 allocation score on hex-2: 4000
pcmk__primitive_assign: vg1:1 allocation score on hex-3: -INFINITY
pcmk__primitive_assign: vg1:2 allocation score on hex-1: -INFINITY
pcmk__primitive_assign: vg1:2 allocation score on hex-2: -INFINITY
diff --git a/cts/scheduler/scores/bug-5186-partial-migrate.scores b/cts/scheduler/scores/bug-5186-partial-migrate.scores
index 93854f1..a962738 100644
--- a/cts/scheduler/scores/bug-5186-partial-migrate.scores
+++ b/cts/scheduler/scores/bug-5186-partial-migrate.scores
@@ -67,7 +67,7 @@ pcmk__primitive_assign: prmDiskd1:0 allocation score on bl460g1n7: -INFINITY
pcmk__primitive_assign: prmDiskd1:0 allocation score on bl460g1n8: -INFINITY
pcmk__primitive_assign: prmDiskd1:1 allocation score on bl460g1n6: INFINITY
pcmk__primitive_assign: prmDiskd1:1 allocation score on bl460g1n7: -INFINITY
-pcmk__primitive_assign: prmDiskd1:1 allocation score on bl460g1n8: 0
+pcmk__primitive_assign: prmDiskd1:1 allocation score on bl460g1n8: 200
pcmk__primitive_assign: prmDiskd1:2 allocation score on bl460g1n6: -INFINITY
pcmk__primitive_assign: prmDiskd1:2 allocation score on bl460g1n7: -INFINITY
pcmk__primitive_assign: prmDiskd1:2 allocation score on bl460g1n8: INFINITY
@@ -76,7 +76,7 @@ pcmk__primitive_assign: prmDiskd2:0 allocation score on bl460g1n7: -INFINITY
pcmk__primitive_assign: prmDiskd2:0 allocation score on bl460g1n8: -INFINITY
pcmk__primitive_assign: prmDiskd2:1 allocation score on bl460g1n6: INFINITY
pcmk__primitive_assign: prmDiskd2:1 allocation score on bl460g1n7: -INFINITY
-pcmk__primitive_assign: prmDiskd2:1 allocation score on bl460g1n8: 0
+pcmk__primitive_assign: prmDiskd2:1 allocation score on bl460g1n8: 200
pcmk__primitive_assign: prmDiskd2:2 allocation score on bl460g1n6: -INFINITY
pcmk__primitive_assign: prmDiskd2:2 allocation score on bl460g1n7: -INFINITY
pcmk__primitive_assign: prmDiskd2:2 allocation score on bl460g1n8: INFINITY
@@ -88,7 +88,7 @@ pcmk__primitive_assign: prmPing:0 allocation score on bl460g1n7: -INFINITY
pcmk__primitive_assign: prmPing:0 allocation score on bl460g1n8: -INFINITY
pcmk__primitive_assign: prmPing:1 allocation score on bl460g1n6: INFINITY
pcmk__primitive_assign: prmPing:1 allocation score on bl460g1n7: -INFINITY
-pcmk__primitive_assign: prmPing:1 allocation score on bl460g1n8: 0
+pcmk__primitive_assign: prmPing:1 allocation score on bl460g1n8: 200
pcmk__primitive_assign: prmPing:2 allocation score on bl460g1n6: -INFINITY
pcmk__primitive_assign: prmPing:2 allocation score on bl460g1n7: -INFINITY
pcmk__primitive_assign: prmPing:2 allocation score on bl460g1n8: INFINITY
diff --git a/cts/scheduler/scores/bug-cl-5168.scores b/cts/scheduler/scores/bug-cl-5168.scores
index 916fecb..59dee5d 100644
--- a/cts/scheduler/scores/bug-cl-5168.scores
+++ b/cts/scheduler/scores/bug-cl-5168.scores
@@ -200,7 +200,7 @@ pcmk__primitive_assign: drbd-r1:0 allocation score on hex-2: 1001
pcmk__primitive_assign: drbd-r1:0 allocation score on hex-3: -INFINITY
pcmk__primitive_assign: drbd-r1:0 allocation score on hex-3: INFINITY
pcmk__primitive_assign: drbd-r1:1 allocation score on hex-1: -INFINITY
-pcmk__primitive_assign: drbd-r1:1 allocation score on hex-2: 0
+pcmk__primitive_assign: drbd-r1:1 allocation score on hex-2: -INFINITY
pcmk__primitive_assign: drbd-r1:1 allocation score on hex-3: INFINITY
pcmk__primitive_assign: dummy1 allocation score on hex-1: -INFINITY
pcmk__primitive_assign: dummy1 allocation score on hex-2: -INFINITY
diff --git a/cts/scheduler/scores/bug-lf-2106.scores b/cts/scheduler/scores/bug-lf-2106.scores
index b512c6e..30e175f 100644
--- a/cts/scheduler/scores/bug-lf-2106.scores
+++ b/cts/scheduler/scores/bug-lf-2106.scores
@@ -64,45 +64,45 @@ pcmk__group_assign: ssh-ip2 allocation score on cl-virt-2: 100
pcmk__primitive_assign: apcstonith allocation score on cl-virt-1: 100
pcmk__primitive_assign: apcstonith allocation score on cl-virt-2: 0
pcmk__primitive_assign: bugtrack allocation score on cl-virt-1: -INFINITY
-pcmk__primitive_assign: bugtrack allocation score on cl-virt-2: 275
-pcmk__primitive_assign: drbd-bugtrack:0 allocation score on cl-virt-1: 100
+pcmk__primitive_assign: bugtrack allocation score on cl-virt-2: 375
+pcmk__primitive_assign: drbd-bugtrack:0 allocation score on cl-virt-1: 150
pcmk__primitive_assign: drbd-bugtrack:0 allocation score on cl-virt-2: -INFINITY
-pcmk__primitive_assign: drbd-bugtrack:1 allocation score on cl-virt-1: 0
-pcmk__primitive_assign: drbd-bugtrack:1 allocation score on cl-virt-2: 175
+pcmk__primitive_assign: drbd-bugtrack:1 allocation score on cl-virt-1: 50
+pcmk__primitive_assign: drbd-bugtrack:1 allocation score on cl-virt-2: 275
pcmk__primitive_assign: drbd-infotos:0 allocation score on cl-virt-1: 100
pcmk__primitive_assign: drbd-infotos:0 allocation score on cl-virt-2: -INFINITY
pcmk__primitive_assign: drbd-infotos:1 allocation score on cl-virt-1: 0
-pcmk__primitive_assign: drbd-infotos:1 allocation score on cl-virt-2: 175
-pcmk__primitive_assign: drbd-itwiki:0 allocation score on cl-virt-1: 100
+pcmk__primitive_assign: drbd-infotos:1 allocation score on cl-virt-2: 325
+pcmk__primitive_assign: drbd-itwiki:0 allocation score on cl-virt-1: 150
pcmk__primitive_assign: drbd-itwiki:0 allocation score on cl-virt-2: -INFINITY
-pcmk__primitive_assign: drbd-itwiki:1 allocation score on cl-virt-1: 0
-pcmk__primitive_assign: drbd-itwiki:1 allocation score on cl-virt-2: 175
+pcmk__primitive_assign: drbd-itwiki:1 allocation score on cl-virt-1: 50
+pcmk__primitive_assign: drbd-itwiki:1 allocation score on cl-virt-2: 275
pcmk__primitive_assign: drbd-medomus-cvs:0 allocation score on cl-virt-1: 100
pcmk__primitive_assign: drbd-medomus-cvs:0 allocation score on cl-virt-2: -INFINITY
pcmk__primitive_assign: drbd-medomus-cvs:1 allocation score on cl-virt-1: 0
-pcmk__primitive_assign: drbd-medomus-cvs:1 allocation score on cl-virt-2: 175
-pcmk__primitive_assign: drbd-servsyslog:0 allocation score on cl-virt-1: 100
+pcmk__primitive_assign: drbd-medomus-cvs:1 allocation score on cl-virt-2: 325
+pcmk__primitive_assign: drbd-servsyslog:0 allocation score on cl-virt-1: 150
pcmk__primitive_assign: drbd-servsyslog:0 allocation score on cl-virt-2: -INFINITY
-pcmk__primitive_assign: drbd-servsyslog:1 allocation score on cl-virt-1: 0
-pcmk__primitive_assign: drbd-servsyslog:1 allocation score on cl-virt-2: 175
+pcmk__primitive_assign: drbd-servsyslog:1 allocation score on cl-virt-1: 50
+pcmk__primitive_assign: drbd-servsyslog:1 allocation score on cl-virt-2: 275
pcmk__primitive_assign: drbd-smsprod2:0 allocation score on cl-virt-1: 100
pcmk__primitive_assign: drbd-smsprod2:0 allocation score on cl-virt-2: -INFINITY
pcmk__primitive_assign: drbd-smsprod2:1 allocation score on cl-virt-1: 0
-pcmk__primitive_assign: drbd-smsprod2:1 allocation score on cl-virt-2: 175
+pcmk__primitive_assign: drbd-smsprod2:1 allocation score on cl-virt-2: 325
pcmk__primitive_assign: infotos allocation score on cl-virt-1: -INFINITY
-pcmk__primitive_assign: infotos allocation score on cl-virt-2: 325
+pcmk__primitive_assign: infotos allocation score on cl-virt-2: 475
pcmk__primitive_assign: itwiki allocation score on cl-virt-1: -INFINITY
-pcmk__primitive_assign: itwiki allocation score on cl-virt-2: 275
+pcmk__primitive_assign: itwiki allocation score on cl-virt-2: 375
pcmk__primitive_assign: medomus-cvs allocation score on cl-virt-1: -INFINITY
-pcmk__primitive_assign: medomus-cvs allocation score on cl-virt-2: 325
+pcmk__primitive_assign: medomus-cvs allocation score on cl-virt-2: 475
pcmk__primitive_assign: pingd:0 allocation score on cl-virt-1: 100
pcmk__primitive_assign: pingd:0 allocation score on cl-virt-2: 0
pcmk__primitive_assign: pingd:1 allocation score on cl-virt-1: -INFINITY
pcmk__primitive_assign: pingd:1 allocation score on cl-virt-2: 100
pcmk__primitive_assign: servsyslog allocation score on cl-virt-1: -INFINITY
-pcmk__primitive_assign: servsyslog allocation score on cl-virt-2: 275
+pcmk__primitive_assign: servsyslog allocation score on cl-virt-2: 375
pcmk__primitive_assign: smsprod2 allocation score on cl-virt-1: -INFINITY
-pcmk__primitive_assign: smsprod2 allocation score on cl-virt-2: 325
+pcmk__primitive_assign: smsprod2 allocation score on cl-virt-2: 475
pcmk__primitive_assign: ssh-bin allocation score on cl-virt-1: -INFINITY
pcmk__primitive_assign: ssh-bin allocation score on cl-virt-2: 100
pcmk__primitive_assign: ssh-ip1 allocation score on cl-virt-1: 0
diff --git a/cts/scheduler/scores/bug-lf-2153.scores b/cts/scheduler/scores/bug-lf-2153.scores
index d2492f2..afad730 100644
--- a/cts/scheduler/scores/bug-lf-2153.scores
+++ b/cts/scheduler/scores/bug-lf-2153.scores
@@ -29,7 +29,7 @@ pcmk__group_assign: rg_iscsivg01 allocation score on alice: 100
pcmk__group_assign: rg_iscsivg01 allocation score on bob: 0
pcmk__primitive_assign: res_drbd_iscsivg01:0 allocation score on alice: -INFINITY
pcmk__primitive_assign: res_drbd_iscsivg01:0 allocation score on bob: -INFINITY
-pcmk__primitive_assign: res_drbd_iscsivg01:1 allocation score on alice: 300
+pcmk__primitive_assign: res_drbd_iscsivg01:1 allocation score on alice: 1800
pcmk__primitive_assign: res_drbd_iscsivg01:1 allocation score on bob: -INFINITY
pcmk__primitive_assign: res_ip_alicebob01 allocation score on alice: 400
pcmk__primitive_assign: res_ip_alicebob01 allocation score on bob: -INFINITY
@@ -39,7 +39,7 @@ pcmk__primitive_assign: res_lu_iscsivg01_lun2 allocation score on alice: 600
pcmk__primitive_assign: res_lu_iscsivg01_lun2 allocation score on bob: -INFINITY
pcmk__primitive_assign: res_lvm_iscsivg01 allocation score on alice: 1200
pcmk__primitive_assign: res_lvm_iscsivg01 allocation score on bob: -INFINITY
-pcmk__primitive_assign: res_portblock_iscsivg01_block allocation score on alice: 1800
+pcmk__primitive_assign: res_portblock_iscsivg01_block allocation score on alice: 3300
pcmk__primitive_assign: res_portblock_iscsivg01_block allocation score on bob: -INFINITY
pcmk__primitive_assign: res_portblock_iscsivg01_unblock allocation score on alice: 200
pcmk__primitive_assign: res_portblock_iscsivg01_unblock allocation score on bob: -INFINITY
@@ -47,7 +47,7 @@ pcmk__primitive_assign: res_target_iscsivg01 allocation score on alice: 1000
pcmk__primitive_assign: res_target_iscsivg01 allocation score on bob: -INFINITY
pcmk__primitive_assign: res_tgtd:0 allocation score on alice: -INFINITY
pcmk__primitive_assign: res_tgtd:0 allocation score on bob: -INFINITY
-pcmk__primitive_assign: res_tgtd:1 allocation score on alice: 200
+pcmk__primitive_assign: res_tgtd:1 allocation score on alice: 1700
pcmk__primitive_assign: res_tgtd:1 allocation score on bob: -INFINITY
res_drbd_iscsivg01:0 promotion score on none: 0
res_drbd_iscsivg01:1 promotion score on alice: 3100
diff --git a/cts/scheduler/scores/bug-lf-2171.scores b/cts/scheduler/scores/bug-lf-2171.scores
index 7d2bdd4..14cc28a 100644
--- a/cts/scheduler/scores/bug-lf-2171.scores
+++ b/cts/scheduler/scores/bug-lf-2171.scores
@@ -12,8 +12,12 @@ pcmk__group_assign: res_Dummy2 allocation score on xenserver2: 0
pcmk__group_assign: res_Dummy3 allocation score on xenserver1: 200
pcmk__group_assign: res_Dummy3 allocation score on xenserver2: 0
pcmk__primitive_assign: res_Dummy1:0 allocation score on xenserver1: -INFINITY
+pcmk__primitive_assign: res_Dummy1:0 allocation score on xenserver1: -INFINITY
+pcmk__primitive_assign: res_Dummy1:0 allocation score on xenserver2: -INFINITY
pcmk__primitive_assign: res_Dummy1:0 allocation score on xenserver2: -INFINITY
pcmk__primitive_assign: res_Dummy1:1 allocation score on xenserver1: -INFINITY
+pcmk__primitive_assign: res_Dummy1:1 allocation score on xenserver1: -INFINITY
+pcmk__primitive_assign: res_Dummy1:1 allocation score on xenserver2: -INFINITY
pcmk__primitive_assign: res_Dummy1:1 allocation score on xenserver2: -INFINITY
pcmk__primitive_assign: res_Dummy2 allocation score on xenserver1: 200
pcmk__primitive_assign: res_Dummy2 allocation score on xenserver2: 0
diff --git a/cts/scheduler/scores/bug-lf-2422.scores b/cts/scheduler/scores/bug-lf-2422.scores
index 99ff12e..77a284d 100644
--- a/cts/scheduler/scores/bug-lf-2422.scores
+++ b/cts/scheduler/scores/bug-lf-2422.scores
@@ -248,20 +248,36 @@ pcmk__primitive_assign: o2cb:3 allocation score on qa-suse-2: -INFINITY
pcmk__primitive_assign: o2cb:3 allocation score on qa-suse-3: -INFINITY
pcmk__primitive_assign: o2cb:3 allocation score on qa-suse-4: -INFINITY
pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-1: -INFINITY
+pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-1: -INFINITY
+pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-2: -INFINITY
pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-2: -INFINITY
pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-3: -INFINITY
+pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-3: -INFINITY
+pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-4: -INFINITY
pcmk__primitive_assign: ocfs:0 allocation score on qa-suse-4: -INFINITY
pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-1: -INFINITY
+pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-1: -INFINITY
+pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-2: -INFINITY
pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-2: -INFINITY
pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-3: -INFINITY
+pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-3: -INFINITY
+pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-4: -INFINITY
pcmk__primitive_assign: ocfs:1 allocation score on qa-suse-4: -INFINITY
pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-1: -INFINITY
+pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-1: -INFINITY
+pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-2: -INFINITY
pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-2: -INFINITY
pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-3: -INFINITY
+pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-3: -INFINITY
+pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-4: -INFINITY
pcmk__primitive_assign: ocfs:2 allocation score on qa-suse-4: -INFINITY
pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-1: -INFINITY
+pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-1: -INFINITY
+pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-2: -INFINITY
pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-2: -INFINITY
pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-3: -INFINITY
+pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-3: -INFINITY
+pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-4: -INFINITY
pcmk__primitive_assign: ocfs:3 allocation score on qa-suse-4: -INFINITY
pcmk__primitive_assign: sbd_stonith allocation score on qa-suse-1: 0
pcmk__primitive_assign: sbd_stonith allocation score on qa-suse-2: 0
diff --git a/cts/scheduler/scores/bug-lf-2453.scores b/cts/scheduler/scores/bug-lf-2453.scores
index eaee72d..3ef0f6d 100644
--- a/cts/scheduler/scores/bug-lf-2453.scores
+++ b/cts/scheduler/scores/bug-lf-2453.scores
@@ -17,6 +17,10 @@ pcmk__primitive_assign: DummyResource:1 allocation score on domu1: -INFINITY
pcmk__primitive_assign: DummyResource:1 allocation score on domu2: INFINITY
pcmk__primitive_assign: PrimitiveResource1 allocation score on domu1: INFINITY
pcmk__primitive_assign: apache:0 allocation score on domu1: -INFINITY
+pcmk__primitive_assign: apache:0 allocation score on domu1: -INFINITY
+pcmk__primitive_assign: apache:0 allocation score on domu2: -INFINITY
pcmk__primitive_assign: apache:0 allocation score on domu2: -INFINITY
pcmk__primitive_assign: apache:1 allocation score on domu1: -INFINITY
+pcmk__primitive_assign: apache:1 allocation score on domu1: -INFINITY
+pcmk__primitive_assign: apache:1 allocation score on domu2: -INFINITY
pcmk__primitive_assign: apache:1 allocation score on domu2: -INFINITY
diff --git a/cts/scheduler/scores/bug-lf-2551.scores b/cts/scheduler/scores/bug-lf-2551.scores
index d9cb9f5..2dc23a8 100644
--- a/cts/scheduler/scores/bug-lf-2551.scores
+++ b/cts/scheduler/scores/bug-lf-2551.scores
@@ -228,14 +228,14 @@ pcmk__group_assign: vg1:3 allocation score on hex-7: -INFINITY
pcmk__group_assign: vg1:3 allocation score on hex-8: -INFINITY
pcmk__group_assign: vg1:3 allocation score on hex-9: -INFINITY
pcmk__primitive_assign: clvm:0 allocation score on hex-0: -INFINITY
-pcmk__primitive_assign: clvm:0 allocation score on hex-7: 4
+pcmk__primitive_assign: clvm:0 allocation score on hex-7: 20
pcmk__primitive_assign: clvm:0 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: clvm:0 allocation score on hex-9: -INFINITY
pcmk__primitive_assign: clvm:1 allocation score on hex-0: -INFINITY
pcmk__primitive_assign: clvm:1 allocation score on hex-7: -INFINITY
-pcmk__primitive_assign: clvm:1 allocation score on hex-8: 4
+pcmk__primitive_assign: clvm:1 allocation score on hex-8: 20
pcmk__primitive_assign: clvm:1 allocation score on hex-9: -INFINITY
-pcmk__primitive_assign: clvm:2 allocation score on hex-0: 4
+pcmk__primitive_assign: clvm:2 allocation score on hex-0: 18
pcmk__primitive_assign: clvm:2 allocation score on hex-7: -INFINITY
pcmk__primitive_assign: clvm:2 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: clvm:2 allocation score on hex-9: -INFINITY
@@ -244,14 +244,14 @@ pcmk__primitive_assign: clvm:3 allocation score on hex-7: -INFINITY
pcmk__primitive_assign: clvm:3 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: clvm:3 allocation score on hex-9: -INFINITY
pcmk__primitive_assign: cmirrord:0 allocation score on hex-0: -INFINITY
-pcmk__primitive_assign: cmirrord:0 allocation score on hex-7: 3
+pcmk__primitive_assign: cmirrord:0 allocation score on hex-7: 19
pcmk__primitive_assign: cmirrord:0 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: cmirrord:0 allocation score on hex-9: -INFINITY
pcmk__primitive_assign: cmirrord:1 allocation score on hex-0: -INFINITY
pcmk__primitive_assign: cmirrord:1 allocation score on hex-7: -INFINITY
-pcmk__primitive_assign: cmirrord:1 allocation score on hex-8: 3
+pcmk__primitive_assign: cmirrord:1 allocation score on hex-8: 19
pcmk__primitive_assign: cmirrord:1 allocation score on hex-9: -INFINITY
-pcmk__primitive_assign: cmirrord:2 allocation score on hex-0: 3
+pcmk__primitive_assign: cmirrord:2 allocation score on hex-0: 17
pcmk__primitive_assign: cmirrord:2 allocation score on hex-7: -INFINITY
pcmk__primitive_assign: cmirrord:2 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: cmirrord:2 allocation score on hex-9: -INFINITY
@@ -259,15 +259,15 @@ pcmk__primitive_assign: cmirrord:3 allocation score on hex-0: -INFINITY
pcmk__primitive_assign: cmirrord:3 allocation score on hex-7: -INFINITY
pcmk__primitive_assign: cmirrord:3 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: cmirrord:3 allocation score on hex-9: -INFINITY
-pcmk__primitive_assign: dlm:0 allocation score on hex-0: 0
-pcmk__primitive_assign: dlm:0 allocation score on hex-7: 6
-pcmk__primitive_assign: dlm:0 allocation score on hex-8: 0
+pcmk__primitive_assign: dlm:0 allocation score on hex-0: 14
+pcmk__primitive_assign: dlm:0 allocation score on hex-7: 22
+pcmk__primitive_assign: dlm:0 allocation score on hex-8: 16
pcmk__primitive_assign: dlm:0 allocation score on hex-9: -INFINITY
-pcmk__primitive_assign: dlm:1 allocation score on hex-0: 0
+pcmk__primitive_assign: dlm:1 allocation score on hex-0: 14
pcmk__primitive_assign: dlm:1 allocation score on hex-7: -INFINITY
-pcmk__primitive_assign: dlm:1 allocation score on hex-8: 6
+pcmk__primitive_assign: dlm:1 allocation score on hex-8: 22
pcmk__primitive_assign: dlm:1 allocation score on hex-9: -INFINITY
-pcmk__primitive_assign: dlm:2 allocation score on hex-0: 6
+pcmk__primitive_assign: dlm:2 allocation score on hex-0: 20
pcmk__primitive_assign: dlm:2 allocation score on hex-7: -INFINITY
pcmk__primitive_assign: dlm:2 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: dlm:2 allocation score on hex-9: -INFINITY
@@ -284,14 +284,14 @@ pcmk__primitive_assign: fencing-sbd allocation score on hex-7: 0
pcmk__primitive_assign: fencing-sbd allocation score on hex-8: 0
pcmk__primitive_assign: fencing-sbd allocation score on hex-9: 1
pcmk__primitive_assign: o2cb:0 allocation score on hex-0: -INFINITY
-pcmk__primitive_assign: o2cb:0 allocation score on hex-7: 5
+pcmk__primitive_assign: o2cb:0 allocation score on hex-7: 21
pcmk__primitive_assign: o2cb:0 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: o2cb:0 allocation score on hex-9: -INFINITY
pcmk__primitive_assign: o2cb:1 allocation score on hex-0: -INFINITY
pcmk__primitive_assign: o2cb:1 allocation score on hex-7: -INFINITY
-pcmk__primitive_assign: o2cb:1 allocation score on hex-8: 5
+pcmk__primitive_assign: o2cb:1 allocation score on hex-8: 21
pcmk__primitive_assign: o2cb:1 allocation score on hex-9: -INFINITY
-pcmk__primitive_assign: o2cb:2 allocation score on hex-0: 5
+pcmk__primitive_assign: o2cb:2 allocation score on hex-0: 19
pcmk__primitive_assign: o2cb:2 allocation score on hex-7: -INFINITY
pcmk__primitive_assign: o2cb:2 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: o2cb:2 allocation score on hex-9: -INFINITY
@@ -300,14 +300,14 @@ pcmk__primitive_assign: o2cb:3 allocation score on hex-7: -INFINITY
pcmk__primitive_assign: o2cb:3 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: o2cb:3 allocation score on hex-9: -INFINITY
pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-0: -INFINITY
-pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-7: 1
+pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-7: 17
pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-9: -INFINITY
pcmk__primitive_assign: ocfs2-1:1 allocation score on hex-0: -INFINITY
pcmk__primitive_assign: ocfs2-1:1 allocation score on hex-7: -INFINITY
-pcmk__primitive_assign: ocfs2-1:1 allocation score on hex-8: 1
+pcmk__primitive_assign: ocfs2-1:1 allocation score on hex-8: 17
pcmk__primitive_assign: ocfs2-1:1 allocation score on hex-9: -INFINITY
-pcmk__primitive_assign: ocfs2-1:2 allocation score on hex-0: 1
+pcmk__primitive_assign: ocfs2-1:2 allocation score on hex-0: 15
pcmk__primitive_assign: ocfs2-1:2 allocation score on hex-7: -INFINITY
pcmk__primitive_assign: ocfs2-1:2 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: ocfs2-1:2 allocation score on hex-9: -INFINITY
@@ -316,14 +316,14 @@ pcmk__primitive_assign: ocfs2-1:3 allocation score on hex-7: -INFINITY
pcmk__primitive_assign: ocfs2-1:3 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: ocfs2-1:3 allocation score on hex-9: -INFINITY
pcmk__primitive_assign: vg1:0 allocation score on hex-0: -INFINITY
-pcmk__primitive_assign: vg1:0 allocation score on hex-7: 2
+pcmk__primitive_assign: vg1:0 allocation score on hex-7: 18
pcmk__primitive_assign: vg1:0 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: vg1:0 allocation score on hex-9: -INFINITY
pcmk__primitive_assign: vg1:1 allocation score on hex-0: -INFINITY
pcmk__primitive_assign: vg1:1 allocation score on hex-7: -INFINITY
-pcmk__primitive_assign: vg1:1 allocation score on hex-8: 2
+pcmk__primitive_assign: vg1:1 allocation score on hex-8: 18
pcmk__primitive_assign: vg1:1 allocation score on hex-9: -INFINITY
-pcmk__primitive_assign: vg1:2 allocation score on hex-0: 2
+pcmk__primitive_assign: vg1:2 allocation score on hex-0: 16
pcmk__primitive_assign: vg1:2 allocation score on hex-7: -INFINITY
pcmk__primitive_assign: vg1:2 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: vg1:2 allocation score on hex-9: -INFINITY
diff --git a/cts/scheduler/scores/bug-lf-2574.scores b/cts/scheduler/scores/bug-lf-2574.scores
index 0f5cf60..77d8b87 100644
--- a/cts/scheduler/scores/bug-lf-2574.scores
+++ b/cts/scheduler/scores/bug-lf-2574.scores
@@ -34,16 +34,19 @@ pcmk__primitive_assign: prmDummy1:0 allocation score on srv02: -INFINITY
pcmk__primitive_assign: prmDummy1:0 allocation score on srv03: INFINITY
pcmk__primitive_assign: prmDummy1:1 allocation score on srv01: -INFINITY
pcmk__primitive_assign: prmDummy1:1 allocation score on srv02: INFINITY
-pcmk__primitive_assign: prmDummy1:1 allocation score on srv03: 0
+pcmk__primitive_assign: prmDummy1:1 allocation score on srv03: 200
pcmk__primitive_assign: prmDummy1:2 allocation score on srv01: -INFINITY
pcmk__primitive_assign: prmDummy1:2 allocation score on srv02: -INFINITY
pcmk__primitive_assign: prmDummy1:2 allocation score on srv03: -INFINITY
pcmk__primitive_assign: prmPingd:0 allocation score on srv01: -INFINITY
+pcmk__primitive_assign: prmPingd:0 allocation score on srv01: -INFINITY
+pcmk__primitive_assign: prmPingd:0 allocation score on srv02: -INFINITY
pcmk__primitive_assign: prmPingd:0 allocation score on srv02: -INFINITY
pcmk__primitive_assign: prmPingd:0 allocation score on srv03: -INFINITY
+pcmk__primitive_assign: prmPingd:0 allocation score on srv03: -INFINITY
pcmk__primitive_assign: prmPingd:1 allocation score on srv01: -INFINITY
pcmk__primitive_assign: prmPingd:1 allocation score on srv02: -INFINITY
pcmk__primitive_assign: prmPingd:1 allocation score on srv03: INFINITY
pcmk__primitive_assign: prmPingd:2 allocation score on srv01: -INFINITY
pcmk__primitive_assign: prmPingd:2 allocation score on srv02: INFINITY
-pcmk__primitive_assign: prmPingd:2 allocation score on srv03: 0
+pcmk__primitive_assign: prmPingd:2 allocation score on srv03: 200
diff --git a/cts/scheduler/scores/bug-lf-2581.scores b/cts/scheduler/scores/bug-lf-2581.scores
index 267eb6c..29170dd 100644
--- a/cts/scheduler/scores/bug-lf-2581.scores
+++ b/cts/scheduler/scores/bug-lf-2581.scores
@@ -43,7 +43,7 @@ pcmk__group_assign: Z:0 allocation score on elvis: 1
pcmk__group_assign: Z:0 allocation score on queen: 0
pcmk__group_assign: Z:1 allocation score on elvis: -INFINITY
pcmk__group_assign: Z:1 allocation score on queen: 0
-pcmk__primitive_assign: A:0 allocation score on elvis: 2
+pcmk__primitive_assign: A:0 allocation score on elvis: 6
pcmk__primitive_assign: A:0 allocation score on queen: 0
pcmk__primitive_assign: A:1 allocation score on elvis: -INFINITY
pcmk__primitive_assign: A:1 allocation score on queen: 0
@@ -55,7 +55,7 @@ pcmk__primitive_assign: C-1 allocation score on elvis: 1
pcmk__primitive_assign: C-1 allocation score on queen: -INFINITY
pcmk__primitive_assign: C-2 allocation score on elvis: 1
pcmk__primitive_assign: C-2 allocation score on queen: -INFINITY
-pcmk__primitive_assign: Z:0 allocation score on elvis: 1
+pcmk__primitive_assign: Z:0 allocation score on elvis: 5
pcmk__primitive_assign: Z:0 allocation score on queen: -INFINITY
pcmk__primitive_assign: Z:1 allocation score on elvis: -INFINITY
pcmk__primitive_assign: Z:1 allocation score on queen: 0
diff --git a/cts/scheduler/scores/bug-lf-2619.scores b/cts/scheduler/scores/bug-lf-2619.scores
index 32f947f..6fb3857 100644
--- a/cts/scheduler/scores/bug-lf-2619.scores
+++ b/cts/scheduler/scores/bug-lf-2619.scores
@@ -236,7 +236,7 @@ pcmk__primitive_assign: prmPingd:1 allocation score on sby1: INFINITY
pcmk__primitive_assign: prmPingd:1 allocation score on sby2: 0
pcmk__primitive_assign: prmPingd:2 allocation score on act1: -INFINITY
pcmk__primitive_assign: prmPingd:2 allocation score on act2: INFINITY
-pcmk__primitive_assign: prmPingd:2 allocation score on act3: 0
+pcmk__primitive_assign: prmPingd:2 allocation score on act3: INFINITY
pcmk__primitive_assign: prmPingd:2 allocation score on sby1: 0
pcmk__primitive_assign: prmPingd:2 allocation score on sby2: 0
pcmk__primitive_assign: prmPingd:3 allocation score on act1: -INFINITY
diff --git a/cts/scheduler/scores/bug-n-387749.scores b/cts/scheduler/scores/bug-n-387749.scores
index 5165421..bcd4706 100644
--- a/cts/scheduler/scores/bug-n-387749.scores
+++ b/cts/scheduler/scores/bug-n-387749.scores
@@ -20,11 +20,14 @@ pcmk__group_assign: resource_ipaddr1_single allocation score on power720-4: 0
pcmk__group_assign: resource_nfsserver_single allocation score on power720-1: 0
pcmk__group_assign: resource_nfsserver_single allocation score on power720-2: 1000
pcmk__group_assign: resource_nfsserver_single allocation score on power720-4: 0
-pcmk__primitive_assign: export_home_ocfs2:0 allocation score on power720-1: 0
+pcmk__primitive_assign: export_home_ocfs2:0 allocation score on power720-1: INFINITY
pcmk__primitive_assign: export_home_ocfs2:0 allocation score on power720-2: -INFINITY
pcmk__primitive_assign: export_home_ocfs2:0 allocation score on power720-4: -INFINITY
-pcmk__primitive_assign: export_home_ocfs2:1 allocation score on power720-1: 0
-pcmk__primitive_assign: export_home_ocfs2:1 allocation score on power720-2: 1000
+pcmk__primitive_assign: export_home_ocfs2:1 allocation score on power720-1: -INFINITY
+pcmk__primitive_assign: export_home_ocfs2:1 allocation score on power720-1: INFINITY
+pcmk__primitive_assign: export_home_ocfs2:1 allocation score on power720-2: 3000
+pcmk__primitive_assign: export_home_ocfs2:1 allocation score on power720-2: 3000
+pcmk__primitive_assign: export_home_ocfs2:1 allocation score on power720-4: -INFINITY
pcmk__primitive_assign: export_home_ocfs2:1 allocation score on power720-4: -INFINITY
pcmk__primitive_assign: export_home_ocfs2:2 allocation score on power720-1: -INFINITY
pcmk__primitive_assign: export_home_ocfs2:2 allocation score on power720-2: -INFINITY
diff --git a/cts/scheduler/scores/bug-suse-707150.scores b/cts/scheduler/scores/bug-suse-707150.scores
index 7f35079..4e85c86 100644
--- a/cts/scheduler/scores/bug-suse-707150.scores
+++ b/cts/scheduler/scores/bug-suse-707150.scores
@@ -116,8 +116,12 @@ pcmk__clone_assign: vg1:3 allocation score on hex-7: 0
pcmk__clone_assign: vg1:3 allocation score on hex-8: 0
pcmk__clone_assign: vg1:3 allocation score on hex-9: 0
pcmk__group_assign: base-group:0 allocation score on hex-0: 0
+pcmk__group_assign: base-group:0 allocation score on hex-0: 0
+pcmk__group_assign: base-group:0 allocation score on hex-7: -INFINITY
pcmk__group_assign: base-group:0 allocation score on hex-7: -INFINITY
pcmk__group_assign: base-group:0 allocation score on hex-8: -INFINITY
+pcmk__group_assign: base-group:0 allocation score on hex-8: -INFINITY
+pcmk__group_assign: base-group:0 allocation score on hex-9: -INFINITY
pcmk__group_assign: base-group:0 allocation score on hex-9: 0
pcmk__group_assign: base-group:1 allocation score on hex-0: -INFINITY
pcmk__group_assign: base-group:1 allocation score on hex-7: -INFINITY
@@ -132,8 +136,12 @@ pcmk__group_assign: base-group:3 allocation score on hex-7: -INFINITY
pcmk__group_assign: base-group:3 allocation score on hex-8: -INFINITY
pcmk__group_assign: base-group:3 allocation score on hex-9: -INFINITY
pcmk__group_assign: clvm:0 allocation score on hex-0: 0
+pcmk__group_assign: clvm:0 allocation score on hex-0: 0
+pcmk__group_assign: clvm:0 allocation score on hex-7: -INFINITY
pcmk__group_assign: clvm:0 allocation score on hex-7: -INFINITY
pcmk__group_assign: clvm:0 allocation score on hex-8: -INFINITY
+pcmk__group_assign: clvm:0 allocation score on hex-8: -INFINITY
+pcmk__group_assign: clvm:0 allocation score on hex-9: -INFINITY
pcmk__group_assign: clvm:0 allocation score on hex-9: 0
pcmk__group_assign: clvm:1 allocation score on hex-0: -INFINITY
pcmk__group_assign: clvm:1 allocation score on hex-7: -INFINITY
@@ -148,8 +156,12 @@ pcmk__group_assign: clvm:3 allocation score on hex-7: -INFINITY
pcmk__group_assign: clvm:3 allocation score on hex-8: -INFINITY
pcmk__group_assign: clvm:3 allocation score on hex-9: -INFINITY
pcmk__group_assign: cmirrord:0 allocation score on hex-0: 0
+pcmk__group_assign: cmirrord:0 allocation score on hex-0: 0
+pcmk__group_assign: cmirrord:0 allocation score on hex-7: -INFINITY
pcmk__group_assign: cmirrord:0 allocation score on hex-7: -INFINITY
pcmk__group_assign: cmirrord:0 allocation score on hex-8: -INFINITY
+pcmk__group_assign: cmirrord:0 allocation score on hex-8: -INFINITY
+pcmk__group_assign: cmirrord:0 allocation score on hex-9: -INFINITY
pcmk__group_assign: cmirrord:0 allocation score on hex-9: 0
pcmk__group_assign: cmirrord:1 allocation score on hex-0: -INFINITY
pcmk__group_assign: cmirrord:1 allocation score on hex-7: -INFINITY
@@ -164,8 +176,12 @@ pcmk__group_assign: cmirrord:3 allocation score on hex-7: -INFINITY
pcmk__group_assign: cmirrord:3 allocation score on hex-8: -INFINITY
pcmk__group_assign: cmirrord:3 allocation score on hex-9: -INFINITY
pcmk__group_assign: dlm:0 allocation score on hex-0: 1
+pcmk__group_assign: dlm:0 allocation score on hex-0: 1
pcmk__group_assign: dlm:0 allocation score on hex-7: -INFINITY
+pcmk__group_assign: dlm:0 allocation score on hex-7: -INFINITY
+pcmk__group_assign: dlm:0 allocation score on hex-8: -INFINITY
pcmk__group_assign: dlm:0 allocation score on hex-8: -INFINITY
+pcmk__group_assign: dlm:0 allocation score on hex-9: -INFINITY
pcmk__group_assign: dlm:0 allocation score on hex-9: 0
pcmk__group_assign: dlm:1 allocation score on hex-0: -INFINITY
pcmk__group_assign: dlm:1 allocation score on hex-7: -INFINITY
@@ -180,8 +196,12 @@ pcmk__group_assign: dlm:3 allocation score on hex-7: -INFINITY
pcmk__group_assign: dlm:3 allocation score on hex-8: -INFINITY
pcmk__group_assign: dlm:3 allocation score on hex-9: -INFINITY
pcmk__group_assign: o2cb:0 allocation score on hex-0: 0
+pcmk__group_assign: o2cb:0 allocation score on hex-0: 0
pcmk__group_assign: o2cb:0 allocation score on hex-7: -INFINITY
+pcmk__group_assign: o2cb:0 allocation score on hex-7: -INFINITY
+pcmk__group_assign: o2cb:0 allocation score on hex-8: -INFINITY
pcmk__group_assign: o2cb:0 allocation score on hex-8: -INFINITY
+pcmk__group_assign: o2cb:0 allocation score on hex-9: -INFINITY
pcmk__group_assign: o2cb:0 allocation score on hex-9: 0
pcmk__group_assign: o2cb:1 allocation score on hex-0: -INFINITY
pcmk__group_assign: o2cb:1 allocation score on hex-7: -INFINITY
@@ -196,8 +216,12 @@ pcmk__group_assign: o2cb:3 allocation score on hex-7: -INFINITY
pcmk__group_assign: o2cb:3 allocation score on hex-8: -INFINITY
pcmk__group_assign: o2cb:3 allocation score on hex-9: -INFINITY
pcmk__group_assign: ocfs2-1:0 allocation score on hex-0: 0
+pcmk__group_assign: ocfs2-1:0 allocation score on hex-0: 0
+pcmk__group_assign: ocfs2-1:0 allocation score on hex-7: -INFINITY
pcmk__group_assign: ocfs2-1:0 allocation score on hex-7: -INFINITY
pcmk__group_assign: ocfs2-1:0 allocation score on hex-8: -INFINITY
+pcmk__group_assign: ocfs2-1:0 allocation score on hex-8: -INFINITY
+pcmk__group_assign: ocfs2-1:0 allocation score on hex-9: -INFINITY
pcmk__group_assign: ocfs2-1:0 allocation score on hex-9: 0
pcmk__group_assign: ocfs2-1:1 allocation score on hex-0: -INFINITY
pcmk__group_assign: ocfs2-1:1 allocation score on hex-7: -INFINITY
@@ -212,8 +236,12 @@ pcmk__group_assign: ocfs2-1:3 allocation score on hex-7: -INFINITY
pcmk__group_assign: ocfs2-1:3 allocation score on hex-8: -INFINITY
pcmk__group_assign: ocfs2-1:3 allocation score on hex-9: -INFINITY
pcmk__group_assign: vg1:0 allocation score on hex-0: 0
+pcmk__group_assign: vg1:0 allocation score on hex-0: 0
pcmk__group_assign: vg1:0 allocation score on hex-7: -INFINITY
+pcmk__group_assign: vg1:0 allocation score on hex-7: -INFINITY
+pcmk__group_assign: vg1:0 allocation score on hex-8: -INFINITY
pcmk__group_assign: vg1:0 allocation score on hex-8: -INFINITY
+pcmk__group_assign: vg1:0 allocation score on hex-9: -INFINITY
pcmk__group_assign: vg1:0 allocation score on hex-9: 0
pcmk__group_assign: vg1:1 allocation score on hex-0: -INFINITY
pcmk__group_assign: vg1:1 allocation score on hex-7: -INFINITY
@@ -227,10 +255,14 @@ pcmk__group_assign: vg1:3 allocation score on hex-0: -INFINITY
pcmk__group_assign: vg1:3 allocation score on hex-7: -INFINITY
pcmk__group_assign: vg1:3 allocation score on hex-8: -INFINITY
pcmk__group_assign: vg1:3 allocation score on hex-9: -INFINITY
+pcmk__primitive_assign: clvm:0 allocation score on hex-0: -INFINITY
pcmk__primitive_assign: clvm:0 allocation score on hex-0: 0
pcmk__primitive_assign: clvm:0 allocation score on hex-7: -INFINITY
+pcmk__primitive_assign: clvm:0 allocation score on hex-7: -INFINITY
+pcmk__primitive_assign: clvm:0 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: clvm:0 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: clvm:0 allocation score on hex-9: -INFINITY
+pcmk__primitive_assign: clvm:0 allocation score on hex-9: 0
pcmk__primitive_assign: clvm:1 allocation score on hex-0: -INFINITY
pcmk__primitive_assign: clvm:1 allocation score on hex-7: -INFINITY
pcmk__primitive_assign: clvm:1 allocation score on hex-8: -INFINITY
@@ -243,10 +275,14 @@ pcmk__primitive_assign: clvm:3 allocation score on hex-0: -INFINITY
pcmk__primitive_assign: clvm:3 allocation score on hex-7: -INFINITY
pcmk__primitive_assign: clvm:3 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: clvm:3 allocation score on hex-9: -INFINITY
+pcmk__primitive_assign: cmirrord:0 allocation score on hex-0: -INFINITY
pcmk__primitive_assign: cmirrord:0 allocation score on hex-0: 0
pcmk__primitive_assign: cmirrord:0 allocation score on hex-7: -INFINITY
+pcmk__primitive_assign: cmirrord:0 allocation score on hex-7: -INFINITY
+pcmk__primitive_assign: cmirrord:0 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: cmirrord:0 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: cmirrord:0 allocation score on hex-9: -INFINITY
+pcmk__primitive_assign: cmirrord:0 allocation score on hex-9: 0
pcmk__primitive_assign: cmirrord:1 allocation score on hex-0: -INFINITY
pcmk__primitive_assign: cmirrord:1 allocation score on hex-7: -INFINITY
pcmk__primitive_assign: cmirrord:1 allocation score on hex-8: -INFINITY
@@ -259,9 +295,13 @@ pcmk__primitive_assign: cmirrord:3 allocation score on hex-0: -INFINITY
pcmk__primitive_assign: cmirrord:3 allocation score on hex-7: -INFINITY
pcmk__primitive_assign: cmirrord:3 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: cmirrord:3 allocation score on hex-9: -INFINITY
+pcmk__primitive_assign: dlm:0 allocation score on hex-0: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-0: 1
pcmk__primitive_assign: dlm:0 allocation score on hex-7: -INFINITY
+pcmk__primitive_assign: dlm:0 allocation score on hex-7: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-8: -INFINITY
+pcmk__primitive_assign: dlm:0 allocation score on hex-8: -INFINITY
+pcmk__primitive_assign: dlm:0 allocation score on hex-9: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-9: 0
pcmk__primitive_assign: dlm:1 allocation score on hex-0: -INFINITY
pcmk__primitive_assign: dlm:1 allocation score on hex-7: -INFINITY
@@ -283,10 +323,14 @@ pcmk__primitive_assign: fencing-sbd allocation score on hex-0: 0
pcmk__primitive_assign: fencing-sbd allocation score on hex-7: 0
pcmk__primitive_assign: fencing-sbd allocation score on hex-8: 0
pcmk__primitive_assign: fencing-sbd allocation score on hex-9: 1
+pcmk__primitive_assign: o2cb:0 allocation score on hex-0: -INFINITY
pcmk__primitive_assign: o2cb:0 allocation score on hex-0: 0
pcmk__primitive_assign: o2cb:0 allocation score on hex-7: -INFINITY
+pcmk__primitive_assign: o2cb:0 allocation score on hex-7: -INFINITY
+pcmk__primitive_assign: o2cb:0 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: o2cb:0 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: o2cb:0 allocation score on hex-9: -INFINITY
+pcmk__primitive_assign: o2cb:0 allocation score on hex-9: 0
pcmk__primitive_assign: o2cb:1 allocation score on hex-0: -INFINITY
pcmk__primitive_assign: o2cb:1 allocation score on hex-7: -INFINITY
pcmk__primitive_assign: o2cb:1 allocation score on hex-8: -INFINITY
@@ -300,8 +344,12 @@ pcmk__primitive_assign: o2cb:3 allocation score on hex-7: -INFINITY
pcmk__primitive_assign: o2cb:3 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: o2cb:3 allocation score on hex-9: -INFINITY
pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-0: -INFINITY
+pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-0: -INFINITY
+pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-7: -INFINITY
pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-7: -INFINITY
pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-8: -INFINITY
+pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-8: -INFINITY
+pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-9: -INFINITY
pcmk__primitive_assign: ocfs2-1:0 allocation score on hex-9: -INFINITY
pcmk__primitive_assign: ocfs2-1:1 allocation score on hex-0: -INFINITY
pcmk__primitive_assign: ocfs2-1:1 allocation score on hex-7: -INFINITY
@@ -316,8 +364,12 @@ pcmk__primitive_assign: ocfs2-1:3 allocation score on hex-7: -INFINITY
pcmk__primitive_assign: ocfs2-1:3 allocation score on hex-8: -INFINITY
pcmk__primitive_assign: ocfs2-1:3 allocation score on hex-9: -INFINITY
pcmk__primitive_assign: vg1:0 allocation score on hex-0: -INFINITY
+pcmk__primitive_assign: vg1:0 allocation score on hex-0: -INFINITY
+pcmk__primitive_assign: vg1:0 allocation score on hex-7: -INFINITY
pcmk__primitive_assign: vg1:0 allocation score on hex-7: -INFINITY
pcmk__primitive_assign: vg1:0 allocation score on hex-8: -INFINITY
+pcmk__primitive_assign: vg1:0 allocation score on hex-8: -INFINITY
+pcmk__primitive_assign: vg1:0 allocation score on hex-9: -INFINITY
pcmk__primitive_assign: vg1:0 allocation score on hex-9: -INFINITY
pcmk__primitive_assign: vg1:1 allocation score on hex-0: -INFINITY
pcmk__primitive_assign: vg1:1 allocation score on hex-7: -INFINITY
diff --git a/cts/scheduler/scores/bundle-connection-with-container.scores b/cts/scheduler/scores/bundle-connection-with-container.scores
index fc20405..30d63ac 100644
--- a/cts/scheduler/scores/bundle-connection-with-container.scores
+++ b/cts/scheduler/scores/bundle-connection-with-container.scores
@@ -1,76 +1,76 @@
-pcmk__bundle_allocate: httpd-bundle allocation score on remote-rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle allocation score on rhel8-1: 0
-pcmk__bundle_allocate: httpd-bundle allocation score on rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle allocation score on rhel8-3: 0
-pcmk__bundle_allocate: httpd-bundle allocation score on rhel8-4: -INFINITY
-pcmk__bundle_allocate: httpd-bundle allocation score on rhel8-5: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-0 allocation score on remote-rhel8-2: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-0 allocation score on rhel8-1: 0
-pcmk__bundle_allocate: httpd-bundle-0 allocation score on rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-0 allocation score on rhel8-3: 0
-pcmk__bundle_allocate: httpd-bundle-0 allocation score on rhel8-4: 0
-pcmk__bundle_allocate: httpd-bundle-0 allocation score on rhel8-5: 0
-pcmk__bundle_allocate: httpd-bundle-1 allocation score on remote-rhel8-2: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-1 allocation score on rhel8-1: 0
-pcmk__bundle_allocate: httpd-bundle-1 allocation score on rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-1 allocation score on rhel8-3: 0
-pcmk__bundle_allocate: httpd-bundle-1 allocation score on rhel8-4: 0
-pcmk__bundle_allocate: httpd-bundle-1 allocation score on rhel8-5: 0
-pcmk__bundle_allocate: httpd-bundle-2 allocation score on remote-rhel8-2: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-2 allocation score on rhel8-1: 0
-pcmk__bundle_allocate: httpd-bundle-2 allocation score on rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-2 allocation score on rhel8-3: 0
-pcmk__bundle_allocate: httpd-bundle-2 allocation score on rhel8-4: 0
-pcmk__bundle_allocate: httpd-bundle-2 allocation score on rhel8-5: 0
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-0: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-1: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-2: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on remote-rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on rhel8-1: 0
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on rhel8-3: 0
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on rhel8-4: 0
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on rhel8-5: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on remote-rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-1: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-3: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-4: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-5: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on remote-rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-1: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-3: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-4: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-5: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on remote-rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-1: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-3: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-4: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-5: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on remote-rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on rhel8-1: 0
-pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on rhel8-3: 0
-pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on rhel8-4: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on rhel8-5: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on remote-rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on rhel8-1: 0
-pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on rhel8-3: 0
-pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on rhel8-4: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on rhel8-5: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on remote-rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on rhel8-1: 0
-pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on rhel8-3: 0
-pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on rhel8-4: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on rhel8-5: -INFINITY
-pcmk__bundle_allocate: httpd:0 allocation score on httpd-bundle-0: 501
-pcmk__bundle_allocate: httpd:1 allocation score on httpd-bundle-1: 501
-pcmk__bundle_allocate: httpd:2 allocation score on httpd-bundle-2: 501
+pcmk__bundle_assign: httpd-bundle allocation score on remote-rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle allocation score on rhel8-1: 0
+pcmk__bundle_assign: httpd-bundle allocation score on rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle allocation score on rhel8-3: 0
+pcmk__bundle_assign: httpd-bundle allocation score on rhel8-4: -INFINITY
+pcmk__bundle_assign: httpd-bundle allocation score on rhel8-5: -INFINITY
+pcmk__bundle_assign: httpd-bundle-0 allocation score on remote-rhel8-2: -INFINITY
+pcmk__bundle_assign: httpd-bundle-0 allocation score on rhel8-1: 0
+pcmk__bundle_assign: httpd-bundle-0 allocation score on rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-0 allocation score on rhel8-3: 0
+pcmk__bundle_assign: httpd-bundle-0 allocation score on rhel8-4: 0
+pcmk__bundle_assign: httpd-bundle-0 allocation score on rhel8-5: 0
+pcmk__bundle_assign: httpd-bundle-1 allocation score on remote-rhel8-2: -INFINITY
+pcmk__bundle_assign: httpd-bundle-1 allocation score on rhel8-1: 0
+pcmk__bundle_assign: httpd-bundle-1 allocation score on rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-1 allocation score on rhel8-3: 0
+pcmk__bundle_assign: httpd-bundle-1 allocation score on rhel8-4: 0
+pcmk__bundle_assign: httpd-bundle-1 allocation score on rhel8-5: 0
+pcmk__bundle_assign: httpd-bundle-2 allocation score on remote-rhel8-2: -INFINITY
+pcmk__bundle_assign: httpd-bundle-2 allocation score on rhel8-1: 0
+pcmk__bundle_assign: httpd-bundle-2 allocation score on rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-2 allocation score on rhel8-3: 0
+pcmk__bundle_assign: httpd-bundle-2 allocation score on rhel8-4: 0
+pcmk__bundle_assign: httpd-bundle-2 allocation score on rhel8-5: 0
+pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-0: -INFINITY
+pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-1: -INFINITY
+pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-2: -INFINITY
+pcmk__bundle_assign: httpd-bundle-clone allocation score on remote-rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-clone allocation score on rhel8-1: 0
+pcmk__bundle_assign: httpd-bundle-clone allocation score on rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-clone allocation score on rhel8-3: 0
+pcmk__bundle_assign: httpd-bundle-clone allocation score on rhel8-4: 0
+pcmk__bundle_assign: httpd-bundle-clone allocation score on rhel8-5: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on remote-rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-1: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-3: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-4: -INFINITY
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-5: -INFINITY
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on remote-rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-1: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-3: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-4: -INFINITY
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-5: -INFINITY
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on remote-rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-1: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-3: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-4: -INFINITY
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-5: -INFINITY
+pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on remote-rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on rhel8-1: 0
+pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on rhel8-3: 0
+pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on rhel8-4: -INFINITY
+pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on rhel8-5: -INFINITY
+pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on remote-rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on rhel8-1: 0
+pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on rhel8-3: 0
+pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on rhel8-4: -INFINITY
+pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on rhel8-5: -INFINITY
+pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on remote-rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on rhel8-1: 0
+pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on rhel8-3: 0
+pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on rhel8-4: -INFINITY
+pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on rhel8-5: -INFINITY
+pcmk__bundle_assign: httpd:0 allocation score on httpd-bundle-0: 501
+pcmk__bundle_assign: httpd:1 allocation score on httpd-bundle-1: 501
+pcmk__bundle_assign: httpd:2 allocation score on httpd-bundle-2: 501
pcmk__clone_assign: httpd-bundle-clone allocation score on httpd-bundle-0: 0
pcmk__clone_assign: httpd-bundle-clone allocation score on httpd-bundle-1: 0
pcmk__clone_assign: httpd-bundle-clone allocation score on httpd-bundle-2: 0
diff --git a/cts/scheduler/scores/bundle-interleave-promote.scores b/cts/scheduler/scores/bundle-interleave-promote.scores
index 8996be1..85c5aed 100644
--- a/cts/scheduler/scores/bundle-interleave-promote.scores
+++ b/cts/scheduler/scores/bundle-interleave-promote.scores
@@ -1,102 +1,102 @@
app:0 promotion score on app-bundle-0: 12
app:1 promotion score on app-bundle-1: 13
-app:2 promotion score on app-bundle-2: 14
+app:2 promotion score on app-bundle-2: INFINITY
base:0 promotion score on base-bundle-0: 12
base:1 promotion score on base-bundle-1: 13
base:2 promotion score on base-bundle-2: 14
-pcmk__bundle_allocate: app-bundle allocation score on node1: 0
-pcmk__bundle_allocate: app-bundle allocation score on node2: 0
-pcmk__bundle_allocate: app-bundle allocation score on node3: 0
-pcmk__bundle_allocate: app-bundle allocation score on node4: 0
-pcmk__bundle_allocate: app-bundle allocation score on node5: 0
-pcmk__bundle_allocate: app-bundle-0 allocation score on node1: 0
-pcmk__bundle_allocate: app-bundle-0 allocation score on node2: 0
-pcmk__bundle_allocate: app-bundle-0 allocation score on node3: 0
-pcmk__bundle_allocate: app-bundle-0 allocation score on node4: 0
-pcmk__bundle_allocate: app-bundle-0 allocation score on node5: 0
-pcmk__bundle_allocate: app-bundle-1 allocation score on node1: 0
-pcmk__bundle_allocate: app-bundle-1 allocation score on node2: 0
-pcmk__bundle_allocate: app-bundle-1 allocation score on node3: 0
-pcmk__bundle_allocate: app-bundle-1 allocation score on node4: 0
-pcmk__bundle_allocate: app-bundle-1 allocation score on node5: 0
-pcmk__bundle_allocate: app-bundle-2 allocation score on node1: 0
-pcmk__bundle_allocate: app-bundle-2 allocation score on node2: 0
-pcmk__bundle_allocate: app-bundle-2 allocation score on node3: 0
-pcmk__bundle_allocate: app-bundle-2 allocation score on node4: 0
-pcmk__bundle_allocate: app-bundle-2 allocation score on node5: 0
-pcmk__bundle_allocate: app-bundle-clone allocation score on app-bundle-0: -INFINITY
-pcmk__bundle_allocate: app-bundle-clone allocation score on app-bundle-1: -INFINITY
-pcmk__bundle_allocate: app-bundle-clone allocation score on app-bundle-2: -INFINITY
-pcmk__bundle_allocate: app-bundle-clone allocation score on node1: 0
-pcmk__bundle_allocate: app-bundle-clone allocation score on node2: 0
-pcmk__bundle_allocate: app-bundle-clone allocation score on node3: 0
-pcmk__bundle_allocate: app-bundle-clone allocation score on node4: 0
-pcmk__bundle_allocate: app-bundle-clone allocation score on node5: 0
-pcmk__bundle_allocate: app-bundle-podman-0 allocation score on node1: 0
-pcmk__bundle_allocate: app-bundle-podman-0 allocation score on node2: 0
-pcmk__bundle_allocate: app-bundle-podman-0 allocation score on node3: 0
-pcmk__bundle_allocate: app-bundle-podman-0 allocation score on node4: 0
-pcmk__bundle_allocate: app-bundle-podman-0 allocation score on node5: 0
-pcmk__bundle_allocate: app-bundle-podman-1 allocation score on node1: 0
-pcmk__bundle_allocate: app-bundle-podman-1 allocation score on node2: 0
-pcmk__bundle_allocate: app-bundle-podman-1 allocation score on node3: 0
-pcmk__bundle_allocate: app-bundle-podman-1 allocation score on node4: 0
-pcmk__bundle_allocate: app-bundle-podman-1 allocation score on node5: 0
-pcmk__bundle_allocate: app-bundle-podman-2 allocation score on node1: 0
-pcmk__bundle_allocate: app-bundle-podman-2 allocation score on node2: 0
-pcmk__bundle_allocate: app-bundle-podman-2 allocation score on node3: 0
-pcmk__bundle_allocate: app-bundle-podman-2 allocation score on node4: 0
-pcmk__bundle_allocate: app-bundle-podman-2 allocation score on node5: 0
-pcmk__bundle_allocate: app:0 allocation score on app-bundle-0: 501
-pcmk__bundle_allocate: app:1 allocation score on app-bundle-1: 501
-pcmk__bundle_allocate: app:2 allocation score on app-bundle-2: 501
-pcmk__bundle_allocate: base-bundle allocation score on node1: 0
-pcmk__bundle_allocate: base-bundle allocation score on node2: 0
-pcmk__bundle_allocate: base-bundle allocation score on node3: 0
-pcmk__bundle_allocate: base-bundle allocation score on node4: 0
-pcmk__bundle_allocate: base-bundle allocation score on node5: 0
-pcmk__bundle_allocate: base-bundle-0 allocation score on node1: 0
-pcmk__bundle_allocate: base-bundle-0 allocation score on node2: 0
-pcmk__bundle_allocate: base-bundle-0 allocation score on node3: 0
-pcmk__bundle_allocate: base-bundle-0 allocation score on node4: 0
-pcmk__bundle_allocate: base-bundle-0 allocation score on node5: 0
-pcmk__bundle_allocate: base-bundle-1 allocation score on node1: 0
-pcmk__bundle_allocate: base-bundle-1 allocation score on node2: 0
-pcmk__bundle_allocate: base-bundle-1 allocation score on node3: 0
-pcmk__bundle_allocate: base-bundle-1 allocation score on node4: 0
-pcmk__bundle_allocate: base-bundle-1 allocation score on node5: 0
-pcmk__bundle_allocate: base-bundle-2 allocation score on node1: 0
-pcmk__bundle_allocate: base-bundle-2 allocation score on node2: 0
-pcmk__bundle_allocate: base-bundle-2 allocation score on node3: 0
-pcmk__bundle_allocate: base-bundle-2 allocation score on node4: 0
-pcmk__bundle_allocate: base-bundle-2 allocation score on node5: 0
-pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-0: -INFINITY
-pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-1: -INFINITY
-pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-2: -INFINITY
-pcmk__bundle_allocate: base-bundle-clone allocation score on node1: 0
-pcmk__bundle_allocate: base-bundle-clone allocation score on node2: 0
-pcmk__bundle_allocate: base-bundle-clone allocation score on node3: 0
-pcmk__bundle_allocate: base-bundle-clone allocation score on node4: 0
-pcmk__bundle_allocate: base-bundle-clone allocation score on node5: 0
-pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node1: 0
-pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node2: 0
-pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node3: 0
-pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node4: 0
-pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node5: 0
-pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node1: 0
-pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node2: 0
-pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node3: 0
-pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node4: 0
-pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node5: 0
-pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node1: 0
-pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node2: 0
-pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node3: 0
-pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node4: 0
-pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node5: 0
-pcmk__bundle_allocate: base:0 allocation score on base-bundle-0: 501
-pcmk__bundle_allocate: base:1 allocation score on base-bundle-1: 501
-pcmk__bundle_allocate: base:2 allocation score on base-bundle-2: 501
+pcmk__bundle_assign: app-bundle allocation score on node1: 0
+pcmk__bundle_assign: app-bundle allocation score on node2: 0
+pcmk__bundle_assign: app-bundle allocation score on node3: 0
+pcmk__bundle_assign: app-bundle allocation score on node4: 0
+pcmk__bundle_assign: app-bundle allocation score on node5: 0
+pcmk__bundle_assign: app-bundle-0 allocation score on node1: 0
+pcmk__bundle_assign: app-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: app-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: app-bundle-0 allocation score on node4: 0
+pcmk__bundle_assign: app-bundle-0 allocation score on node5: 0
+pcmk__bundle_assign: app-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: app-bundle-1 allocation score on node2: 0
+pcmk__bundle_assign: app-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: app-bundle-1 allocation score on node4: 0
+pcmk__bundle_assign: app-bundle-1 allocation score on node5: 0
+pcmk__bundle_assign: app-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: app-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: app-bundle-2 allocation score on node3: 0
+pcmk__bundle_assign: app-bundle-2 allocation score on node4: 0
+pcmk__bundle_assign: app-bundle-2 allocation score on node5: 0
+pcmk__bundle_assign: app-bundle-clone allocation score on app-bundle-0: -INFINITY
+pcmk__bundle_assign: app-bundle-clone allocation score on app-bundle-1: -INFINITY
+pcmk__bundle_assign: app-bundle-clone allocation score on app-bundle-2: -INFINITY
+pcmk__bundle_assign: app-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: app-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: app-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: app-bundle-clone allocation score on node4: 0
+pcmk__bundle_assign: app-bundle-clone allocation score on node5: 0
+pcmk__bundle_assign: app-bundle-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: app-bundle-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: app-bundle-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: app-bundle-podman-0 allocation score on node4: 0
+pcmk__bundle_assign: app-bundle-podman-0 allocation score on node5: 0
+pcmk__bundle_assign: app-bundle-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: app-bundle-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: app-bundle-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: app-bundle-podman-1 allocation score on node4: 0
+pcmk__bundle_assign: app-bundle-podman-1 allocation score on node5: 0
+pcmk__bundle_assign: app-bundle-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: app-bundle-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: app-bundle-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: app-bundle-podman-2 allocation score on node4: 0
+pcmk__bundle_assign: app-bundle-podman-2 allocation score on node5: 0
+pcmk__bundle_assign: app:0 allocation score on app-bundle-0: 501
+pcmk__bundle_assign: app:1 allocation score on app-bundle-1: 501
+pcmk__bundle_assign: app:2 allocation score on app-bundle-2: 501
+pcmk__bundle_assign: base-bundle allocation score on node1: 0
+pcmk__bundle_assign: base-bundle allocation score on node2: 0
+pcmk__bundle_assign: base-bundle allocation score on node3: 0
+pcmk__bundle_assign: base-bundle allocation score on node4: 0
+pcmk__bundle_assign: base-bundle allocation score on node5: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node4: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node5: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node4: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node5: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node4: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node5: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node4: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node5: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node4: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node5: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node4: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node5: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node4: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node5: 0
+pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501
+pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501
+pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501
pcmk__clone_assign: app-bundle-clone allocation score on app-bundle-0: 0
pcmk__clone_assign: app-bundle-clone allocation score on app-bundle-1: 0
pcmk__clone_assign: app-bundle-clone allocation score on app-bundle-2: 0
diff --git a/cts/scheduler/scores/bundle-interleave-start.scores b/cts/scheduler/scores/bundle-interleave-start.scores
index 7f4a370..52f95e8 100644
--- a/cts/scheduler/scores/bundle-interleave-start.scores
+++ b/cts/scheduler/scores/bundle-interleave-start.scores
@@ -1,102 +1,102 @@
-app:0 promotion score on app-bundle-0: -1
-app:1 promotion score on app-bundle-1: -1
-app:2 promotion score on app-bundle-2: -1
-base:0 promotion score on base-bundle-0: -1
-base:1 promotion score on base-bundle-1: -1
-base:2 promotion score on base-bundle-2: -1
-pcmk__bundle_allocate: app-bundle allocation score on node1: 0
-pcmk__bundle_allocate: app-bundle allocation score on node2: 0
-pcmk__bundle_allocate: app-bundle allocation score on node3: 0
-pcmk__bundle_allocate: app-bundle allocation score on node4: 0
-pcmk__bundle_allocate: app-bundle allocation score on node5: 0
-pcmk__bundle_allocate: app-bundle-0 allocation score on node1: 0
-pcmk__bundle_allocate: app-bundle-0 allocation score on node2: 0
-pcmk__bundle_allocate: app-bundle-0 allocation score on node3: 0
-pcmk__bundle_allocate: app-bundle-0 allocation score on node4: 0
-pcmk__bundle_allocate: app-bundle-0 allocation score on node5: 0
-pcmk__bundle_allocate: app-bundle-1 allocation score on node1: 0
-pcmk__bundle_allocate: app-bundle-1 allocation score on node2: 0
-pcmk__bundle_allocate: app-bundle-1 allocation score on node3: 0
-pcmk__bundle_allocate: app-bundle-1 allocation score on node4: 0
-pcmk__bundle_allocate: app-bundle-1 allocation score on node5: 0
-pcmk__bundle_allocate: app-bundle-2 allocation score on node1: 0
-pcmk__bundle_allocate: app-bundle-2 allocation score on node2: 0
-pcmk__bundle_allocate: app-bundle-2 allocation score on node3: 0
-pcmk__bundle_allocate: app-bundle-2 allocation score on node4: 0
-pcmk__bundle_allocate: app-bundle-2 allocation score on node5: 0
-pcmk__bundle_allocate: app-bundle-clone allocation score on app-bundle-0: -INFINITY
-pcmk__bundle_allocate: app-bundle-clone allocation score on app-bundle-1: -INFINITY
-pcmk__bundle_allocate: app-bundle-clone allocation score on app-bundle-2: -INFINITY
-pcmk__bundle_allocate: app-bundle-clone allocation score on node1: 0
-pcmk__bundle_allocate: app-bundle-clone allocation score on node2: 0
-pcmk__bundle_allocate: app-bundle-clone allocation score on node3: 0
-pcmk__bundle_allocate: app-bundle-clone allocation score on node4: 0
-pcmk__bundle_allocate: app-bundle-clone allocation score on node5: 0
-pcmk__bundle_allocate: app-bundle-podman-0 allocation score on node1: 0
-pcmk__bundle_allocate: app-bundle-podman-0 allocation score on node2: 0
-pcmk__bundle_allocate: app-bundle-podman-0 allocation score on node3: 0
-pcmk__bundle_allocate: app-bundle-podman-0 allocation score on node4: 0
-pcmk__bundle_allocate: app-bundle-podman-0 allocation score on node5: 0
-pcmk__bundle_allocate: app-bundle-podman-1 allocation score on node1: 0
-pcmk__bundle_allocate: app-bundle-podman-1 allocation score on node2: 0
-pcmk__bundle_allocate: app-bundle-podman-1 allocation score on node3: 0
-pcmk__bundle_allocate: app-bundle-podman-1 allocation score on node4: 0
-pcmk__bundle_allocate: app-bundle-podman-1 allocation score on node5: 0
-pcmk__bundle_allocate: app-bundle-podman-2 allocation score on node1: 0
-pcmk__bundle_allocate: app-bundle-podman-2 allocation score on node2: 0
-pcmk__bundle_allocate: app-bundle-podman-2 allocation score on node3: 0
-pcmk__bundle_allocate: app-bundle-podman-2 allocation score on node4: 0
-pcmk__bundle_allocate: app-bundle-podman-2 allocation score on node5: 0
-pcmk__bundle_allocate: app:0 allocation score on app-bundle-0: 500
-pcmk__bundle_allocate: app:1 allocation score on app-bundle-1: 500
-pcmk__bundle_allocate: app:2 allocation score on app-bundle-2: 500
-pcmk__bundle_allocate: base-bundle allocation score on node1: 0
-pcmk__bundle_allocate: base-bundle allocation score on node2: 0
-pcmk__bundle_allocate: base-bundle allocation score on node3: 0
-pcmk__bundle_allocate: base-bundle allocation score on node4: 0
-pcmk__bundle_allocate: base-bundle allocation score on node5: 0
-pcmk__bundle_allocate: base-bundle-0 allocation score on node1: 0
-pcmk__bundle_allocate: base-bundle-0 allocation score on node2: 0
-pcmk__bundle_allocate: base-bundle-0 allocation score on node3: 0
-pcmk__bundle_allocate: base-bundle-0 allocation score on node4: 0
-pcmk__bundle_allocate: base-bundle-0 allocation score on node5: 0
-pcmk__bundle_allocate: base-bundle-1 allocation score on node1: 0
-pcmk__bundle_allocate: base-bundle-1 allocation score on node2: 0
-pcmk__bundle_allocate: base-bundle-1 allocation score on node3: 0
-pcmk__bundle_allocate: base-bundle-1 allocation score on node4: 0
-pcmk__bundle_allocate: base-bundle-1 allocation score on node5: 0
-pcmk__bundle_allocate: base-bundle-2 allocation score on node1: 0
-pcmk__bundle_allocate: base-bundle-2 allocation score on node2: 0
-pcmk__bundle_allocate: base-bundle-2 allocation score on node3: 0
-pcmk__bundle_allocate: base-bundle-2 allocation score on node4: 0
-pcmk__bundle_allocate: base-bundle-2 allocation score on node5: 0
-pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-0: -INFINITY
-pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-1: -INFINITY
-pcmk__bundle_allocate: base-bundle-clone allocation score on base-bundle-2: -INFINITY
-pcmk__bundle_allocate: base-bundle-clone allocation score on node1: 0
-pcmk__bundle_allocate: base-bundle-clone allocation score on node2: 0
-pcmk__bundle_allocate: base-bundle-clone allocation score on node3: 0
-pcmk__bundle_allocate: base-bundle-clone allocation score on node4: 0
-pcmk__bundle_allocate: base-bundle-clone allocation score on node5: 0
-pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node1: 0
-pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node2: 0
-pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node3: 0
-pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node4: 0
-pcmk__bundle_allocate: base-bundle-podman-0 allocation score on node5: 0
-pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node1: 0
-pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node2: 0
-pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node3: 0
-pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node4: 0
-pcmk__bundle_allocate: base-bundle-podman-1 allocation score on node5: 0
-pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node1: 0
-pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node2: 0
-pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node3: 0
-pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node4: 0
-pcmk__bundle_allocate: base-bundle-podman-2 allocation score on node5: 0
-pcmk__bundle_allocate: base:0 allocation score on base-bundle-0: 500
-pcmk__bundle_allocate: base:1 allocation score on base-bundle-1: 500
-pcmk__bundle_allocate: base:2 allocation score on base-bundle-2: 500
+app:0 promotion score on app-bundle-0: 12
+app:1 promotion score on app-bundle-1: 13
+app:2 promotion score on app-bundle-2: INFINITY
+base:0 promotion score on base-bundle-0: 12
+base:1 promotion score on base-bundle-1: 13
+base:2 promotion score on base-bundle-2: 14
+pcmk__bundle_assign: app-bundle allocation score on node1: 0
+pcmk__bundle_assign: app-bundle allocation score on node2: 0
+pcmk__bundle_assign: app-bundle allocation score on node3: 0
+pcmk__bundle_assign: app-bundle allocation score on node4: 0
+pcmk__bundle_assign: app-bundle allocation score on node5: 0
+pcmk__bundle_assign: app-bundle-0 allocation score on node1: 0
+pcmk__bundle_assign: app-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: app-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: app-bundle-0 allocation score on node4: 0
+pcmk__bundle_assign: app-bundle-0 allocation score on node5: 0
+pcmk__bundle_assign: app-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: app-bundle-1 allocation score on node2: 0
+pcmk__bundle_assign: app-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: app-bundle-1 allocation score on node4: 0
+pcmk__bundle_assign: app-bundle-1 allocation score on node5: 0
+pcmk__bundle_assign: app-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: app-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: app-bundle-2 allocation score on node3: 0
+pcmk__bundle_assign: app-bundle-2 allocation score on node4: 0
+pcmk__bundle_assign: app-bundle-2 allocation score on node5: 0
+pcmk__bundle_assign: app-bundle-clone allocation score on app-bundle-0: -INFINITY
+pcmk__bundle_assign: app-bundle-clone allocation score on app-bundle-1: -INFINITY
+pcmk__bundle_assign: app-bundle-clone allocation score on app-bundle-2: -INFINITY
+pcmk__bundle_assign: app-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: app-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: app-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: app-bundle-clone allocation score on node4: 0
+pcmk__bundle_assign: app-bundle-clone allocation score on node5: 0
+pcmk__bundle_assign: app-bundle-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: app-bundle-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: app-bundle-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: app-bundle-podman-0 allocation score on node4: 0
+pcmk__bundle_assign: app-bundle-podman-0 allocation score on node5: 0
+pcmk__bundle_assign: app-bundle-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: app-bundle-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: app-bundle-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: app-bundle-podman-1 allocation score on node4: 0
+pcmk__bundle_assign: app-bundle-podman-1 allocation score on node5: 0
+pcmk__bundle_assign: app-bundle-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: app-bundle-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: app-bundle-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: app-bundle-podman-2 allocation score on node4: 0
+pcmk__bundle_assign: app-bundle-podman-2 allocation score on node5: 0
+pcmk__bundle_assign: app:0 allocation score on app-bundle-0: 500
+pcmk__bundle_assign: app:1 allocation score on app-bundle-1: 500
+pcmk__bundle_assign: app:2 allocation score on app-bundle-2: 500
+pcmk__bundle_assign: base-bundle allocation score on node1: 0
+pcmk__bundle_assign: base-bundle allocation score on node2: 0
+pcmk__bundle_assign: base-bundle allocation score on node3: 0
+pcmk__bundle_assign: base-bundle allocation score on node4: 0
+pcmk__bundle_assign: base-bundle allocation score on node5: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node4: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node5: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node4: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node5: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node4: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node5: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node4: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node5: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node4: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node5: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node4: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node5: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node4: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node5: 0
+pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 500
+pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 500
+pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 500
pcmk__clone_assign: app-bundle-clone allocation score on app-bundle-0: 0
pcmk__clone_assign: app-bundle-clone allocation score on app-bundle-1: 0
pcmk__clone_assign: app-bundle-clone allocation score on app-bundle-2: 0
diff --git a/cts/scheduler/scores/bundle-nested-colocation.scores b/cts/scheduler/scores/bundle-nested-colocation.scores
index b83b212..9baa073 100644
--- a/cts/scheduler/scores/bundle-nested-colocation.scores
+++ b/cts/scheduler/scores/bundle-nested-colocation.scores
@@ -1,118 +1,118 @@
-pcmk__bundle_allocate: galera-bundle allocation score on overcloud-controller-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on overcloud-controller-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on overcloud-controller-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on overcloud-galera-0: 0
-pcmk__bundle_allocate: galera-bundle allocation score on overcloud-galera-1: 0
-pcmk__bundle_allocate: galera-bundle allocation score on overcloud-galera-2: 0
-pcmk__bundle_allocate: galera-bundle allocation score on overcloud-rabbit-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on overcloud-rabbit-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on overcloud-rabbit-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-controller-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-controller-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-controller-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-galera-0: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-galera-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-galera-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-rabbit-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-rabbit-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-rabbit-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-controller-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-controller-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-controller-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-galera-0: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-galera-1: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-galera-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-rabbit-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-rabbit-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-rabbit-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-controller-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-controller-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-controller-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-galera-0: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-galera-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-galera-2: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-rabbit-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-rabbit-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-rabbit-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-controller-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-controller-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-controller-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-galera-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-galera-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-galera-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-rabbit-0: 1
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-rabbit-1: 1
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-rabbit-2: 1
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-galera-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-galera-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-galera-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-rabbit-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-rabbit-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-rabbit-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-galera-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-galera-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-galera-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-rabbit-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-rabbit-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-rabbit-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-galera-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-galera-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-galera-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-rabbit-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-rabbit-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-rabbit-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-galera-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-galera-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-galera-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-rabbit-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-rabbit-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-rabbit-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-galera-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-galera-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-galera-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-rabbit-0: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-rabbit-1: 1
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-rabbit-2: 1
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-galera-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-galera-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-galera-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-rabbit-0: 1
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-rabbit-1: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-rabbit-2: 1
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-controller-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-controller-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-controller-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-galera-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-galera-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-galera-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-rabbit-0: 1
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-rabbit-1: 1
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-rabbit-2: INFINITY
-pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: 500
-pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: 500
-pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: 500
+pcmk__bundle_assign: galera-bundle allocation score on overcloud-controller-0: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on overcloud-controller-1: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on overcloud-controller-2: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on overcloud-galera-0: 0
+pcmk__bundle_assign: galera-bundle allocation score on overcloud-galera-1: 0
+pcmk__bundle_assign: galera-bundle allocation score on overcloud-galera-2: 0
+pcmk__bundle_assign: galera-bundle allocation score on overcloud-rabbit-0: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on overcloud-rabbit-1: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on overcloud-rabbit-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-controller-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-controller-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-controller-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-galera-0: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-galera-1: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-galera-2: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-rabbit-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-rabbit-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-rabbit-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-controller-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-controller-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-controller-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-galera-0: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-galera-1: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-galera-2: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-rabbit-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-rabbit-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-rabbit-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-controller-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-controller-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-controller-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-galera-0: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-galera-1: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-galera-2: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-rabbit-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-rabbit-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-rabbit-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-controller-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-controller-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-controller-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-galera-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-galera-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-galera-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-rabbit-0: 1
+pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-rabbit-1: 1
+pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-rabbit-2: 1
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-galera-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-galera-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-galera-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-rabbit-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-rabbit-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-rabbit-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-galera-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-galera-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-galera-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-rabbit-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-rabbit-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-rabbit-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-galera-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-galera-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-galera-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-rabbit-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-rabbit-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-rabbit-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-galera-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-galera-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-galera-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-rabbit-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-rabbit-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-rabbit-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-galera-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-galera-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-galera-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-rabbit-0: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-rabbit-1: 1
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-rabbit-2: 1
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-galera-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-galera-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-galera-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-rabbit-0: 1
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-rabbit-1: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-rabbit-2: 1
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-controller-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-controller-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-controller-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-galera-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-galera-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-galera-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-rabbit-0: 1
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-rabbit-1: 1
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-rabbit-2: INFINITY
+pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: 500
+pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: 500
+pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: 500
pcmk__clone_assign: rabbitmq-bundle-clone allocation score on overcloud-controller-0: -INFINITY
pcmk__clone_assign: rabbitmq-bundle-clone allocation score on overcloud-controller-1: -INFINITY
pcmk__clone_assign: rabbitmq-bundle-clone allocation score on overcloud-controller-2: -INFINITY
diff --git a/cts/scheduler/scores/bundle-order-fencing.scores b/cts/scheduler/scores/bundle-order-fencing.scores
index a3dee02..54db322 100644
--- a/cts/scheduler/scores/bundle-order-fencing.scores
+++ b/cts/scheduler/scores/bundle-order-fencing.scores
@@ -2,129 +2,129 @@
galera:0 promotion score on galera-bundle-0: -1
galera:1 promotion score on galera-bundle-1: 100
galera:2 promotion score on galera-bundle-2: 100
-pcmk__bundle_allocate: galera-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: INFINITY
-pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: INFINITY
-pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on rabbitmq-bundle-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on rabbitmq-bundle-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on rabbitmq-bundle-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on rabbitmq-bundle-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on rabbitmq-bundle-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on rabbitmq-bundle-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
-pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY
-pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
-pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: INFINITY
-pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: INFINITY
-pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: INFINITY
+pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: INFINITY
+pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
+pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY
+pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
+pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: INFINITY
+pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: INFINITY
+pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on controller-0: -INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on controller-1: -INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on controller-2: -INFINITY
diff --git a/cts/scheduler/scores/bundle-order-partial-start-2.scores b/cts/scheduler/scores/bundle-order-partial-start-2.scores
index bb77c77..3d3eb82 100644
--- a/cts/scheduler/scores/bundle-order-partial-start-2.scores
+++ b/cts/scheduler/scores/bundle-order-partial-start-2.scores
@@ -1,30 +1,30 @@
galera:0 promotion score on galera-bundle-0: -1
-pcmk__bundle_allocate: galera-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on undercloud: 0
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 500
-pcmk__bundle_allocate: haproxy-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on undercloud: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on undercloud: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on undercloud: 0
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on undercloud: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: 500
-pcmk__bundle_allocate: redis-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on undercloud: 0
-pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on undercloud: 0
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 500
+pcmk__bundle_assign: haproxy-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on undercloud: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on undercloud: 0
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on undercloud: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: 500
+pcmk__bundle_assign: redis-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on undercloud: 0
+pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-0: 0
pcmk__clone_assign: galera-bundle-master allocation score on undercloud: -INFINITY
pcmk__clone_assign: galera:0 allocation score on galera-bundle-0: INFINITY
@@ -37,7 +37,7 @@ pcmk__clone_assign: redis:0 allocation score on redis-bundle-0: INFINITY
pcmk__primitive_assign: galera-bundle-0 allocation score on undercloud: INFINITY
pcmk__primitive_assign: galera-bundle-docker-0 allocation score on undercloud: INFINITY
pcmk__primitive_assign: galera:0 allocation score on galera-bundle-0: INFINITY
-pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on undercloud: 0
+pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on undercloud: INFINITY
pcmk__primitive_assign: ip-192.168.122.247 allocation score on undercloud: INFINITY
pcmk__primitive_assign: ip-192.168.122.248 allocation score on undercloud: INFINITY
pcmk__primitive_assign: ip-192.168.122.249 allocation score on undercloud: INFINITY
diff --git a/cts/scheduler/scores/bundle-order-partial-start.scores b/cts/scheduler/scores/bundle-order-partial-start.scores
index d765883..7e76f44 100644
--- a/cts/scheduler/scores/bundle-order-partial-start.scores
+++ b/cts/scheduler/scores/bundle-order-partial-start.scores
@@ -1,30 +1,30 @@
galera:0 promotion score on galera-bundle-0: -1
-pcmk__bundle_allocate: galera-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on undercloud: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on undercloud: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on undercloud: 0
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 500
-pcmk__bundle_allocate: haproxy-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on undercloud: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on undercloud: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on undercloud: 0
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on undercloud: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: 500
-pcmk__bundle_allocate: redis-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on undercloud: 0
-pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on undercloud: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on undercloud: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on undercloud: 0
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 500
+pcmk__bundle_assign: haproxy-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on undercloud: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on undercloud: 0
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on undercloud: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: 500
+pcmk__bundle_assign: redis-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on undercloud: 0
+pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-0: 0
pcmk__clone_assign: galera-bundle-master allocation score on undercloud: -INFINITY
pcmk__clone_assign: galera:0 allocation score on galera-bundle-0: INFINITY
@@ -37,7 +37,7 @@ pcmk__clone_assign: redis:0 allocation score on redis-bundle-0: INFINITY
pcmk__primitive_assign: galera-bundle-0 allocation score on undercloud: 10000
pcmk__primitive_assign: galera-bundle-docker-0 allocation score on undercloud: 0
pcmk__primitive_assign: galera:0 allocation score on galera-bundle-0: INFINITY
-pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on undercloud: 0
+pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on undercloud: INFINITY
pcmk__primitive_assign: ip-192.168.122.247 allocation score on undercloud: INFINITY
pcmk__primitive_assign: ip-192.168.122.248 allocation score on undercloud: INFINITY
pcmk__primitive_assign: ip-192.168.122.249 allocation score on undercloud: INFINITY
diff --git a/cts/scheduler/scores/bundle-order-partial-stop.scores b/cts/scheduler/scores/bundle-order-partial-stop.scores
index e00df39..2bb6cb6 100644
--- a/cts/scheduler/scores/bundle-order-partial-stop.scores
+++ b/cts/scheduler/scores/bundle-order-partial-stop.scores
@@ -1,30 +1,30 @@
galera:0 promotion score on galera-bundle-0: 100
-pcmk__bundle_allocate: galera-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on undercloud: 0
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on undercloud: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on undercloud: 0
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on undercloud: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on undercloud: 0
-pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on undercloud: 0
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on undercloud: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on undercloud: 0
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on undercloud: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on undercloud: 0
+pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-0: 0
pcmk__clone_assign: galera-bundle-master allocation score on undercloud: -INFINITY
pcmk__clone_assign: galera:0 allocation score on galera-bundle-0: INFINITY
diff --git a/cts/scheduler/scores/bundle-order-startup-clone-2.scores b/cts/scheduler/scores/bundle-order-startup-clone-2.scores
index d44e358..f4e5353 100644
--- a/cts/scheduler/scores/bundle-order-startup-clone-2.scores
+++ b/cts/scheduler/scores/bundle-order-startup-clone-2.scores
@@ -2,98 +2,98 @@
galera:0 promotion score on galera-bundle-0: -1
galera:1 promotion score on galera-bundle-1: -1
galera:2 promotion score on galera-bundle-2: -1
-pcmk__bundle_allocate: galera-bundle allocation score on metal-1: 0
-pcmk__bundle_allocate: galera-bundle allocation score on metal-2: 0
-pcmk__bundle_allocate: galera-bundle allocation score on metal-3: 0
-pcmk__bundle_allocate: galera-bundle allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on metal-1: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on metal-2: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on metal-3: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on metal-1: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on metal-2: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on metal-3: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on metal-1: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on metal-2: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on metal-3: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on metal-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on metal-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on metal-3: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on metal-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on metal-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on metal-3: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on metal-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on metal-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on metal-3: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on metal-1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on metal-2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on metal-3: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on rabbitmq-bundle-0: 0
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 500
-pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: 500
-pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: 500
-pcmk__bundle_allocate: haproxy-bundle allocation score on metal-1: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on metal-2: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on metal-3: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on rabbitmq-bundle-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-3: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on rabbitmq-bundle-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-3: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on rabbitmq-bundle-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-3: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on rabbitmq-bundle-0: 0
-pcmk__bundle_allocate: redis-bundle allocation score on metal-1: 0
-pcmk__bundle_allocate: redis-bundle allocation score on metal-2: 0
-pcmk__bundle_allocate: redis-bundle allocation score on metal-3: 0
-pcmk__bundle_allocate: redis-bundle allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on metal-1: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on metal-2: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on metal-3: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on metal-1: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on metal-2: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on metal-3: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on metal-1: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on metal-2: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on metal-3: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on metal-1: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on metal-2: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on metal-3: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on metal-1: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on metal-2: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on metal-3: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on metal-1: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on metal-2: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on metal-3: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on metal-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on metal-2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on metal-3: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on rabbitmq-bundle-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
-pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: 500
-pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: 500
-pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: 500
+pcmk__bundle_assign: galera-bundle allocation score on metal-1: 0
+pcmk__bundle_assign: galera-bundle allocation score on metal-2: 0
+pcmk__bundle_assign: galera-bundle allocation score on metal-3: 0
+pcmk__bundle_assign: galera-bundle allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on metal-1: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on metal-2: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on metal-3: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on metal-1: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on metal-2: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on metal-3: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on metal-1: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on metal-2: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on metal-3: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on metal-1: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on metal-2: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on metal-3: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on metal-1: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on metal-2: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on metal-3: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on metal-1: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on metal-2: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on metal-3: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on metal-1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on metal-2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on metal-3: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on rabbitmq-bundle-0: 0
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 500
+pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: 500
+pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: 500
+pcmk__bundle_assign: haproxy-bundle allocation score on metal-1: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on metal-2: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on metal-3: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on rabbitmq-bundle-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on metal-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on metal-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on metal-3: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on rabbitmq-bundle-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on metal-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on metal-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on metal-3: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on rabbitmq-bundle-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on metal-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on metal-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on metal-3: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on rabbitmq-bundle-0: 0
+pcmk__bundle_assign: redis-bundle allocation score on metal-1: 0
+pcmk__bundle_assign: redis-bundle allocation score on metal-2: 0
+pcmk__bundle_assign: redis-bundle allocation score on metal-3: 0
+pcmk__bundle_assign: redis-bundle allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on metal-1: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on metal-2: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on metal-3: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on metal-1: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on metal-2: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on metal-3: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on metal-1: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on metal-2: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on metal-3: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on metal-1: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on metal-2: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on metal-3: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on metal-1: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on metal-2: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on metal-3: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on metal-1: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on metal-2: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on metal-3: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on metal-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on metal-2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on metal-3: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on rabbitmq-bundle-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
+pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: 500
+pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: 500
+pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: 500
pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-0: 0
pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-1: 0
pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-2: 0
diff --git a/cts/scheduler/scores/bundle-order-startup-clone.scores b/cts/scheduler/scores/bundle-order-startup-clone.scores
index f749b33..e64246b 100644
--- a/cts/scheduler/scores/bundle-order-startup-clone.scores
+++ b/cts/scheduler/scores/bundle-order-startup-clone.scores
@@ -1,49 +1,49 @@
galera:0 promotion score on galera-bundle-0: -1
-pcmk__bundle_allocate: galera-bundle allocation score on metal-1: 0
-pcmk__bundle_allocate: galera-bundle allocation score on metal-2: 0
-pcmk__bundle_allocate: galera-bundle allocation score on metal-3: 0
-pcmk__bundle_allocate: galera-bundle allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on metal-1: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on metal-2: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on metal-3: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on metal-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on metal-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on metal-3: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on metal-1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on metal-2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on metal-3: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on rabbitmq-bundle-0: 0
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 500
-pcmk__bundle_allocate: haproxy-bundle allocation score on metal-1: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on metal-2: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on metal-3: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on rabbitmq-bundle-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-3: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on rabbitmq-bundle-0: 0
-pcmk__bundle_allocate: redis-bundle allocation score on metal-1: 0
-pcmk__bundle_allocate: redis-bundle allocation score on metal-2: 0
-pcmk__bundle_allocate: redis-bundle allocation score on metal-3: 0
-pcmk__bundle_allocate: redis-bundle allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on metal-1: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on metal-2: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on metal-3: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on metal-1: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on metal-2: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on metal-3: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on metal-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on metal-2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on metal-3: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on rabbitmq-bundle-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: 500
+pcmk__bundle_assign: galera-bundle allocation score on metal-1: 0
+pcmk__bundle_assign: galera-bundle allocation score on metal-2: 0
+pcmk__bundle_assign: galera-bundle allocation score on metal-3: 0
+pcmk__bundle_assign: galera-bundle allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on metal-1: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on metal-2: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on metal-3: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on metal-1: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on metal-2: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on metal-3: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on metal-1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on metal-2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on metal-3: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on rabbitmq-bundle-0: 0
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 500
+pcmk__bundle_assign: haproxy-bundle allocation score on metal-1: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on metal-2: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on metal-3: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on rabbitmq-bundle-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on metal-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on metal-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on metal-3: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on rabbitmq-bundle-0: 0
+pcmk__bundle_assign: redis-bundle allocation score on metal-1: 0
+pcmk__bundle_assign: redis-bundle allocation score on metal-2: 0
+pcmk__bundle_assign: redis-bundle allocation score on metal-3: 0
+pcmk__bundle_assign: redis-bundle allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on metal-1: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on metal-2: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on metal-3: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on metal-1: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on metal-2: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on metal-3: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on metal-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on metal-2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on metal-3: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on rabbitmq-bundle-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
+pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: 500
pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-0: 0
pcmk__clone_assign: galera-bundle-master allocation score on metal-1: -INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on metal-2: -INFINITY
diff --git a/cts/scheduler/scores/bundle-order-startup.scores b/cts/scheduler/scores/bundle-order-startup.scores
index 9b32784..92fce09 100644
--- a/cts/scheduler/scores/bundle-order-startup.scores
+++ b/cts/scheduler/scores/bundle-order-startup.scores
@@ -1,30 +1,30 @@
galera:0 promotion score on galera-bundle-0: -1
-pcmk__bundle_allocate: galera-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on undercloud: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on undercloud: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on undercloud: 0
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 500
-pcmk__bundle_allocate: haproxy-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on undercloud: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on undercloud: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on undercloud: 0
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on undercloud: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on undercloud: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on undercloud: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on undercloud: 0
-pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: 500
-pcmk__bundle_allocate: redis-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on undercloud: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on undercloud: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on undercloud: 0
-pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: 500
+pcmk__bundle_assign: galera-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on undercloud: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on undercloud: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on undercloud: 0
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 500
+pcmk__bundle_assign: haproxy-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on undercloud: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on undercloud: 0
+pcmk__bundle_assign: openstack-cinder-volume allocation score on undercloud: 0
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on undercloud: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on undercloud: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on undercloud: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on undercloud: 0
+pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: 500
+pcmk__bundle_assign: redis-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on undercloud: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on undercloud: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on undercloud: 0
+pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: 500
pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-0: 0
pcmk__clone_assign: galera-bundle-master allocation score on undercloud: -INFINITY
pcmk__clone_assign: galera:0 allocation score on galera-bundle-0: INFINITY
diff --git a/cts/scheduler/scores/bundle-order-stop-clone.scores b/cts/scheduler/scores/bundle-order-stop-clone.scores
index 707260b..59419eb 100644
--- a/cts/scheduler/scores/bundle-order-stop-clone.scores
+++ b/cts/scheduler/scores/bundle-order-stop-clone.scores
@@ -2,98 +2,98 @@
galera:0 promotion score on galera-bundle-0: -1
galera:1 promotion score on galera-bundle-1: -1
galera:2 promotion score on galera-bundle-2: -1
-pcmk__bundle_allocate: galera-bundle allocation score on metal-1: 0
-pcmk__bundle_allocate: galera-bundle allocation score on metal-2: 0
-pcmk__bundle_allocate: galera-bundle allocation score on metal-3: 0
-pcmk__bundle_allocate: galera-bundle allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on metal-1: INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on metal-2: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on metal-3: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on metal-1: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on metal-2: INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on metal-3: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on metal-1: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on metal-2: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on metal-3: INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on metal-1: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on metal-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on metal-3: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on metal-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on metal-2: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on metal-3: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on metal-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on metal-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on metal-3: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on metal-1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on metal-2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on metal-3: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on rabbitmq-bundle-0: 0
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: INFINITY
-pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: INFINITY
-pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on metal-1: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on metal-2: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on metal-3: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on rabbitmq-bundle-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-1: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on metal-3: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on rabbitmq-bundle-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-2: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on metal-3: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on rabbitmq-bundle-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on metal-3: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on rabbitmq-bundle-0: 0
-pcmk__bundle_allocate: redis-bundle allocation score on metal-1: 0
-pcmk__bundle_allocate: redis-bundle allocation score on metal-2: 0
-pcmk__bundle_allocate: redis-bundle allocation score on metal-3: 0
-pcmk__bundle_allocate: redis-bundle allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on metal-1: INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on metal-2: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on metal-3: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on metal-1: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on metal-2: INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on metal-3: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on metal-1: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on metal-2: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on metal-3: INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on metal-1: INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on metal-2: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on metal-3: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on metal-1: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on metal-2: INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on metal-3: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on metal-1: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on metal-2: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on metal-3: INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on metal-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on metal-2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on metal-3: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on rabbitmq-bundle-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
-pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: INFINITY
-pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: INFINITY
-pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on metal-1: 0
+pcmk__bundle_assign: galera-bundle allocation score on metal-2: 0
+pcmk__bundle_assign: galera-bundle allocation score on metal-3: 0
+pcmk__bundle_assign: galera-bundle allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on metal-1: INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on metal-2: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on metal-3: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on metal-1: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on metal-2: INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on metal-3: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on metal-1: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on metal-2: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on metal-3: INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on metal-1: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on metal-2: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on metal-3: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on metal-1: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on metal-2: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on metal-3: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on metal-1: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on metal-2: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on metal-3: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on metal-1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on metal-2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on metal-3: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on rabbitmq-bundle-0: 0
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: INFINITY
+pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: INFINITY
+pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on metal-1: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on metal-2: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on metal-3: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on rabbitmq-bundle-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on metal-1: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on metal-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on metal-3: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on rabbitmq-bundle-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on metal-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on metal-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on metal-3: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on rabbitmq-bundle-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on metal-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on metal-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on metal-3: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on rabbitmq-bundle-0: 0
+pcmk__bundle_assign: redis-bundle allocation score on metal-1: 0
+pcmk__bundle_assign: redis-bundle allocation score on metal-2: 0
+pcmk__bundle_assign: redis-bundle allocation score on metal-3: 0
+pcmk__bundle_assign: redis-bundle allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on metal-1: INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on metal-2: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on metal-3: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on metal-1: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on metal-2: INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on metal-3: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on metal-1: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on metal-2: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on metal-3: INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on metal-1: INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on metal-2: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on metal-3: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on metal-1: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on metal-2: INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on metal-3: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on metal-1: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on metal-2: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on metal-3: INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on metal-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on metal-2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on metal-3: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on rabbitmq-bundle-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
+pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: INFINITY
+pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: INFINITY
+pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-0: 0
pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-1: 0
pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-2: 0
@@ -147,8 +147,12 @@ pcmk__primitive_assign: galera-bundle-2 allocation score on metal-2: 0
pcmk__primitive_assign: galera-bundle-2 allocation score on metal-3: INFINITY
pcmk__primitive_assign: galera-bundle-2 allocation score on rabbitmq-bundle-0: -INFINITY
pcmk__primitive_assign: galera-bundle-docker-0 allocation score on metal-1: -INFINITY
+pcmk__primitive_assign: galera-bundle-docker-0 allocation score on metal-1: -INFINITY
+pcmk__primitive_assign: galera-bundle-docker-0 allocation score on metal-2: -INFINITY
pcmk__primitive_assign: galera-bundle-docker-0 allocation score on metal-2: -INFINITY
pcmk__primitive_assign: galera-bundle-docker-0 allocation score on metal-3: -INFINITY
+pcmk__primitive_assign: galera-bundle-docker-0 allocation score on metal-3: -INFINITY
+pcmk__primitive_assign: galera-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
pcmk__primitive_assign: galera-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
pcmk__primitive_assign: galera-bundle-docker-1 allocation score on metal-1: -INFINITY
pcmk__primitive_assign: galera-bundle-docker-1 allocation score on metal-2: INFINITY
diff --git a/cts/scheduler/scores/bundle-order-stop-on-remote.scores b/cts/scheduler/scores/bundle-order-stop-on-remote.scores
index 4f592d1..7d92b2c 100644
--- a/cts/scheduler/scores/bundle-order-stop-on-remote.scores
+++ b/cts/scheduler/scores/bundle-order-stop-on-remote.scores
@@ -2,312 +2,312 @@
galera:0 promotion score on galera-bundle-0: 100
galera:1 promotion score on galera-bundle-1: 100
galera:2 promotion score on galera-bundle-2: 100
-pcmk__bundle_allocate: galera-bundle allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on database-0: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on database-1: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on database-2: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-0: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-2: 0
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: INFINITY
-pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: INFINITY
-pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on messaging-0: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on messaging-1: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on messaging-2: INFINITY
-pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
-pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY
-pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on database-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on database-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on database-2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
-pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: INFINITY
-pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: 500
-pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on database-0: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on database-1: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on database-2: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on messaging-0: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on messaging-1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on messaging-2: 0
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: INFINITY
+pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: INFINITY
+pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on database-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on database-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on database-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on messaging-0: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on messaging-1: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on messaging-2: INFINITY
+pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
+pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY
+pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle allocation score on database-0: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on database-1: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on database-2: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on database-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on database-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on database-2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on messaging-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on messaging-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on messaging-2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
+pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: INFINITY
+pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: 500
+pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on controller-0: -INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on controller-1: -INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on controller-2: -INFINITY
diff --git a/cts/scheduler/scores/bundle-order-stop.scores b/cts/scheduler/scores/bundle-order-stop.scores
index e00df39..2bb6cb6 100644
--- a/cts/scheduler/scores/bundle-order-stop.scores
+++ b/cts/scheduler/scores/bundle-order-stop.scores
@@ -1,30 +1,30 @@
galera:0 promotion score on galera-bundle-0: 100
-pcmk__bundle_allocate: galera-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on undercloud: 0
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on undercloud: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on undercloud: 0
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on undercloud: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on undercloud: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on undercloud: INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on undercloud: 0
-pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on undercloud: 0
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on undercloud: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on undercloud: 0
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on undercloud: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on undercloud: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on undercloud: INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on undercloud: 0
+pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-0: 0
pcmk__clone_assign: galera-bundle-master allocation score on undercloud: -INFINITY
pcmk__clone_assign: galera:0 allocation score on galera-bundle-0: INFINITY
diff --git a/cts/scheduler/scores/bundle-probe-order-1.scores b/cts/scheduler/scores/bundle-probe-order-1.scores
index 0716be6..edaaaa3 100644
--- a/cts/scheduler/scores/bundle-probe-order-1.scores
+++ b/cts/scheduler/scores/bundle-probe-order-1.scores
@@ -2,36 +2,36 @@
galera:0 promotion score on none: 0
galera:1 promotion score on none: 0
galera:2 promotion score on none: 0
-pcmk__bundle_allocate: galera-bundle allocation score on centos1: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle allocation score on centos3: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on centos1: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on centos3: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on centos1: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on centos3: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on centos1: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on centos3: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on centos1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on centos3: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on centos1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on centos3: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on centos1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on centos3: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on centos1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on centos3: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 500
-pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: 500
-pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: 500
+pcmk__bundle_assign: galera-bundle allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 500
+pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: 500
+pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: 500
pcmk__clone_assign: galera-bundle-master allocation score on centos1: -INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on centos2: -INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on centos3: -INFINITY
diff --git a/cts/scheduler/scores/bundle-probe-order-2.scores b/cts/scheduler/scores/bundle-probe-order-2.scores
index ed8f93f..2de3bbc 100644
--- a/cts/scheduler/scores/bundle-probe-order-2.scores
+++ b/cts/scheduler/scores/bundle-probe-order-2.scores
@@ -2,36 +2,36 @@
galera:0 promotion score on none: 0
galera:1 promotion score on none: 0
galera:2 promotion score on none: 0
-pcmk__bundle_allocate: galera-bundle allocation score on centos1: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle allocation score on centos3: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on centos1: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on centos2: INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on centos3: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on centos1: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on centos3: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on centos1: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on centos3: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on centos1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on centos2: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on centos3: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on centos1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on centos3: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on centos1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on centos3: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on centos1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on centos3: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 500
-pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: 500
-pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: 500
+pcmk__bundle_assign: galera-bundle allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on centos2: INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on centos2: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 500
+pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: 500
+pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: 500
pcmk__clone_assign: galera-bundle-master allocation score on centos1: -INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on centos2: -INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on centos3: -INFINITY
diff --git a/cts/scheduler/scores/bundle-probe-order-3.scores b/cts/scheduler/scores/bundle-probe-order-3.scores
index 3343ae3..e63fa84 100644
--- a/cts/scheduler/scores/bundle-probe-order-3.scores
+++ b/cts/scheduler/scores/bundle-probe-order-3.scores
@@ -2,36 +2,36 @@
galera:0 promotion score on none: 0
galera:1 promotion score on none: 0
galera:2 promotion score on none: 0
-pcmk__bundle_allocate: galera-bundle allocation score on centos1: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle allocation score on centos3: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on centos1: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on centos3: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on centos1: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on centos3: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on centos1: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on centos3: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on centos1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on centos2: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on centos3: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on centos1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on centos3: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on centos1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on centos3: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on centos1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on centos2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on centos3: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 500
-pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: 500
-pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: 500
+pcmk__bundle_assign: galera-bundle allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on centos2: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on centos1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on centos3: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on centos1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on centos2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on centos3: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 500
+pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: 500
+pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: 500
pcmk__clone_assign: galera-bundle-master allocation score on centos1: -INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on centos2: -INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on centos3: -INFINITY
diff --git a/cts/scheduler/scores/bundle-probe-remotes.scores b/cts/scheduler/scores/bundle-probe-remotes.scores
index 3361749..13180af 100644
--- a/cts/scheduler/scores/bundle-probe-remotes.scores
+++ b/cts/scheduler/scores/bundle-probe-remotes.scores
@@ -1,100 +1,100 @@
-pcmk__bundle_allocate: dummy1:0 allocation score on scale1-bundle-0: 500
-pcmk__bundle_allocate: dummy1:1 allocation score on scale1-bundle-1: 500
-pcmk__bundle_allocate: dummy1:2 allocation score on scale1-bundle-2: 500
-pcmk__bundle_allocate: dummy1:3 allocation score on scale1-bundle-3: 500
-pcmk__bundle_allocate: dummy1:4 allocation score on scale1-bundle-4: 500
-pcmk__bundle_allocate: dummy1:5 allocation score on scale1-bundle-5: 500
-pcmk__bundle_allocate: scale1-bundle allocation score on c09-h05-r630: 0
-pcmk__bundle_allocate: scale1-bundle allocation score on c09-h06-r630: 0
-pcmk__bundle_allocate: scale1-bundle allocation score on c09-h07-r630: 0
-pcmk__bundle_allocate: scale1-bundle allocation score on c09-h08-r630: 0
-pcmk__bundle_allocate: scale1-bundle allocation score on c09-h09-r630: 0
-pcmk__bundle_allocate: scale1-bundle allocation score on c09-h10-r630: 0
-pcmk__bundle_allocate: scale1-bundle-0 allocation score on c09-h05-r630: 0
-pcmk__bundle_allocate: scale1-bundle-0 allocation score on c09-h06-r630: 0
-pcmk__bundle_allocate: scale1-bundle-0 allocation score on c09-h07-r630: 0
-pcmk__bundle_allocate: scale1-bundle-0 allocation score on c09-h08-r630: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-0 allocation score on c09-h09-r630: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-0 allocation score on c09-h10-r630: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-1 allocation score on c09-h05-r630: 0
-pcmk__bundle_allocate: scale1-bundle-1 allocation score on c09-h06-r630: 0
-pcmk__bundle_allocate: scale1-bundle-1 allocation score on c09-h07-r630: 0
-pcmk__bundle_allocate: scale1-bundle-1 allocation score on c09-h08-r630: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-1 allocation score on c09-h09-r630: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-1 allocation score on c09-h10-r630: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-2 allocation score on c09-h05-r630: 0
-pcmk__bundle_allocate: scale1-bundle-2 allocation score on c09-h06-r630: 0
-pcmk__bundle_allocate: scale1-bundle-2 allocation score on c09-h07-r630: 0
-pcmk__bundle_allocate: scale1-bundle-2 allocation score on c09-h08-r630: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-2 allocation score on c09-h09-r630: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-2 allocation score on c09-h10-r630: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-3 allocation score on c09-h05-r630: 0
-pcmk__bundle_allocate: scale1-bundle-3 allocation score on c09-h06-r630: 0
-pcmk__bundle_allocate: scale1-bundle-3 allocation score on c09-h07-r630: 0
-pcmk__bundle_allocate: scale1-bundle-3 allocation score on c09-h08-r630: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-3 allocation score on c09-h09-r630: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-3 allocation score on c09-h10-r630: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-4 allocation score on c09-h05-r630: 0
-pcmk__bundle_allocate: scale1-bundle-4 allocation score on c09-h06-r630: 0
-pcmk__bundle_allocate: scale1-bundle-4 allocation score on c09-h07-r630: 0
-pcmk__bundle_allocate: scale1-bundle-4 allocation score on c09-h08-r630: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-4 allocation score on c09-h09-r630: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-4 allocation score on c09-h10-r630: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-5 allocation score on c09-h05-r630: 0
-pcmk__bundle_allocate: scale1-bundle-5 allocation score on c09-h06-r630: 0
-pcmk__bundle_allocate: scale1-bundle-5 allocation score on c09-h07-r630: 0
-pcmk__bundle_allocate: scale1-bundle-5 allocation score on c09-h08-r630: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-5 allocation score on c09-h09-r630: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-5 allocation score on c09-h10-r630: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-clone allocation score on c09-h05-r630: 0
-pcmk__bundle_allocate: scale1-bundle-clone allocation score on c09-h06-r630: 0
-pcmk__bundle_allocate: scale1-bundle-clone allocation score on c09-h07-r630: 0
-pcmk__bundle_allocate: scale1-bundle-clone allocation score on c09-h08-r630: 0
-pcmk__bundle_allocate: scale1-bundle-clone allocation score on c09-h09-r630: 0
-pcmk__bundle_allocate: scale1-bundle-clone allocation score on c09-h10-r630: 0
-pcmk__bundle_allocate: scale1-bundle-clone allocation score on scale1-bundle-0: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-clone allocation score on scale1-bundle-1: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-clone allocation score on scale1-bundle-2: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-clone allocation score on scale1-bundle-3: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-clone allocation score on scale1-bundle-4: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-clone allocation score on scale1-bundle-5: -INFINITY
-pcmk__bundle_allocate: scale1-bundle-docker-0 allocation score on c09-h05-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-0 allocation score on c09-h06-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-0 allocation score on c09-h07-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-0 allocation score on c09-h08-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-0 allocation score on c09-h09-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-0 allocation score on c09-h10-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-1 allocation score on c09-h05-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-1 allocation score on c09-h06-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-1 allocation score on c09-h07-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-1 allocation score on c09-h08-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-1 allocation score on c09-h09-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-1 allocation score on c09-h10-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-2 allocation score on c09-h05-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-2 allocation score on c09-h06-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-2 allocation score on c09-h07-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-2 allocation score on c09-h08-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-2 allocation score on c09-h09-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-2 allocation score on c09-h10-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-3 allocation score on c09-h05-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-3 allocation score on c09-h06-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-3 allocation score on c09-h07-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-3 allocation score on c09-h08-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-3 allocation score on c09-h09-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-3 allocation score on c09-h10-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-4 allocation score on c09-h05-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-4 allocation score on c09-h06-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-4 allocation score on c09-h07-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-4 allocation score on c09-h08-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-4 allocation score on c09-h09-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-4 allocation score on c09-h10-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-5 allocation score on c09-h05-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-5 allocation score on c09-h06-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-5 allocation score on c09-h07-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-5 allocation score on c09-h08-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-5 allocation score on c09-h09-r630: 0
-pcmk__bundle_allocate: scale1-bundle-docker-5 allocation score on c09-h10-r630: 0
+pcmk__bundle_assign: dummy1:0 allocation score on scale1-bundle-0: 500
+pcmk__bundle_assign: dummy1:1 allocation score on scale1-bundle-1: 500
+pcmk__bundle_assign: dummy1:2 allocation score on scale1-bundle-2: 500
+pcmk__bundle_assign: dummy1:3 allocation score on scale1-bundle-3: 500
+pcmk__bundle_assign: dummy1:4 allocation score on scale1-bundle-4: 500
+pcmk__bundle_assign: dummy1:5 allocation score on scale1-bundle-5: 500
+pcmk__bundle_assign: scale1-bundle allocation score on c09-h05-r630: 0
+pcmk__bundle_assign: scale1-bundle allocation score on c09-h06-r630: 0
+pcmk__bundle_assign: scale1-bundle allocation score on c09-h07-r630: 0
+pcmk__bundle_assign: scale1-bundle allocation score on c09-h08-r630: 0
+pcmk__bundle_assign: scale1-bundle allocation score on c09-h09-r630: 0
+pcmk__bundle_assign: scale1-bundle allocation score on c09-h10-r630: 0
+pcmk__bundle_assign: scale1-bundle-0 allocation score on c09-h05-r630: 0
+pcmk__bundle_assign: scale1-bundle-0 allocation score on c09-h06-r630: 0
+pcmk__bundle_assign: scale1-bundle-0 allocation score on c09-h07-r630: 0
+pcmk__bundle_assign: scale1-bundle-0 allocation score on c09-h08-r630: -INFINITY
+pcmk__bundle_assign: scale1-bundle-0 allocation score on c09-h09-r630: -INFINITY
+pcmk__bundle_assign: scale1-bundle-0 allocation score on c09-h10-r630: -INFINITY
+pcmk__bundle_assign: scale1-bundle-1 allocation score on c09-h05-r630: 0
+pcmk__bundle_assign: scale1-bundle-1 allocation score on c09-h06-r630: 0
+pcmk__bundle_assign: scale1-bundle-1 allocation score on c09-h07-r630: 0
+pcmk__bundle_assign: scale1-bundle-1 allocation score on c09-h08-r630: -INFINITY
+pcmk__bundle_assign: scale1-bundle-1 allocation score on c09-h09-r630: -INFINITY
+pcmk__bundle_assign: scale1-bundle-1 allocation score on c09-h10-r630: -INFINITY
+pcmk__bundle_assign: scale1-bundle-2 allocation score on c09-h05-r630: 0
+pcmk__bundle_assign: scale1-bundle-2 allocation score on c09-h06-r630: 0
+pcmk__bundle_assign: scale1-bundle-2 allocation score on c09-h07-r630: 0
+pcmk__bundle_assign: scale1-bundle-2 allocation score on c09-h08-r630: -INFINITY
+pcmk__bundle_assign: scale1-bundle-2 allocation score on c09-h09-r630: -INFINITY
+pcmk__bundle_assign: scale1-bundle-2 allocation score on c09-h10-r630: -INFINITY
+pcmk__bundle_assign: scale1-bundle-3 allocation score on c09-h05-r630: 0
+pcmk__bundle_assign: scale1-bundle-3 allocation score on c09-h06-r630: 0
+pcmk__bundle_assign: scale1-bundle-3 allocation score on c09-h07-r630: 0
+pcmk__bundle_assign: scale1-bundle-3 allocation score on c09-h08-r630: -INFINITY
+pcmk__bundle_assign: scale1-bundle-3 allocation score on c09-h09-r630: -INFINITY
+pcmk__bundle_assign: scale1-bundle-3 allocation score on c09-h10-r630: -INFINITY
+pcmk__bundle_assign: scale1-bundle-4 allocation score on c09-h05-r630: 0
+pcmk__bundle_assign: scale1-bundle-4 allocation score on c09-h06-r630: 0
+pcmk__bundle_assign: scale1-bundle-4 allocation score on c09-h07-r630: 0
+pcmk__bundle_assign: scale1-bundle-4 allocation score on c09-h08-r630: -INFINITY
+pcmk__bundle_assign: scale1-bundle-4 allocation score on c09-h09-r630: -INFINITY
+pcmk__bundle_assign: scale1-bundle-4 allocation score on c09-h10-r630: -INFINITY
+pcmk__bundle_assign: scale1-bundle-5 allocation score on c09-h05-r630: 0
+pcmk__bundle_assign: scale1-bundle-5 allocation score on c09-h06-r630: 0
+pcmk__bundle_assign: scale1-bundle-5 allocation score on c09-h07-r630: 0
+pcmk__bundle_assign: scale1-bundle-5 allocation score on c09-h08-r630: -INFINITY
+pcmk__bundle_assign: scale1-bundle-5 allocation score on c09-h09-r630: -INFINITY
+pcmk__bundle_assign: scale1-bundle-5 allocation score on c09-h10-r630: -INFINITY
+pcmk__bundle_assign: scale1-bundle-clone allocation score on c09-h05-r630: 0
+pcmk__bundle_assign: scale1-bundle-clone allocation score on c09-h06-r630: 0
+pcmk__bundle_assign: scale1-bundle-clone allocation score on c09-h07-r630: 0
+pcmk__bundle_assign: scale1-bundle-clone allocation score on c09-h08-r630: 0
+pcmk__bundle_assign: scale1-bundle-clone allocation score on c09-h09-r630: 0
+pcmk__bundle_assign: scale1-bundle-clone allocation score on c09-h10-r630: 0
+pcmk__bundle_assign: scale1-bundle-clone allocation score on scale1-bundle-0: -INFINITY
+pcmk__bundle_assign: scale1-bundle-clone allocation score on scale1-bundle-1: -INFINITY
+pcmk__bundle_assign: scale1-bundle-clone allocation score on scale1-bundle-2: -INFINITY
+pcmk__bundle_assign: scale1-bundle-clone allocation score on scale1-bundle-3: -INFINITY
+pcmk__bundle_assign: scale1-bundle-clone allocation score on scale1-bundle-4: -INFINITY
+pcmk__bundle_assign: scale1-bundle-clone allocation score on scale1-bundle-5: -INFINITY
+pcmk__bundle_assign: scale1-bundle-docker-0 allocation score on c09-h05-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-0 allocation score on c09-h06-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-0 allocation score on c09-h07-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-0 allocation score on c09-h08-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-0 allocation score on c09-h09-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-0 allocation score on c09-h10-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-1 allocation score on c09-h05-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-1 allocation score on c09-h06-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-1 allocation score on c09-h07-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-1 allocation score on c09-h08-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-1 allocation score on c09-h09-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-1 allocation score on c09-h10-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-2 allocation score on c09-h05-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-2 allocation score on c09-h06-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-2 allocation score on c09-h07-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-2 allocation score on c09-h08-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-2 allocation score on c09-h09-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-2 allocation score on c09-h10-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-3 allocation score on c09-h05-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-3 allocation score on c09-h06-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-3 allocation score on c09-h07-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-3 allocation score on c09-h08-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-3 allocation score on c09-h09-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-3 allocation score on c09-h10-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-4 allocation score on c09-h05-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-4 allocation score on c09-h06-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-4 allocation score on c09-h07-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-4 allocation score on c09-h08-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-4 allocation score on c09-h09-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-4 allocation score on c09-h10-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-5 allocation score on c09-h05-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-5 allocation score on c09-h06-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-5 allocation score on c09-h07-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-5 allocation score on c09-h08-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-5 allocation score on c09-h09-r630: 0
+pcmk__bundle_assign: scale1-bundle-docker-5 allocation score on c09-h10-r630: 0
pcmk__clone_assign: dummy1:0 allocation score on scale1-bundle-0: INFINITY
pcmk__clone_assign: dummy1:1 allocation score on scale1-bundle-1: INFINITY
pcmk__clone_assign: dummy1:2 allocation score on scale1-bundle-2: INFINITY
diff --git a/cts/scheduler/scores/bundle-promoted-anticolocation-1.scores b/cts/scheduler/scores/bundle-promoted-anticolocation-1.scores
new file mode 100644
index 0000000..cd53588
--- /dev/null
+++ b/cts/scheduler/scores/bundle-promoted-anticolocation-1.scores
@@ -0,0 +1,70 @@
+
+base:0 promotion score on base-bundle-0: 11
+base:1 promotion score on base-bundle-1: 12
+base:2 promotion score on base-bundle-2: 13
+pcmk__bundle_assign: base-bundle allocation score on node1: 0
+pcmk__bundle_assign: base-bundle allocation score on node2: 0
+pcmk__bundle_assign: base-bundle allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501
+pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501
+pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0
+pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 10000
+pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node3: 10000
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: vip allocation score on node1: 0
+pcmk__primitive_assign: vip allocation score on node2: 0
+pcmk__primitive_assign: vip allocation score on node3: -INFINITY
diff --git a/cts/scheduler/scores/bundle-promoted-anticolocation-2.scores b/cts/scheduler/scores/bundle-promoted-anticolocation-2.scores
new file mode 100644
index 0000000..9930eeb
--- /dev/null
+++ b/cts/scheduler/scores/bundle-promoted-anticolocation-2.scores
@@ -0,0 +1,70 @@
+
+base:0 promotion score on base-bundle-0: 11
+base:1 promotion score on base-bundle-1: 12
+base:2 promotion score on base-bundle-2: 13
+pcmk__bundle_assign: base-bundle allocation score on node1: 0
+pcmk__bundle_assign: base-bundle allocation score on node2: 0
+pcmk__bundle_assign: base-bundle allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501
+pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501
+pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0
+pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 10000
+pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node3: 10000
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: vip allocation score on node1: 0
+pcmk__primitive_assign: vip allocation score on node2: 0
+pcmk__primitive_assign: vip allocation score on node3: -5000
diff --git a/cts/scheduler/scores/bundle-promoted-anticolocation-3.scores b/cts/scheduler/scores/bundle-promoted-anticolocation-3.scores
new file mode 100644
index 0000000..63bea1c
--- /dev/null
+++ b/cts/scheduler/scores/bundle-promoted-anticolocation-3.scores
@@ -0,0 +1,70 @@
+
+base:0 promotion score on base-bundle-0: 11
+base:1 promotion score on base-bundle-1: 12
+base:2 promotion score on base-bundle-2: -INFINITY
+pcmk__bundle_assign: base-bundle allocation score on node1: 0
+pcmk__bundle_assign: base-bundle allocation score on node2: 0
+pcmk__bundle_assign: base-bundle allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501
+pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501
+pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0
+pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 10000
+pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node3: 10000
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: vip allocation score on node1: 0
+pcmk__primitive_assign: vip allocation score on node2: 0
+pcmk__primitive_assign: vip allocation score on node3: 0
diff --git a/cts/scheduler/scores/bundle-promoted-anticolocation-4.scores b/cts/scheduler/scores/bundle-promoted-anticolocation-4.scores
new file mode 100644
index 0000000..6e7cdd7
--- /dev/null
+++ b/cts/scheduler/scores/bundle-promoted-anticolocation-4.scores
@@ -0,0 +1,70 @@
+
+base:0 promotion score on base-bundle-0: 11
+base:1 promotion score on base-bundle-1: 12
+base:2 promotion score on base-bundle-2: -4987
+pcmk__bundle_assign: base-bundle allocation score on node1: 0
+pcmk__bundle_assign: base-bundle allocation score on node2: 0
+pcmk__bundle_assign: base-bundle allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501
+pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501
+pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0
+pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 10000
+pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node3: 10000
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: vip allocation score on node1: 0
+pcmk__primitive_assign: vip allocation score on node2: 0
+pcmk__primitive_assign: vip allocation score on node3: 0
diff --git a/cts/scheduler/scores/bundle-promoted-anticolocation-5.scores b/cts/scheduler/scores/bundle-promoted-anticolocation-5.scores
new file mode 100644
index 0000000..b36fa42
--- /dev/null
+++ b/cts/scheduler/scores/bundle-promoted-anticolocation-5.scores
@@ -0,0 +1,160 @@
+
+bundle-a-rsc:0 promotion score on bundle-a-0: 11
+bundle-a-rsc:1 promotion score on bundle-a-1: -INFINITY
+bundle-a-rsc:2 promotion score on bundle-a-2: 12
+bundle-b-rsc:0 promotion score on bundle-b-0: 12
+bundle-b-rsc:1 promotion score on bundle-b-1: 14
+bundle-b-rsc:2 promotion score on bundle-b-2: 13
+pcmk__bundle_assign: bundle-a allocation score on node1: 0
+pcmk__bundle_assign: bundle-a allocation score on node2: 0
+pcmk__bundle_assign: bundle-a allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-0 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-1 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-1 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-2 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-2 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-2 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-0: -INFINITY
+pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-1: -INFINITY
+pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-2: -INFINITY
+pcmk__bundle_assign: bundle-a-clone allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-clone allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-clone allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-rsc:0 allocation score on bundle-a-0: 501
+pcmk__bundle_assign: bundle-a-rsc:1 allocation score on bundle-a-1: 501
+pcmk__bundle_assign: bundle-a-rsc:2 allocation score on bundle-a-2: 501
+pcmk__bundle_assign: bundle-b allocation score on node1: 0
+pcmk__bundle_assign: bundle-b allocation score on node1: 0
+pcmk__bundle_assign: bundle-b allocation score on node2: 0
+pcmk__bundle_assign: bundle-b allocation score on node2: 0
+pcmk__bundle_assign: bundle-b allocation score on node3: 0
+pcmk__bundle_assign: bundle-b allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-0 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-0 allocation score on node1: 10000
+pcmk__bundle_assign: bundle-b-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node3: 10000
+pcmk__bundle_assign: bundle-b-2 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-2 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-2 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-2 allocation score on node2: 10000
+pcmk__bundle_assign: bundle-b-2 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-2 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-0: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-0: 12
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-1: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-1: 14
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-2: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-2: 13
+pcmk__bundle_assign: bundle-b-clone allocation score on node1: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-clone allocation score on node2: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-clone allocation score on node3: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node1: -INFINITY
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node1: -INFINITY
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node3: -INFINITY
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-rsc:0 allocation score on bundle-b-0: 501
+pcmk__bundle_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY
+pcmk__bundle_assign: bundle-b-rsc:1 allocation score on bundle-b-1: 501
+pcmk__bundle_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY
+pcmk__bundle_assign: bundle-b-rsc:2 allocation score on bundle-b-2: 501
+pcmk__bundle_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY
+pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-0: 0
+pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-1: 0
+pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-2: 0
+pcmk__clone_assign: bundle-a-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: bundle-a-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: bundle-a-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: bundle-a-rsc:0 allocation score on bundle-a-0: INFINITY
+pcmk__clone_assign: bundle-a-rsc:1 allocation score on bundle-a-1: INFINITY
+pcmk__clone_assign: bundle-a-rsc:2 allocation score on bundle-a-2: INFINITY
+pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-0: 0
+pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-1: 0
+pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-2: 0
+pcmk__clone_assign: bundle-b-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: bundle-b-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: bundle-b-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY
+pcmk__clone_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY
+pcmk__clone_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: bundle-a-0 allocation score on node1: 10000
+pcmk__primitive_assign: bundle-a-0 allocation score on node2: 0
+pcmk__primitive_assign: bundle-a-0 allocation score on node3: 0
+pcmk__primitive_assign: bundle-a-1 allocation score on node1: 0
+pcmk__primitive_assign: bundle-a-1 allocation score on node2: 0
+pcmk__primitive_assign: bundle-a-1 allocation score on node3: 10000
+pcmk__primitive_assign: bundle-a-2 allocation score on node1: 0
+pcmk__primitive_assign: bundle-a-2 allocation score on node2: 10000
+pcmk__primitive_assign: bundle-a-2 allocation score on node3: 0
+pcmk__primitive_assign: bundle-a-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: bundle-a-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: bundle-a-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: bundle-a-podman-1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: bundle-a-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: bundle-a-podman-1 allocation score on node3: 0
+pcmk__primitive_assign: bundle-a-podman-2 allocation score on node1: -INFINITY
+pcmk__primitive_assign: bundle-a-podman-2 allocation score on node2: 0
+pcmk__primitive_assign: bundle-a-podman-2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: bundle-a-rsc:0 allocation score on bundle-a-0: INFINITY
+pcmk__primitive_assign: bundle-a-rsc:1 allocation score on bundle-a-1: INFINITY
+pcmk__primitive_assign: bundle-a-rsc:2 allocation score on bundle-a-2: INFINITY
+pcmk__primitive_assign: bundle-b-0 allocation score on node1: 10000
+pcmk__primitive_assign: bundle-b-0 allocation score on node2: 0
+pcmk__primitive_assign: bundle-b-0 allocation score on node3: 0
+pcmk__primitive_assign: bundle-b-1 allocation score on node1: 0
+pcmk__primitive_assign: bundle-b-1 allocation score on node2: 0
+pcmk__primitive_assign: bundle-b-1 allocation score on node3: 10000
+pcmk__primitive_assign: bundle-b-2 allocation score on node1: 0
+pcmk__primitive_assign: bundle-b-2 allocation score on node2: 10000
+pcmk__primitive_assign: bundle-b-2 allocation score on node3: 0
+pcmk__primitive_assign: bundle-b-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: bundle-b-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: bundle-b-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: bundle-b-podman-1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: bundle-b-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: bundle-b-podman-1 allocation score on node3: 0
+pcmk__primitive_assign: bundle-b-podman-2 allocation score on node1: -INFINITY
+pcmk__primitive_assign: bundle-b-podman-2 allocation score on node2: 0
+pcmk__primitive_assign: bundle-b-podman-2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY
+pcmk__primitive_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY
+pcmk__primitive_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY
diff --git a/cts/scheduler/scores/bundle-promoted-anticolocation-6.scores b/cts/scheduler/scores/bundle-promoted-anticolocation-6.scores
new file mode 100644
index 0000000..779495e
--- /dev/null
+++ b/cts/scheduler/scores/bundle-promoted-anticolocation-6.scores
@@ -0,0 +1,160 @@
+
+bundle-a-rsc:0 promotion score on bundle-a-0: 11
+bundle-a-rsc:1 promotion score on bundle-a-1: -4987
+bundle-a-rsc:2 promotion score on bundle-a-2: 12
+bundle-b-rsc:0 promotion score on bundle-b-0: 12
+bundle-b-rsc:1 promotion score on bundle-b-1: 14
+bundle-b-rsc:2 promotion score on bundle-b-2: 13
+pcmk__bundle_assign: bundle-a allocation score on node1: 0
+pcmk__bundle_assign: bundle-a allocation score on node2: 0
+pcmk__bundle_assign: bundle-a allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-0 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-1 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-1 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-2 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-2 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-2 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-0: -INFINITY
+pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-1: -INFINITY
+pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-2: -INFINITY
+pcmk__bundle_assign: bundle-a-clone allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-clone allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-clone allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-rsc:0 allocation score on bundle-a-0: 501
+pcmk__bundle_assign: bundle-a-rsc:1 allocation score on bundle-a-1: 501
+pcmk__bundle_assign: bundle-a-rsc:2 allocation score on bundle-a-2: 501
+pcmk__bundle_assign: bundle-b allocation score on node1: 0
+pcmk__bundle_assign: bundle-b allocation score on node1: 0
+pcmk__bundle_assign: bundle-b allocation score on node2: 0
+pcmk__bundle_assign: bundle-b allocation score on node2: 0
+pcmk__bundle_assign: bundle-b allocation score on node3: 0
+pcmk__bundle_assign: bundle-b allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-0 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-0 allocation score on node1: 10000
+pcmk__bundle_assign: bundle-b-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node3: 10000
+pcmk__bundle_assign: bundle-b-2 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-2 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-2 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-2 allocation score on node2: 10000
+pcmk__bundle_assign: bundle-b-2 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-2 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-0: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-0: 12
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-1: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-1: 14
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-2: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-2: 13
+pcmk__bundle_assign: bundle-b-clone allocation score on node1: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-clone allocation score on node2: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-clone allocation score on node3: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node1: -INFINITY
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node1: -INFINITY
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node3: -INFINITY
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-rsc:0 allocation score on bundle-b-0: 501
+pcmk__bundle_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY
+pcmk__bundle_assign: bundle-b-rsc:1 allocation score on bundle-b-1: 501
+pcmk__bundle_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY
+pcmk__bundle_assign: bundle-b-rsc:2 allocation score on bundle-b-2: 501
+pcmk__bundle_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY
+pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-0: 0
+pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-1: 0
+pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-2: 0
+pcmk__clone_assign: bundle-a-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: bundle-a-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: bundle-a-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: bundle-a-rsc:0 allocation score on bundle-a-0: INFINITY
+pcmk__clone_assign: bundle-a-rsc:1 allocation score on bundle-a-1: INFINITY
+pcmk__clone_assign: bundle-a-rsc:2 allocation score on bundle-a-2: INFINITY
+pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-0: 0
+pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-1: 0
+pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-2: 0
+pcmk__clone_assign: bundle-b-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: bundle-b-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: bundle-b-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY
+pcmk__clone_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY
+pcmk__clone_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: bundle-a-0 allocation score on node1: 10000
+pcmk__primitive_assign: bundle-a-0 allocation score on node2: 0
+pcmk__primitive_assign: bundle-a-0 allocation score on node3: 0
+pcmk__primitive_assign: bundle-a-1 allocation score on node1: 0
+pcmk__primitive_assign: bundle-a-1 allocation score on node2: 0
+pcmk__primitive_assign: bundle-a-1 allocation score on node3: 10000
+pcmk__primitive_assign: bundle-a-2 allocation score on node1: 0
+pcmk__primitive_assign: bundle-a-2 allocation score on node2: 10000
+pcmk__primitive_assign: bundle-a-2 allocation score on node3: 0
+pcmk__primitive_assign: bundle-a-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: bundle-a-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: bundle-a-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: bundle-a-podman-1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: bundle-a-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: bundle-a-podman-1 allocation score on node3: 0
+pcmk__primitive_assign: bundle-a-podman-2 allocation score on node1: -INFINITY
+pcmk__primitive_assign: bundle-a-podman-2 allocation score on node2: 0
+pcmk__primitive_assign: bundle-a-podman-2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: bundle-a-rsc:0 allocation score on bundle-a-0: INFINITY
+pcmk__primitive_assign: bundle-a-rsc:1 allocation score on bundle-a-1: INFINITY
+pcmk__primitive_assign: bundle-a-rsc:2 allocation score on bundle-a-2: INFINITY
+pcmk__primitive_assign: bundle-b-0 allocation score on node1: 10000
+pcmk__primitive_assign: bundle-b-0 allocation score on node2: 0
+pcmk__primitive_assign: bundle-b-0 allocation score on node3: 0
+pcmk__primitive_assign: bundle-b-1 allocation score on node1: 0
+pcmk__primitive_assign: bundle-b-1 allocation score on node2: 0
+pcmk__primitive_assign: bundle-b-1 allocation score on node3: 10000
+pcmk__primitive_assign: bundle-b-2 allocation score on node1: 0
+pcmk__primitive_assign: bundle-b-2 allocation score on node2: 10000
+pcmk__primitive_assign: bundle-b-2 allocation score on node3: 0
+pcmk__primitive_assign: bundle-b-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: bundle-b-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: bundle-b-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: bundle-b-podman-1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: bundle-b-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: bundle-b-podman-1 allocation score on node3: 0
+pcmk__primitive_assign: bundle-b-podman-2 allocation score on node1: -INFINITY
+pcmk__primitive_assign: bundle-b-podman-2 allocation score on node2: 0
+pcmk__primitive_assign: bundle-b-podman-2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY
+pcmk__primitive_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY
+pcmk__primitive_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY
diff --git a/cts/scheduler/scores/bundle-promoted-colocation-1.scores b/cts/scheduler/scores/bundle-promoted-colocation-1.scores
new file mode 100644
index 0000000..36f2bc5
--- /dev/null
+++ b/cts/scheduler/scores/bundle-promoted-colocation-1.scores
@@ -0,0 +1,70 @@
+
+base:0 promotion score on base-bundle-0: 11
+base:1 promotion score on base-bundle-1: 12
+base:2 promotion score on base-bundle-2: 13
+pcmk__bundle_assign: base-bundle allocation score on node1: 0
+pcmk__bundle_assign: base-bundle allocation score on node2: 0
+pcmk__bundle_assign: base-bundle allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501
+pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501
+pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0
+pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 10000
+pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node3: 10000
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: vip allocation score on node1: -INFINITY
+pcmk__primitive_assign: vip allocation score on node2: -INFINITY
+pcmk__primitive_assign: vip allocation score on node3: 0
diff --git a/cts/scheduler/scores/bundle-promoted-colocation-2.scores b/cts/scheduler/scores/bundle-promoted-colocation-2.scores
new file mode 100644
index 0000000..384fbbb
--- /dev/null
+++ b/cts/scheduler/scores/bundle-promoted-colocation-2.scores
@@ -0,0 +1,70 @@
+
+base:0 promotion score on base-bundle-0: 11
+base:1 promotion score on base-bundle-1: 12
+base:2 promotion score on base-bundle-2: 13
+pcmk__bundle_assign: base-bundle allocation score on node1: 0
+pcmk__bundle_assign: base-bundle allocation score on node2: 0
+pcmk__bundle_assign: base-bundle allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501
+pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501
+pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0
+pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 10000
+pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node3: 10000
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: vip allocation score on node1: 0
+pcmk__primitive_assign: vip allocation score on node2: 0
+pcmk__primitive_assign: vip allocation score on node3: 5000
diff --git a/cts/scheduler/scores/bundle-promoted-colocation-3.scores b/cts/scheduler/scores/bundle-promoted-colocation-3.scores
new file mode 100644
index 0000000..1792152
--- /dev/null
+++ b/cts/scheduler/scores/bundle-promoted-colocation-3.scores
@@ -0,0 +1,70 @@
+
+base:0 promotion score on base-bundle-0: INFINITY
+base:1 promotion score on base-bundle-1: -INFINITY
+base:2 promotion score on base-bundle-2: -INFINITY
+pcmk__bundle_assign: base-bundle allocation score on node1: 0
+pcmk__bundle_assign: base-bundle allocation score on node2: 0
+pcmk__bundle_assign: base-bundle allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501
+pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501
+pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0
+pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 10000
+pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node3: 10000
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: vip allocation score on node1: 0
+pcmk__primitive_assign: vip allocation score on node2: 0
+pcmk__primitive_assign: vip allocation score on node3: 0
diff --git a/cts/scheduler/scores/bundle-promoted-colocation-4.scores b/cts/scheduler/scores/bundle-promoted-colocation-4.scores
new file mode 100644
index 0000000..3cb1ed9
--- /dev/null
+++ b/cts/scheduler/scores/bundle-promoted-colocation-4.scores
@@ -0,0 +1,70 @@
+
+base:0 promotion score on base-bundle-0: 5011
+base:1 promotion score on base-bundle-1: 12
+base:2 promotion score on base-bundle-2: 13
+pcmk__bundle_assign: base-bundle allocation score on node1: 0
+pcmk__bundle_assign: base-bundle allocation score on node2: 0
+pcmk__bundle_assign: base-bundle allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501
+pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501
+pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0
+pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 10000
+pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node3: 10000
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: vip allocation score on node1: 0
+pcmk__primitive_assign: vip allocation score on node2: 0
+pcmk__primitive_assign: vip allocation score on node3: 0
diff --git a/cts/scheduler/scores/bundle-promoted-colocation-5.scores b/cts/scheduler/scores/bundle-promoted-colocation-5.scores
new file mode 100644
index 0000000..a5bbab9
--- /dev/null
+++ b/cts/scheduler/scores/bundle-promoted-colocation-5.scores
@@ -0,0 +1,160 @@
+
+bundle-a-rsc:0 promotion score on bundle-a-0: 11
+bundle-a-rsc:1 promotion score on bundle-a-1: 13
+bundle-a-rsc:2 promotion score on bundle-a-2: INFINITY
+bundle-b-rsc:0 promotion score on bundle-b-0: 12
+bundle-b-rsc:1 promotion score on bundle-b-1: 11
+bundle-b-rsc:2 promotion score on bundle-b-2: 13
+pcmk__bundle_assign: bundle-a allocation score on node1: 0
+pcmk__bundle_assign: bundle-a allocation score on node2: 0
+pcmk__bundle_assign: bundle-a allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-0 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-1 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-1 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-2 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-2 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-2 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-0: -INFINITY
+pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-1: -INFINITY
+pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-2: -INFINITY
+pcmk__bundle_assign: bundle-a-clone allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-clone allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-clone allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-rsc:0 allocation score on bundle-a-0: 501
+pcmk__bundle_assign: bundle-a-rsc:1 allocation score on bundle-a-1: 501
+pcmk__bundle_assign: bundle-a-rsc:2 allocation score on bundle-a-2: 501
+pcmk__bundle_assign: bundle-b allocation score on node1: 0
+pcmk__bundle_assign: bundle-b allocation score on node1: 0
+pcmk__bundle_assign: bundle-b allocation score on node2: 0
+pcmk__bundle_assign: bundle-b allocation score on node2: 0
+pcmk__bundle_assign: bundle-b allocation score on node3: 0
+pcmk__bundle_assign: bundle-b allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-0 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-0 allocation score on node1: 10000
+pcmk__bundle_assign: bundle-b-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node3: 10000
+pcmk__bundle_assign: bundle-b-2 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-2 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-2 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-2 allocation score on node2: 10000
+pcmk__bundle_assign: bundle-b-2 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-2 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-0: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-0: 12
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-1: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-1: 11
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-2: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-2: 13
+pcmk__bundle_assign: bundle-b-clone allocation score on node1: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-clone allocation score on node2: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-clone allocation score on node3: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node1: -INFINITY
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node1: -INFINITY
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node3: -INFINITY
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-rsc:0 allocation score on bundle-b-0: 501
+pcmk__bundle_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY
+pcmk__bundle_assign: bundle-b-rsc:1 allocation score on bundle-b-1: 501
+pcmk__bundle_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY
+pcmk__bundle_assign: bundle-b-rsc:2 allocation score on bundle-b-2: 501
+pcmk__bundle_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY
+pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-0: 0
+pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-1: 0
+pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-2: 0
+pcmk__clone_assign: bundle-a-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: bundle-a-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: bundle-a-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: bundle-a-rsc:0 allocation score on bundle-a-0: INFINITY
+pcmk__clone_assign: bundle-a-rsc:1 allocation score on bundle-a-1: INFINITY
+pcmk__clone_assign: bundle-a-rsc:2 allocation score on bundle-a-2: INFINITY
+pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-0: 0
+pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-1: 0
+pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-2: 0
+pcmk__clone_assign: bundle-b-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: bundle-b-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: bundle-b-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY
+pcmk__clone_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY
+pcmk__clone_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: bundle-a-0 allocation score on node1: 10000
+pcmk__primitive_assign: bundle-a-0 allocation score on node2: 0
+pcmk__primitive_assign: bundle-a-0 allocation score on node3: 0
+pcmk__primitive_assign: bundle-a-1 allocation score on node1: 0
+pcmk__primitive_assign: bundle-a-1 allocation score on node2: 0
+pcmk__primitive_assign: bundle-a-1 allocation score on node3: 10000
+pcmk__primitive_assign: bundle-a-2 allocation score on node1: 0
+pcmk__primitive_assign: bundle-a-2 allocation score on node2: 10000
+pcmk__primitive_assign: bundle-a-2 allocation score on node3: 0
+pcmk__primitive_assign: bundle-a-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: bundle-a-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: bundle-a-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: bundle-a-podman-1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: bundle-a-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: bundle-a-podman-1 allocation score on node3: 0
+pcmk__primitive_assign: bundle-a-podman-2 allocation score on node1: -INFINITY
+pcmk__primitive_assign: bundle-a-podman-2 allocation score on node2: 0
+pcmk__primitive_assign: bundle-a-podman-2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: bundle-a-rsc:0 allocation score on bundle-a-0: INFINITY
+pcmk__primitive_assign: bundle-a-rsc:1 allocation score on bundle-a-1: INFINITY
+pcmk__primitive_assign: bundle-a-rsc:2 allocation score on bundle-a-2: INFINITY
+pcmk__primitive_assign: bundle-b-0 allocation score on node1: 10000
+pcmk__primitive_assign: bundle-b-0 allocation score on node2: 0
+pcmk__primitive_assign: bundle-b-0 allocation score on node3: 0
+pcmk__primitive_assign: bundle-b-1 allocation score on node1: 0
+pcmk__primitive_assign: bundle-b-1 allocation score on node2: 0
+pcmk__primitive_assign: bundle-b-1 allocation score on node3: 10000
+pcmk__primitive_assign: bundle-b-2 allocation score on node1: 0
+pcmk__primitive_assign: bundle-b-2 allocation score on node2: 10000
+pcmk__primitive_assign: bundle-b-2 allocation score on node3: 0
+pcmk__primitive_assign: bundle-b-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: bundle-b-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: bundle-b-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: bundle-b-podman-1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: bundle-b-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: bundle-b-podman-1 allocation score on node3: 0
+pcmk__primitive_assign: bundle-b-podman-2 allocation score on node1: -INFINITY
+pcmk__primitive_assign: bundle-b-podman-2 allocation score on node2: 0
+pcmk__primitive_assign: bundle-b-podman-2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY
+pcmk__primitive_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY
+pcmk__primitive_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY
diff --git a/cts/scheduler/scores/bundle-promoted-colocation-6.scores b/cts/scheduler/scores/bundle-promoted-colocation-6.scores
new file mode 100644
index 0000000..f31a870
--- /dev/null
+++ b/cts/scheduler/scores/bundle-promoted-colocation-6.scores
@@ -0,0 +1,160 @@
+
+bundle-a-rsc:0 promotion score on bundle-a-0: 11
+bundle-a-rsc:1 promotion score on bundle-a-1: 13
+bundle-a-rsc:2 promotion score on bundle-a-2: 5012
+bundle-b-rsc:0 promotion score on bundle-b-0: 12
+bundle-b-rsc:1 promotion score on bundle-b-1: 11
+bundle-b-rsc:2 promotion score on bundle-b-2: 13
+pcmk__bundle_assign: bundle-a allocation score on node1: 0
+pcmk__bundle_assign: bundle-a allocation score on node2: 0
+pcmk__bundle_assign: bundle-a allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-0 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-1 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-1 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-2 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-2 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-2 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-0: -INFINITY
+pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-1: -INFINITY
+pcmk__bundle_assign: bundle-a-clone allocation score on bundle-a-2: -INFINITY
+pcmk__bundle_assign: bundle-a-clone allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-clone allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-clone allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: bundle-a-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: bundle-a-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: bundle-a-rsc:0 allocation score on bundle-a-0: 501
+pcmk__bundle_assign: bundle-a-rsc:1 allocation score on bundle-a-1: 501
+pcmk__bundle_assign: bundle-a-rsc:2 allocation score on bundle-a-2: 501
+pcmk__bundle_assign: bundle-b allocation score on node1: 0
+pcmk__bundle_assign: bundle-b allocation score on node1: 0
+pcmk__bundle_assign: bundle-b allocation score on node2: 0
+pcmk__bundle_assign: bundle-b allocation score on node2: 0
+pcmk__bundle_assign: bundle-b allocation score on node3: 0
+pcmk__bundle_assign: bundle-b allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-0 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-0 allocation score on node1: 10000
+pcmk__bundle_assign: bundle-b-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-1 allocation score on node3: 10000
+pcmk__bundle_assign: bundle-b-2 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-2 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-2 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-2 allocation score on node2: 10000
+pcmk__bundle_assign: bundle-b-2 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-2 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-0: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-0: 12
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-1: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-1: 11
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-2: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on bundle-b-2: 13
+pcmk__bundle_assign: bundle-b-clone allocation score on node1: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-clone allocation score on node2: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-clone allocation score on node3: -INFINITY
+pcmk__bundle_assign: bundle-b-clone allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node1: -INFINITY
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node1: -INFINITY
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node3: -INFINITY
+pcmk__bundle_assign: bundle-b-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: bundle-b-rsc:0 allocation score on bundle-b-0: 501
+pcmk__bundle_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY
+pcmk__bundle_assign: bundle-b-rsc:1 allocation score on bundle-b-1: 501
+pcmk__bundle_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY
+pcmk__bundle_assign: bundle-b-rsc:2 allocation score on bundle-b-2: 501
+pcmk__bundle_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY
+pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-0: 0
+pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-1: 0
+pcmk__clone_assign: bundle-a-clone allocation score on bundle-a-2: 0
+pcmk__clone_assign: bundle-a-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: bundle-a-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: bundle-a-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: bundle-a-rsc:0 allocation score on bundle-a-0: INFINITY
+pcmk__clone_assign: bundle-a-rsc:1 allocation score on bundle-a-1: INFINITY
+pcmk__clone_assign: bundle-a-rsc:2 allocation score on bundle-a-2: INFINITY
+pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-0: 0
+pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-1: 0
+pcmk__clone_assign: bundle-b-clone allocation score on bundle-b-2: 0
+pcmk__clone_assign: bundle-b-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: bundle-b-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: bundle-b-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY
+pcmk__clone_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY
+pcmk__clone_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: bundle-a-0 allocation score on node1: 10000
+pcmk__primitive_assign: bundle-a-0 allocation score on node2: 0
+pcmk__primitive_assign: bundle-a-0 allocation score on node3: 0
+pcmk__primitive_assign: bundle-a-1 allocation score on node1: 0
+pcmk__primitive_assign: bundle-a-1 allocation score on node2: 0
+pcmk__primitive_assign: bundle-a-1 allocation score on node3: 10000
+pcmk__primitive_assign: bundle-a-2 allocation score on node1: 0
+pcmk__primitive_assign: bundle-a-2 allocation score on node2: 10000
+pcmk__primitive_assign: bundle-a-2 allocation score on node3: 0
+pcmk__primitive_assign: bundle-a-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: bundle-a-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: bundle-a-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: bundle-a-podman-1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: bundle-a-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: bundle-a-podman-1 allocation score on node3: 0
+pcmk__primitive_assign: bundle-a-podman-2 allocation score on node1: -INFINITY
+pcmk__primitive_assign: bundle-a-podman-2 allocation score on node2: 0
+pcmk__primitive_assign: bundle-a-podman-2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: bundle-a-rsc:0 allocation score on bundle-a-0: INFINITY
+pcmk__primitive_assign: bundle-a-rsc:1 allocation score on bundle-a-1: INFINITY
+pcmk__primitive_assign: bundle-a-rsc:2 allocation score on bundle-a-2: INFINITY
+pcmk__primitive_assign: bundle-b-0 allocation score on node1: 10000
+pcmk__primitive_assign: bundle-b-0 allocation score on node2: 0
+pcmk__primitive_assign: bundle-b-0 allocation score on node3: 0
+pcmk__primitive_assign: bundle-b-1 allocation score on node1: 0
+pcmk__primitive_assign: bundle-b-1 allocation score on node2: 0
+pcmk__primitive_assign: bundle-b-1 allocation score on node3: 10000
+pcmk__primitive_assign: bundle-b-2 allocation score on node1: 0
+pcmk__primitive_assign: bundle-b-2 allocation score on node2: 10000
+pcmk__primitive_assign: bundle-b-2 allocation score on node3: 0
+pcmk__primitive_assign: bundle-b-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: bundle-b-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: bundle-b-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: bundle-b-podman-1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: bundle-b-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: bundle-b-podman-1 allocation score on node3: 0
+pcmk__primitive_assign: bundle-b-podman-2 allocation score on node1: -INFINITY
+pcmk__primitive_assign: bundle-b-podman-2 allocation score on node2: 0
+pcmk__primitive_assign: bundle-b-podman-2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: bundle-b-rsc:0 allocation score on bundle-b-0: INFINITY
+pcmk__primitive_assign: bundle-b-rsc:1 allocation score on bundle-b-1: INFINITY
+pcmk__primitive_assign: bundle-b-rsc:2 allocation score on bundle-b-2: INFINITY
diff --git a/cts/scheduler/scores/bundle-promoted-location-1.scores b/cts/scheduler/scores/bundle-promoted-location-1.scores
new file mode 100644
index 0000000..6bf9a23
--- /dev/null
+++ b/cts/scheduler/scores/bundle-promoted-location-1.scores
@@ -0,0 +1,70 @@
+
+base:0 promotion score on base-bundle-0: 10
+base:1 promotion score on base-bundle-1: 5
+base:2 promotion score on base-bundle-2: 5
+pcmk__bundle_assign: base-bundle allocation score on node1: 0
+pcmk__bundle_assign: base-bundle allocation score on node2: 5000
+pcmk__bundle_assign: base-bundle allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 5000
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 5000
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 5000
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501
+pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501
+pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0
+pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000
+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 5000
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 5000
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY
diff --git a/cts/scheduler/scores/bundle-promoted-location-2.scores b/cts/scheduler/scores/bundle-promoted-location-2.scores
new file mode 100644
index 0000000..468a131
--- /dev/null
+++ b/cts/scheduler/scores/bundle-promoted-location-2.scores
@@ -0,0 +1,67 @@
+
+base:0 promotion score on base-bundle-0: -1
+base:1 promotion score on base-bundle-1: 5
+base:2 promotion score on base-bundle-2: 5
+pcmk__bundle_assign: base-bundle allocation score on node1: 0
+pcmk__bundle_assign: base-bundle allocation score on node2: 0
+pcmk__bundle_assign: base-bundle allocation score on node3: -INFINITY
+pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: -INFINITY
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: -INFINITY
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: -INFINITY
+pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501
+pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501
+pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0
+pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000
+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY
diff --git a/cts/scheduler/scores/bundle-promoted-location-3.scores b/cts/scheduler/scores/bundle-promoted-location-3.scores
new file mode 100644
index 0000000..fa937e9
--- /dev/null
+++ b/cts/scheduler/scores/bundle-promoted-location-3.scores
@@ -0,0 +1,67 @@
+
+base:0 promotion score on base-bundle-0: 10
+base:1 promotion score on base-bundle-1: 5
+base:2 promotion score on base-bundle-2: 5
+pcmk__bundle_assign: base-bundle allocation score on node1: 0
+pcmk__bundle_assign: base-bundle allocation score on node2: 0
+pcmk__bundle_assign: base-bundle allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501
+pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501
+pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0
+pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000
+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY
diff --git a/cts/scheduler/scores/bundle-promoted-location-4.scores b/cts/scheduler/scores/bundle-promoted-location-4.scores
new file mode 100644
index 0000000..fa937e9
--- /dev/null
+++ b/cts/scheduler/scores/bundle-promoted-location-4.scores
@@ -0,0 +1,67 @@
+
+base:0 promotion score on base-bundle-0: 10
+base:1 promotion score on base-bundle-1: 5
+base:2 promotion score on base-bundle-2: 5
+pcmk__bundle_assign: base-bundle allocation score on node1: 0
+pcmk__bundle_assign: base-bundle allocation score on node2: 0
+pcmk__bundle_assign: base-bundle allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501
+pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501
+pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0
+pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000
+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY
diff --git a/cts/scheduler/scores/bundle-promoted-location-5.scores b/cts/scheduler/scores/bundle-promoted-location-5.scores
new file mode 100644
index 0000000..eccb072
--- /dev/null
+++ b/cts/scheduler/scores/bundle-promoted-location-5.scores
@@ -0,0 +1,67 @@
+
+base:0 promotion score on base-bundle-0: 10
+base:1 promotion score on base-bundle-1: 5
+base:2 promotion score on base-bundle-2: 5
+pcmk__bundle_assign: base-bundle allocation score on node1: 0
+pcmk__bundle_assign: base-bundle allocation score on node2: 0
+pcmk__bundle_assign: base-bundle allocation score on node3: 5000
+pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 5000
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 5000
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 5000
+pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501
+pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501
+pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0
+pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000
+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 5000
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY
diff --git a/cts/scheduler/scores/bundle-promoted-location-6.scores b/cts/scheduler/scores/bundle-promoted-location-6.scores
new file mode 100644
index 0000000..0eb1d51
--- /dev/null
+++ b/cts/scheduler/scores/bundle-promoted-location-6.scores
@@ -0,0 +1,67 @@
+
+base:0 promotion score on base-bundle-0: 10
+base:1 promotion score on base-bundle-1: -1
+base:2 promotion score on base-bundle-2: 5
+pcmk__bundle_assign: base-bundle allocation score on node1: 0
+pcmk__bundle_assign: base-bundle allocation score on node2: -INFINITY
+pcmk__bundle_assign: base-bundle allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: -INFINITY
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: -INFINITY
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: -INFINITY
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501
+pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501
+pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 501
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0
+pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000
+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY
diff --git a/cts/scheduler/scores/bundle-replicas-change.scores b/cts/scheduler/scores/bundle-replicas-change.scores
index ade2466..01b1d5b 100644
--- a/cts/scheduler/scores/bundle-replicas-change.scores
+++ b/cts/scheduler/scores/bundle-replicas-change.scores
@@ -1,21 +1,21 @@
-pcmk__bundle_allocate: httpd-bundle allocation score on rh74-test: 0
-pcmk__bundle_allocate: httpd-bundle-0 allocation score on rh74-test: INFINITY
-pcmk__bundle_allocate: httpd-bundle-1 allocation score on rh74-test: 0
-pcmk__bundle_allocate: httpd-bundle-2 allocation score on rh74-test: 0
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-0: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-1: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-2: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on rh74-test: 0
-pcmk__bundle_allocate: httpd-bundle-docker-0 allocation score on rh74-test: INFINITY
-pcmk__bundle_allocate: httpd-bundle-docker-1 allocation score on rh74-test: 0
-pcmk__bundle_allocate: httpd-bundle-docker-2 allocation score on rh74-test: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.20.188 allocation score on rh74-test: INFINITY
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.20.189 allocation score on rh74-test: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.20.190 allocation score on rh74-test: 0
-pcmk__bundle_allocate: httpd:0 allocation score on httpd-bundle-0: 500
-pcmk__bundle_allocate: httpd:1 allocation score on httpd-bundle-1: 500
-pcmk__bundle_allocate: httpd:2 allocation score on httpd-bundle-2: 500
+pcmk__bundle_assign: httpd-bundle allocation score on rh74-test: 0
+pcmk__bundle_assign: httpd-bundle-0 allocation score on rh74-test: INFINITY
+pcmk__bundle_assign: httpd-bundle-1 allocation score on rh74-test: 0
+pcmk__bundle_assign: httpd-bundle-2 allocation score on rh74-test: 0
+pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-0: -INFINITY
+pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-1: -INFINITY
+pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-2: -INFINITY
+pcmk__bundle_assign: httpd-bundle-clone allocation score on rh74-test: 0
+pcmk__bundle_assign: httpd-bundle-docker-0 allocation score on rh74-test: INFINITY
+pcmk__bundle_assign: httpd-bundle-docker-1 allocation score on rh74-test: 0
+pcmk__bundle_assign: httpd-bundle-docker-2 allocation score on rh74-test: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.20.188 allocation score on rh74-test: INFINITY
+pcmk__bundle_assign: httpd-bundle-ip-192.168.20.189 allocation score on rh74-test: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.20.190 allocation score on rh74-test: 0
+pcmk__bundle_assign: httpd:0 allocation score on httpd-bundle-0: 500
+pcmk__bundle_assign: httpd:1 allocation score on httpd-bundle-1: 500
+pcmk__bundle_assign: httpd:2 allocation score on httpd-bundle-2: 500
pcmk__clone_assign: httpd-bundle-clone allocation score on httpd-bundle-0: 0
pcmk__clone_assign: httpd-bundle-clone allocation score on httpd-bundle-1: 0
pcmk__clone_assign: httpd-bundle-clone allocation score on httpd-bundle-2: 0
diff --git a/cts/scheduler/scores/cancel-behind-moving-remote.scores b/cts/scheduler/scores/cancel-behind-moving-remote.scores
index 0dfd78c..e79d28c 100644
--- a/cts/scheduler/scores/cancel-behind-moving-remote.scores
+++ b/cts/scheduler/scores/cancel-behind-moving-remote.scores
@@ -2,495 +2,495 @@
galera:0 promotion score on galera-bundle-0: 100
galera:1 promotion score on galera-bundle-1: 100
galera:2 promotion score on galera-bundle-2: 100
-ovndb_servers:0 promotion score on ovn-dbs-bundle-0: -1
-ovndb_servers:1 promotion score on ovn-dbs-bundle-1: 5
+ovndb_servers:0 promotion score on ovn-dbs-bundle-0: 5
+ovndb_servers:1 promotion score on ovn-dbs-bundle-1: 1
ovndb_servers:2 promotion score on ovn-dbs-bundle-2: 5
-pcmk__bundle_allocate: galera-bundle allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-0: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-1: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-2: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-0: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-1: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-2: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-0: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-1: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on compute-0: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on compute-1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-0: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-2: 0
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 501
-pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: 501
-pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: 501
-pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-0: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-1: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-2: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on compute-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on compute-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-2: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: 500
-pcmk__bundle_allocate: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: 501
-pcmk__bundle_allocate: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: 501
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on compute-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on compute-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: 501
-pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: 500
-pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: 501
-pcmk__bundle_allocate: redis-bundle allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on database-0: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on database-1: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on database-2: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-0: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-1: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-2: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on database-0: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on database-1: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on database-2: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-0: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-1: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-2: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on database-0: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on database-1: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on database-2: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-0: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-1: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on compute-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on compute-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on database-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on database-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on database-2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: 501
-pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: 501
-pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: 501
+pcmk__bundle_assign: galera-bundle allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-0: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-1: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-2: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-0: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-1: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-2: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-0: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-1: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on compute-0: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on compute-1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on messaging-0: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on messaging-1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on messaging-2: 0
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 501
+pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: 501
+pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: 501
+pcmk__bundle_assign: haproxy-bundle allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-0: 0
+pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-1: 0
+pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-2: 0
+pcmk__bundle_assign: openstack-cinder-volume allocation score on database-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on database-1: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on database-2: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on controller-0: 0
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on controller-1: 0
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on controller-2: 0
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on database-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on database-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on database-2: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on controller-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on database-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on database-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on database-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on messaging-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on messaging-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on messaging-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on controller-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on database-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on database-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on database-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on messaging-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on messaging-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on messaging-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on controller-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on database-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on database-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on database-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on messaging-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on messaging-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on messaging-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on compute-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on compute-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on controller-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on controller-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on controller-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on database-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on database-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on database-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on messaging-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on messaging-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on messaging-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on controller-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on controller-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on controller-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on controller-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on controller-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on controller-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on controller-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on controller-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: 500
+pcmk__bundle_assign: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: 501
+pcmk__bundle_assign: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: 501
+pcmk__bundle_assign: rabbitmq-bundle allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on database-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on database-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on database-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on database-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on database-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on database-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on compute-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on compute-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: 501
+pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: 500
+pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: 501
+pcmk__bundle_assign: redis-bundle allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle allocation score on database-0: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on database-1: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on database-2: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on database-0: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on database-1: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on database-2: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-0: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-1: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-2: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on database-0: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on database-1: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on database-2: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on messaging-0: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on messaging-1: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on messaging-2: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on database-0: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on database-1: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on database-2: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-0: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-1: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on compute-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on compute-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on database-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on database-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on database-2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on messaging-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on messaging-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on messaging-2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: 501
+pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: 501
+pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: 501
pcmk__clone_assign: compute-unfence-trigger-clone allocation score on compute-0: 0
pcmk__clone_assign: compute-unfence-trigger-clone allocation score on compute-1: 0
pcmk__clone_assign: compute-unfence-trigger-clone allocation score on controller-0: -INFINITY
@@ -1799,8 +1799,8 @@ pcmk__primitive_assign: ip-172.17.1.151 allocation score on messaging-1: -INFINI
pcmk__primitive_assign: ip-172.17.1.151 allocation score on messaging-2: -INFINITY
pcmk__primitive_assign: ip-172.17.1.87 allocation score on compute-0: -INFINITY
pcmk__primitive_assign: ip-172.17.1.87 allocation score on compute-1: -INFINITY
-pcmk__primitive_assign: ip-172.17.1.87 allocation score on controller-0: 0
-pcmk__primitive_assign: ip-172.17.1.87 allocation score on controller-1: -INFINITY
+pcmk__primitive_assign: ip-172.17.1.87 allocation score on controller-0: -INFINITY
+pcmk__primitive_assign: ip-172.17.1.87 allocation score on controller-1: 0
pcmk__primitive_assign: ip-172.17.1.87 allocation score on controller-2: -INFINITY
pcmk__primitive_assign: ip-172.17.1.87 allocation score on database-0: -INFINITY
pcmk__primitive_assign: ip-172.17.1.87 allocation score on database-1: -INFINITY
@@ -1865,9 +1865,9 @@ pcmk__primitive_assign: openstack-cinder-volume-podman-0 allocation score on mes
pcmk__primitive_assign: openstack-cinder-volume-podman-0 allocation score on messaging-2: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on compute-0: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on compute-1: -INFINITY
-pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on controller-0: 0
+pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on controller-0: 10000
pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on controller-1: 0
-pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on controller-2: 10000
+pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on controller-2: 0
pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on database-0: 0
pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on database-1: 0
pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on database-2: 0
@@ -1876,9 +1876,9 @@ pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on messaging-1: 0
pcmk__primitive_assign: ovn-dbs-bundle-0 allocation score on messaging-2: 0
pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on compute-0: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on compute-1: -INFINITY
-pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on controller-0: 10000
+pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on controller-0: 0
pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on controller-1: 0
-pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on controller-2: 0
+pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on controller-2: 10000
pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on database-0: 0
pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on database-1: 0
pcmk__primitive_assign: ovn-dbs-bundle-1 allocation score on database-2: 0
@@ -1898,9 +1898,9 @@ pcmk__primitive_assign: ovn-dbs-bundle-2 allocation score on messaging-1: 0
pcmk__primitive_assign: ovn-dbs-bundle-2 allocation score on messaging-2: 0
pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on compute-0: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on compute-1: -INFINITY
-pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on controller-0: -INFINITY
+pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on controller-0: 0
pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on controller-1: -INFINITY
-pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on controller-2: 0
+pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on controller-2: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on database-0: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on database-1: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on database-2: -INFINITY
@@ -1909,24 +1909,35 @@ pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on messaging-1:
pcmk__primitive_assign: ovn-dbs-bundle-podman-0 allocation score on messaging-2: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY
+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY
-pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0
+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY
+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-0: -INFINITY
+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-0: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0
pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-1: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-1: 0
+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-1: 0
pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-2: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-2: -INFINITY
+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on controller-2: 0
+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY
+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY
+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY
+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY
+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY
+pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY
pcmk__primitive_assign: ovn-dbs-bundle-podman-2 allocation score on compute-0: -INFINITY
diff --git a/cts/scheduler/scores/clone-anon-failcount.scores b/cts/scheduler/scores/clone-anon-failcount.scores
index a01e0f3..435546d 100644
--- a/cts/scheduler/scores/clone-anon-failcount.scores
+++ b/cts/scheduler/scores/clone-anon-failcount.scores
@@ -224,69 +224,85 @@ pcmk__primitive_assign: UmVIPcheck allocation score on srv02: -INFINITY
pcmk__primitive_assign: UmVIPcheck allocation score on srv03: -INFINITY
pcmk__primitive_assign: UmVIPcheck allocation score on srv04: 100
pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv01: -INFINITY
-pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv02: 100
-pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv03: 0
+pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv02: 101
+pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv03: -1000
pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv04: -INFINITY
pcmk__primitive_assign: clnG3dummy01:1 allocation score on srv01: -INFINITY
pcmk__primitive_assign: clnG3dummy01:1 allocation score on srv02: -INFINITY
-pcmk__primitive_assign: clnG3dummy01:1 allocation score on srv03: 100
+pcmk__primitive_assign: clnG3dummy01:1 allocation score on srv03: 101
pcmk__primitive_assign: clnG3dummy01:1 allocation score on srv04: -INFINITY
pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv01: -INFINITY
-pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv02: 0
-pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv03: 0
-pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv04: 100
-pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv01: 100
-pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv02: 0
-pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv03: 0
-pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv04: 0
+pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv02: -2999
+pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv03: -2999
+pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv04: 104
+pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv01: -1894
+pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv01: 106
+pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv02: -2000
+pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv02: -2999
+pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv03: -2000
+pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv03: -2999
+pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv04: -INFINITY
+pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv04: 4
pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv01: -INFINITY
-pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv02: 100
-pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv03: 0
+pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv02: 101
+pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv03: -1000
pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv04: -INFINITY
pcmk__primitive_assign: clnG3dummy02:1 allocation score on srv01: -INFINITY
pcmk__primitive_assign: clnG3dummy02:1 allocation score on srv02: -INFINITY
-pcmk__primitive_assign: clnG3dummy02:1 allocation score on srv03: 100
+pcmk__primitive_assign: clnG3dummy02:1 allocation score on srv03: 101
pcmk__primitive_assign: clnG3dummy02:1 allocation score on srv04: -INFINITY
pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv01: -INFINITY
-pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv02: 0
-pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv03: 0
-pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv04: 100
-pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv01: 100
-pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv02: 0
-pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv03: 0
-pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv04: 0
+pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv02: -2999
+pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv03: -2999
+pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv04: 105
+pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv01: -1893
+pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv01: 107
+pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv02: -2000
+pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv02: -2999
+pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv03: -2000
+pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv03: -2999
+pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv04: -INFINITY
+pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv04: 5
pcmk__primitive_assign: clnPrmDiskd1:0 allocation score on srv01: -INFINITY
-pcmk__primitive_assign: clnPrmDiskd1:0 allocation score on srv02: 100
-pcmk__primitive_assign: clnPrmDiskd1:0 allocation score on srv03: 0
+pcmk__primitive_assign: clnPrmDiskd1:0 allocation score on srv02: 101
+pcmk__primitive_assign: clnPrmDiskd1:0 allocation score on srv03: -1000
pcmk__primitive_assign: clnPrmDiskd1:0 allocation score on srv04: -INFINITY
pcmk__primitive_assign: clnPrmDiskd1:1 allocation score on srv01: -INFINITY
pcmk__primitive_assign: clnPrmDiskd1:1 allocation score on srv02: -INFINITY
-pcmk__primitive_assign: clnPrmDiskd1:1 allocation score on srv03: 100
+pcmk__primitive_assign: clnPrmDiskd1:1 allocation score on srv03: 101
pcmk__primitive_assign: clnPrmDiskd1:1 allocation score on srv04: -INFINITY
pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv01: -INFINITY
-pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv02: 0
-pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv03: 0
-pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv04: 100
-pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv01: 100
-pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv02: 0
-pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv03: 0
-pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv04: 0
+pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv02: -2999
+pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv03: -2999
+pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv04: 104
+pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv01: -1895
+pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv01: 105
+pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv02: -2000
+pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv02: -2999
+pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv03: -2000
+pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv03: -2999
+pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv04: -INFINITY
+pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv04: 4
pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv01: -INFINITY
-pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv02: 100
-pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv03: 0
+pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv02: 101
+pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv03: -1000
pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv04: -INFINITY
pcmk__primitive_assign: clnPrmPingd:1 allocation score on srv01: -INFINITY
pcmk__primitive_assign: clnPrmPingd:1 allocation score on srv02: -INFINITY
-pcmk__primitive_assign: clnPrmPingd:1 allocation score on srv03: 100
+pcmk__primitive_assign: clnPrmPingd:1 allocation score on srv03: 101
pcmk__primitive_assign: clnPrmPingd:1 allocation score on srv04: -INFINITY
pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv01: -INFINITY
-pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv02: 0
-pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv03: 0
-pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv04: 100
-pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv01: 100
-pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv02: 0
-pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv03: 0
-pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv04: 0
+pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv02: -2999
+pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv03: -2999
+pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv04: 106
+pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv01: -1892
+pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv01: 108
+pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv02: -2000
+pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv02: -2999
+pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv03: -2000
+pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv03: -2999
+pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv04: -INFINITY
+pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv04: 6
pcmk__primitive_assign: clnUMdummy01:0 allocation score on srv01: -INFINITY
pcmk__primitive_assign: clnUMdummy01:0 allocation score on srv02: -INFINITY
pcmk__primitive_assign: clnUMdummy01:0 allocation score on srv03: -INFINITY
diff --git a/cts/scheduler/scores/clone-fail-block-colocation.scores b/cts/scheduler/scores/clone-fail-block-colocation.scores
index 1925eeb..c4cee8c 100644
--- a/cts/scheduler/scores/clone-fail-block-colocation.scores
+++ b/cts/scheduler/scores/clone-fail-block-colocation.scores
@@ -37,7 +37,9 @@ pcmk__primitive_assign: d_bird:1 allocation score on DEM-1: -INFINITY
pcmk__primitive_assign: d_bird:1 allocation score on DEM-2: 1
pcmk__primitive_assign: d_bird_subnet_state allocation score on DEM-1: -INFINITY
pcmk__primitive_assign: d_bird_subnet_state allocation score on DEM-2: 0
+pcmk__primitive_assign: d_tomcat_nms:0 allocation score on DEM-1: -INFINITY
pcmk__primitive_assign: d_tomcat_nms:0 allocation score on DEM-1: 1
+pcmk__primitive_assign: d_tomcat_nms:0 allocation score on DEM-2: -INFINITY
pcmk__primitive_assign: d_tomcat_nms:0 allocation score on DEM-2: 0
pcmk__primitive_assign: d_tomcat_nms:1 allocation score on DEM-1: -INFINITY
pcmk__primitive_assign: d_tomcat_nms:1 allocation score on DEM-2: 1
diff --git a/cts/scheduler/scores/clone-max-zero.scores b/cts/scheduler/scores/clone-max-zero.scores
index f1711b7..bd116a2 100644
--- a/cts/scheduler/scores/clone-max-zero.scores
+++ b/cts/scheduler/scores/clone-max-zero.scores
@@ -26,10 +26,18 @@ pcmk__primitive_assign: drbd0:1 allocation score on c001n12: -INFINITY
pcmk__primitive_assign: fencing allocation score on c001n11: 0
pcmk__primitive_assign: fencing allocation score on c001n12: 0
pcmk__primitive_assign: o2cb:0 allocation score on c001n11: -INFINITY
+pcmk__primitive_assign: o2cb:0 allocation score on c001n11: -INFINITY
+pcmk__primitive_assign: o2cb:0 allocation score on c001n12: -INFINITY
pcmk__primitive_assign: o2cb:0 allocation score on c001n12: -INFINITY
pcmk__primitive_assign: o2cb:1 allocation score on c001n11: -INFINITY
+pcmk__primitive_assign: o2cb:1 allocation score on c001n11: -INFINITY
+pcmk__primitive_assign: o2cb:1 allocation score on c001n12: -INFINITY
pcmk__primitive_assign: o2cb:1 allocation score on c001n12: -INFINITY
pcmk__primitive_assign: ocfs2-1:0 allocation score on c001n11: -INFINITY
+pcmk__primitive_assign: ocfs2-1:0 allocation score on c001n11: -INFINITY
+pcmk__primitive_assign: ocfs2-1:0 allocation score on c001n12: -INFINITY
pcmk__primitive_assign: ocfs2-1:0 allocation score on c001n12: -INFINITY
pcmk__primitive_assign: ocfs2-1:1 allocation score on c001n11: -INFINITY
+pcmk__primitive_assign: ocfs2-1:1 allocation score on c001n11: -INFINITY
+pcmk__primitive_assign: ocfs2-1:1 allocation score on c001n12: -INFINITY
pcmk__primitive_assign: ocfs2-1:1 allocation score on c001n12: -INFINITY
diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-1.scores b/cts/scheduler/scores/clone-recover-no-shuffle-1.scores
new file mode 100644
index 0000000..c1d60b2
--- /dev/null
+++ b/cts/scheduler/scores/clone-recover-no-shuffle-1.scores
@@ -0,0 +1,25 @@
+
+pcmk__clone_assign: dummy-clone allocation score on node1: 0
+pcmk__clone_assign: dummy-clone allocation score on node2: 0
+pcmk__clone_assign: dummy-clone allocation score on node3: 0
+pcmk__clone_assign: dummy:0 allocation score on node1: 0
+pcmk__clone_assign: dummy:0 allocation score on node2: 1
+pcmk__clone_assign: dummy:0 allocation score on node3: 0
+pcmk__clone_assign: dummy:1 allocation score on node1: 0
+pcmk__clone_assign: dummy:1 allocation score on node2: 0
+pcmk__clone_assign: dummy:1 allocation score on node3: 1
+pcmk__clone_assign: dummy:2 allocation score on node1: 0
+pcmk__clone_assign: dummy:2 allocation score on node2: 0
+pcmk__clone_assign: dummy:2 allocation score on node3: 0
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: dummy:0 allocation score on node1: 0
+pcmk__primitive_assign: dummy:0 allocation score on node2: 1
+pcmk__primitive_assign: dummy:0 allocation score on node3: 0
+pcmk__primitive_assign: dummy:1 allocation score on node1: 0
+pcmk__primitive_assign: dummy:1 allocation score on node2: -INFINITY
+pcmk__primitive_assign: dummy:1 allocation score on node3: 1
+pcmk__primitive_assign: dummy:2 allocation score on node1: 0
+pcmk__primitive_assign: dummy:2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: dummy:2 allocation score on node3: -INFINITY
diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-10.scores b/cts/scheduler/scores/clone-recover-no-shuffle-10.scores
new file mode 100644
index 0000000..4f4c29e
--- /dev/null
+++ b/cts/scheduler/scores/clone-recover-no-shuffle-10.scores
@@ -0,0 +1,31 @@
+
+dummy:0 promotion score on node3: 5
+dummy:1 promotion score on node2: 15
+dummy:2 promotion score on node1: 10
+pcmk__clone_assign: dummy-clone allocation score on node1: 0
+pcmk__clone_assign: dummy-clone allocation score on node2: 0
+pcmk__clone_assign: dummy-clone allocation score on node3: 0
+pcmk__clone_assign: dummy:0 allocation score on node1: 10
+pcmk__clone_assign: dummy:0 allocation score on node2: 0
+pcmk__clone_assign: dummy:0 allocation score on node3: 6
+pcmk__clone_assign: dummy:1 allocation score on node1: 10
+pcmk__clone_assign: dummy:1 allocation score on node2: 16
+pcmk__clone_assign: dummy:1 allocation score on node3: 0
+pcmk__clone_assign: dummy:2 allocation score on node1: 10
+pcmk__clone_assign: dummy:2 allocation score on node2: 15
+pcmk__clone_assign: dummy:2 allocation score on node3: 5
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: dummy:0 allocation score on node1: -INFINITY
+pcmk__primitive_assign: dummy:0 allocation score on node1: 10
+pcmk__primitive_assign: dummy:0 allocation score on node2: -INFINITY
+pcmk__primitive_assign: dummy:0 allocation score on node2: -INFINITY
+pcmk__primitive_assign: dummy:0 allocation score on node3: 6
+pcmk__primitive_assign: dummy:0 allocation score on node3: 6
+pcmk__primitive_assign: dummy:1 allocation score on node1: 10
+pcmk__primitive_assign: dummy:1 allocation score on node2: 16
+pcmk__primitive_assign: dummy:1 allocation score on node3: 0
+pcmk__primitive_assign: dummy:2 allocation score on node1: 10
+pcmk__primitive_assign: dummy:2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: dummy:2 allocation score on node3: -INFINITY
diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-11.scores b/cts/scheduler/scores/clone-recover-no-shuffle-11.scores
new file mode 100644
index 0000000..1216dba
--- /dev/null
+++ b/cts/scheduler/scores/clone-recover-no-shuffle-11.scores
@@ -0,0 +1,82 @@
+
+grp:0 promotion score on node3: 10
+grp:1 promotion score on node2: 30
+grp:2 promotion score on node1: 20
+pcmk__clone_assign: grp-clone allocation score on node1: 0
+pcmk__clone_assign: grp-clone allocation score on node2: 0
+pcmk__clone_assign: grp-clone allocation score on node3: 0
+pcmk__clone_assign: grp:0 allocation score on node1: 20
+pcmk__clone_assign: grp:0 allocation score on node2: 0
+pcmk__clone_assign: grp:0 allocation score on node3: 10
+pcmk__clone_assign: grp:1 allocation score on node1: 20
+pcmk__clone_assign: grp:1 allocation score on node2: 30
+pcmk__clone_assign: grp:1 allocation score on node3: 0
+pcmk__clone_assign: grp:2 allocation score on node1: 20
+pcmk__clone_assign: grp:2 allocation score on node2: 30
+pcmk__clone_assign: grp:2 allocation score on node3: 10
+pcmk__clone_assign: rsc1:0 allocation score on node1: 0
+pcmk__clone_assign: rsc1:0 allocation score on node2: 0
+pcmk__clone_assign: rsc1:0 allocation score on node3: 1
+pcmk__clone_assign: rsc1:1 allocation score on node1: 0
+pcmk__clone_assign: rsc1:1 allocation score on node2: 1
+pcmk__clone_assign: rsc1:1 allocation score on node3: 0
+pcmk__clone_assign: rsc1:2 allocation score on node1: 0
+pcmk__clone_assign: rsc1:2 allocation score on node2: 0
+pcmk__clone_assign: rsc1:2 allocation score on node3: 0
+pcmk__clone_assign: rsc2:0 allocation score on node1: 0
+pcmk__clone_assign: rsc2:0 allocation score on node2: 0
+pcmk__clone_assign: rsc2:0 allocation score on node3: 1
+pcmk__clone_assign: rsc2:1 allocation score on node1: 0
+pcmk__clone_assign: rsc2:1 allocation score on node2: 1
+pcmk__clone_assign: rsc2:1 allocation score on node3: 0
+pcmk__clone_assign: rsc2:2 allocation score on node1: 0
+pcmk__clone_assign: rsc2:2 allocation score on node2: 0
+pcmk__clone_assign: rsc2:2 allocation score on node3: 0
+pcmk__group_assign: grp:0 allocation score on node1: 20
+pcmk__group_assign: grp:0 allocation score on node2: -INFINITY
+pcmk__group_assign: grp:0 allocation score on node3: 10
+pcmk__group_assign: grp:1 allocation score on node1: 20
+pcmk__group_assign: grp:1 allocation score on node2: 30
+pcmk__group_assign: grp:1 allocation score on node3: 0
+pcmk__group_assign: grp:2 allocation score on node1: 20
+pcmk__group_assign: grp:2 allocation score on node2: -INFINITY
+pcmk__group_assign: grp:2 allocation score on node3: -INFINITY
+pcmk__group_assign: rsc1:0 allocation score on node1: 0
+pcmk__group_assign: rsc1:0 allocation score on node2: -INFINITY
+pcmk__group_assign: rsc1:0 allocation score on node3: 1
+pcmk__group_assign: rsc1:1 allocation score on node1: 0
+pcmk__group_assign: rsc1:1 allocation score on node2: 1
+pcmk__group_assign: rsc1:1 allocation score on node3: 0
+pcmk__group_assign: rsc1:2 allocation score on node1: 0
+pcmk__group_assign: rsc1:2 allocation score on node2: -INFINITY
+pcmk__group_assign: rsc1:2 allocation score on node3: -INFINITY
+pcmk__group_assign: rsc2:0 allocation score on node1: 0
+pcmk__group_assign: rsc2:0 allocation score on node2: -INFINITY
+pcmk__group_assign: rsc2:0 allocation score on node3: 1
+pcmk__group_assign: rsc2:1 allocation score on node1: 0
+pcmk__group_assign: rsc2:1 allocation score on node2: 1
+pcmk__group_assign: rsc2:1 allocation score on node3: 0
+pcmk__group_assign: rsc2:2 allocation score on node1: 0
+pcmk__group_assign: rsc2:2 allocation score on node2: -INFINITY
+pcmk__group_assign: rsc2:2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: rsc1:0 allocation score on node1: 0
+pcmk__primitive_assign: rsc1:0 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc1:0 allocation score on node3: 2
+pcmk__primitive_assign: rsc1:1 allocation score on node1: 0
+pcmk__primitive_assign: rsc1:1 allocation score on node2: 2
+pcmk__primitive_assign: rsc1:1 allocation score on node3: 0
+pcmk__primitive_assign: rsc1:2 allocation score on node1: 0
+pcmk__primitive_assign: rsc1:2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc1:2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc2:0 allocation score on node1: -INFINITY
+pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc2:0 allocation score on node3: 1
+pcmk__primitive_assign: rsc2:1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: rsc2:1 allocation score on node2: 1
+pcmk__primitive_assign: rsc2:1 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc2:2 allocation score on node1: 0
+pcmk__primitive_assign: rsc2:2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc2:2 allocation score on node3: -INFINITY
diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-12.scores b/cts/scheduler/scores/clone-recover-no-shuffle-12.scores
new file mode 100644
index 0000000..fb96134
--- /dev/null
+++ b/cts/scheduler/scores/clone-recover-no-shuffle-12.scores
@@ -0,0 +1,67 @@
+
+base:0 promotion score on base-bundle-0: 5
+base:1 promotion score on base-bundle-1: 15
+base:2 promotion score on base-bundle-2: 10
+pcmk__bundle_assign: base-bundle allocation score on node1: 0
+pcmk__bundle_assign: base-bundle allocation score on node2: 0
+pcmk__bundle_assign: base-bundle allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501
+pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501
+pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 500
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0
+pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000
+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY
diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-2.scores b/cts/scheduler/scores/clone-recover-no-shuffle-2.scores
new file mode 100644
index 0000000..cfbd5bf
--- /dev/null
+++ b/cts/scheduler/scores/clone-recover-no-shuffle-2.scores
@@ -0,0 +1,79 @@
+
+pcmk__clone_assign: grp-clone allocation score on node1: 0
+pcmk__clone_assign: grp-clone allocation score on node2: 0
+pcmk__clone_assign: grp-clone allocation score on node3: 0
+pcmk__clone_assign: grp:0 allocation score on node1: 0
+pcmk__clone_assign: grp:0 allocation score on node2: 0
+pcmk__clone_assign: grp:0 allocation score on node3: 0
+pcmk__clone_assign: grp:1 allocation score on node1: 0
+pcmk__clone_assign: grp:1 allocation score on node2: 0
+pcmk__clone_assign: grp:1 allocation score on node3: 0
+pcmk__clone_assign: grp:2 allocation score on node1: 0
+pcmk__clone_assign: grp:2 allocation score on node2: 0
+pcmk__clone_assign: grp:2 allocation score on node3: 0
+pcmk__clone_assign: rsc1:0 allocation score on node1: 0
+pcmk__clone_assign: rsc1:0 allocation score on node2: 1
+pcmk__clone_assign: rsc1:0 allocation score on node3: 0
+pcmk__clone_assign: rsc1:1 allocation score on node1: 0
+pcmk__clone_assign: rsc1:1 allocation score on node2: 0
+pcmk__clone_assign: rsc1:1 allocation score on node3: 1
+pcmk__clone_assign: rsc1:2 allocation score on node1: 0
+pcmk__clone_assign: rsc1:2 allocation score on node2: 0
+pcmk__clone_assign: rsc1:2 allocation score on node3: 0
+pcmk__clone_assign: rsc2:0 allocation score on node1: 0
+pcmk__clone_assign: rsc2:0 allocation score on node2: 1
+pcmk__clone_assign: rsc2:0 allocation score on node3: 0
+pcmk__clone_assign: rsc2:1 allocation score on node1: 0
+pcmk__clone_assign: rsc2:1 allocation score on node2: 0
+pcmk__clone_assign: rsc2:1 allocation score on node3: 1
+pcmk__clone_assign: rsc2:2 allocation score on node1: 0
+pcmk__clone_assign: rsc2:2 allocation score on node2: 0
+pcmk__clone_assign: rsc2:2 allocation score on node3: 0
+pcmk__group_assign: grp:0 allocation score on node1: 0
+pcmk__group_assign: grp:0 allocation score on node2: 0
+pcmk__group_assign: grp:0 allocation score on node3: 0
+pcmk__group_assign: grp:1 allocation score on node1: 0
+pcmk__group_assign: grp:1 allocation score on node2: -INFINITY
+pcmk__group_assign: grp:1 allocation score on node3: 0
+pcmk__group_assign: grp:2 allocation score on node1: 0
+pcmk__group_assign: grp:2 allocation score on node2: -INFINITY
+pcmk__group_assign: grp:2 allocation score on node3: -INFINITY
+pcmk__group_assign: rsc1:0 allocation score on node1: 0
+pcmk__group_assign: rsc1:0 allocation score on node2: 1
+pcmk__group_assign: rsc1:0 allocation score on node3: 0
+pcmk__group_assign: rsc1:1 allocation score on node1: 0
+pcmk__group_assign: rsc1:1 allocation score on node2: -INFINITY
+pcmk__group_assign: rsc1:1 allocation score on node3: 1
+pcmk__group_assign: rsc1:2 allocation score on node1: 0
+pcmk__group_assign: rsc1:2 allocation score on node2: -INFINITY
+pcmk__group_assign: rsc1:2 allocation score on node3: -INFINITY
+pcmk__group_assign: rsc2:0 allocation score on node1: 0
+pcmk__group_assign: rsc2:0 allocation score on node2: 1
+pcmk__group_assign: rsc2:0 allocation score on node3: 0
+pcmk__group_assign: rsc2:1 allocation score on node1: 0
+pcmk__group_assign: rsc2:1 allocation score on node2: -INFINITY
+pcmk__group_assign: rsc2:1 allocation score on node3: 1
+pcmk__group_assign: rsc2:2 allocation score on node1: 0
+pcmk__group_assign: rsc2:2 allocation score on node2: -INFINITY
+pcmk__group_assign: rsc2:2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: rsc1:0 allocation score on node1: 0
+pcmk__primitive_assign: rsc1:0 allocation score on node2: 2
+pcmk__primitive_assign: rsc1:0 allocation score on node3: 0
+pcmk__primitive_assign: rsc1:1 allocation score on node1: 0
+pcmk__primitive_assign: rsc1:1 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc1:1 allocation score on node3: 2
+pcmk__primitive_assign: rsc1:2 allocation score on node1: 0
+pcmk__primitive_assign: rsc1:2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc1:2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc2:0 allocation score on node1: -INFINITY
+pcmk__primitive_assign: rsc2:0 allocation score on node2: 1
+pcmk__primitive_assign: rsc2:0 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc2:1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: rsc2:1 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc2:1 allocation score on node3: 1
+pcmk__primitive_assign: rsc2:2 allocation score on node1: 0
+pcmk__primitive_assign: rsc2:2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc2:2 allocation score on node3: -INFINITY
diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-3.scores b/cts/scheduler/scores/clone-recover-no-shuffle-3.scores
new file mode 100644
index 0000000..91fe06c
--- /dev/null
+++ b/cts/scheduler/scores/clone-recover-no-shuffle-3.scores
@@ -0,0 +1,64 @@
+
+pcmk__bundle_assign: base-bundle allocation score on node1: 0
+pcmk__bundle_assign: base-bundle allocation score on node2: 0
+pcmk__bundle_assign: base-bundle allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501
+pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501
+pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 500
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0
+pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000
+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY
diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-4.scores b/cts/scheduler/scores/clone-recover-no-shuffle-4.scores
new file mode 100644
index 0000000..2a52c81
--- /dev/null
+++ b/cts/scheduler/scores/clone-recover-no-shuffle-4.scores
@@ -0,0 +1,31 @@
+
+pcmk__clone_assign: dummy-clone allocation score on node1: 100
+pcmk__clone_assign: dummy-clone allocation score on node2: 0
+pcmk__clone_assign: dummy-clone allocation score on node3: 0
+pcmk__clone_assign: dummy:0 allocation score on node1: 100
+pcmk__clone_assign: dummy:0 allocation score on node2: 1
+pcmk__clone_assign: dummy:0 allocation score on node3: 0
+pcmk__clone_assign: dummy:1 allocation score on node1: 100
+pcmk__clone_assign: dummy:1 allocation score on node2: 0
+pcmk__clone_assign: dummy:1 allocation score on node3: 1
+pcmk__clone_assign: dummy:2 allocation score on node1: 100
+pcmk__clone_assign: dummy:2 allocation score on node2: 0
+pcmk__clone_assign: dummy:2 allocation score on node3: 0
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: dummy:0 allocation score on node1: -INFINITY
+pcmk__primitive_assign: dummy:0 allocation score on node1: 100
+pcmk__primitive_assign: dummy:0 allocation score on node2: 1
+pcmk__primitive_assign: dummy:0 allocation score on node2: 1
+pcmk__primitive_assign: dummy:0 allocation score on node3: 0
+pcmk__primitive_assign: dummy:0 allocation score on node3: 0
+pcmk__primitive_assign: dummy:1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: dummy:1 allocation score on node1: 100
+pcmk__primitive_assign: dummy:1 allocation score on node2: -INFINITY
+pcmk__primitive_assign: dummy:1 allocation score on node2: -INFINITY
+pcmk__primitive_assign: dummy:1 allocation score on node3: 1
+pcmk__primitive_assign: dummy:1 allocation score on node3: 1
+pcmk__primitive_assign: dummy:2 allocation score on node1: 100
+pcmk__primitive_assign: dummy:2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: dummy:2 allocation score on node3: -INFINITY
diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-5.scores b/cts/scheduler/scores/clone-recover-no-shuffle-5.scores
new file mode 100644
index 0000000..c6c8072
--- /dev/null
+++ b/cts/scheduler/scores/clone-recover-no-shuffle-5.scores
@@ -0,0 +1,109 @@
+
+pcmk__clone_assign: grp-clone allocation score on node1: 100
+pcmk__clone_assign: grp-clone allocation score on node2: 0
+pcmk__clone_assign: grp-clone allocation score on node3: 0
+pcmk__clone_assign: grp:0 allocation score on node1: 100
+pcmk__clone_assign: grp:0 allocation score on node2: 0
+pcmk__clone_assign: grp:0 allocation score on node3: 0
+pcmk__clone_assign: grp:1 allocation score on node1: 100
+pcmk__clone_assign: grp:1 allocation score on node2: 0
+pcmk__clone_assign: grp:1 allocation score on node3: 0
+pcmk__clone_assign: grp:2 allocation score on node1: 100
+pcmk__clone_assign: grp:2 allocation score on node2: 0
+pcmk__clone_assign: grp:2 allocation score on node3: 0
+pcmk__clone_assign: rsc1:0 allocation score on node1: 100
+pcmk__clone_assign: rsc1:0 allocation score on node2: 1
+pcmk__clone_assign: rsc1:0 allocation score on node3: 0
+pcmk__clone_assign: rsc1:1 allocation score on node1: 100
+pcmk__clone_assign: rsc1:1 allocation score on node2: 0
+pcmk__clone_assign: rsc1:1 allocation score on node3: 1
+pcmk__clone_assign: rsc1:2 allocation score on node1: 100
+pcmk__clone_assign: rsc1:2 allocation score on node2: 0
+pcmk__clone_assign: rsc1:2 allocation score on node3: 0
+pcmk__clone_assign: rsc2:0 allocation score on node1: 0
+pcmk__clone_assign: rsc2:0 allocation score on node2: 1
+pcmk__clone_assign: rsc2:0 allocation score on node3: 0
+pcmk__clone_assign: rsc2:1 allocation score on node1: 0
+pcmk__clone_assign: rsc2:1 allocation score on node2: 0
+pcmk__clone_assign: rsc2:1 allocation score on node3: 1
+pcmk__clone_assign: rsc2:2 allocation score on node1: 0
+pcmk__clone_assign: rsc2:2 allocation score on node2: 0
+pcmk__clone_assign: rsc2:2 allocation score on node3: 0
+pcmk__group_assign: grp:0 allocation score on node1: -INFINITY
+pcmk__group_assign: grp:0 allocation score on node1: 100
+pcmk__group_assign: grp:0 allocation score on node2: 0
+pcmk__group_assign: grp:0 allocation score on node2: 0
+pcmk__group_assign: grp:0 allocation score on node3: 0
+pcmk__group_assign: grp:0 allocation score on node3: 0
+pcmk__group_assign: grp:1 allocation score on node1: -INFINITY
+pcmk__group_assign: grp:1 allocation score on node1: 100
+pcmk__group_assign: grp:1 allocation score on node2: -INFINITY
+pcmk__group_assign: grp:1 allocation score on node2: -INFINITY
+pcmk__group_assign: grp:1 allocation score on node3: 0
+pcmk__group_assign: grp:1 allocation score on node3: 0
+pcmk__group_assign: grp:2 allocation score on node1: 100
+pcmk__group_assign: grp:2 allocation score on node2: -INFINITY
+pcmk__group_assign: grp:2 allocation score on node3: -INFINITY
+pcmk__group_assign: rsc1:0 allocation score on node1: -INFINITY
+pcmk__group_assign: rsc1:0 allocation score on node1: 100
+pcmk__group_assign: rsc1:0 allocation score on node2: 1
+pcmk__group_assign: rsc1:0 allocation score on node2: 1
+pcmk__group_assign: rsc1:0 allocation score on node3: 0
+pcmk__group_assign: rsc1:0 allocation score on node3: 0
+pcmk__group_assign: rsc1:1 allocation score on node1: -INFINITY
+pcmk__group_assign: rsc1:1 allocation score on node1: 100
+pcmk__group_assign: rsc1:1 allocation score on node2: -INFINITY
+pcmk__group_assign: rsc1:1 allocation score on node2: -INFINITY
+pcmk__group_assign: rsc1:1 allocation score on node3: 1
+pcmk__group_assign: rsc1:1 allocation score on node3: 1
+pcmk__group_assign: rsc1:2 allocation score on node1: 100
+pcmk__group_assign: rsc1:2 allocation score on node2: -INFINITY
+pcmk__group_assign: rsc1:2 allocation score on node3: -INFINITY
+pcmk__group_assign: rsc2:0 allocation score on node1: -INFINITY
+pcmk__group_assign: rsc2:0 allocation score on node1: 0
+pcmk__group_assign: rsc2:0 allocation score on node2: 1
+pcmk__group_assign: rsc2:0 allocation score on node2: 1
+pcmk__group_assign: rsc2:0 allocation score on node3: 0
+pcmk__group_assign: rsc2:0 allocation score on node3: 0
+pcmk__group_assign: rsc2:1 allocation score on node1: -INFINITY
+pcmk__group_assign: rsc2:1 allocation score on node1: 0
+pcmk__group_assign: rsc2:1 allocation score on node2: -INFINITY
+pcmk__group_assign: rsc2:1 allocation score on node2: -INFINITY
+pcmk__group_assign: rsc2:1 allocation score on node3: 1
+pcmk__group_assign: rsc2:1 allocation score on node3: 1
+pcmk__group_assign: rsc2:2 allocation score on node1: 0
+pcmk__group_assign: rsc2:2 allocation score on node2: -INFINITY
+pcmk__group_assign: rsc2:2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: rsc1:0 allocation score on node1: -INFINITY
+pcmk__primitive_assign: rsc1:0 allocation score on node1: 100
+pcmk__primitive_assign: rsc1:0 allocation score on node2: 2
+pcmk__primitive_assign: rsc1:0 allocation score on node2: 2
+pcmk__primitive_assign: rsc1:0 allocation score on node3: 0
+pcmk__primitive_assign: rsc1:0 allocation score on node3: 0
+pcmk__primitive_assign: rsc1:1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: rsc1:1 allocation score on node1: 100
+pcmk__primitive_assign: rsc1:1 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc1:1 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc1:1 allocation score on node3: 2
+pcmk__primitive_assign: rsc1:1 allocation score on node3: 2
+pcmk__primitive_assign: rsc1:2 allocation score on node1: 100
+pcmk__primitive_assign: rsc1:2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc1:2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc2:0 allocation score on node1: -INFINITY
+pcmk__primitive_assign: rsc2:0 allocation score on node1: 0
+pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc2:0 allocation score on node2: 1
+pcmk__primitive_assign: rsc2:0 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc2:0 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc2:1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: rsc2:1 allocation score on node1: 0
+pcmk__primitive_assign: rsc2:1 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc2:1 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc2:1 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc2:1 allocation score on node3: 1
+pcmk__primitive_assign: rsc2:2 allocation score on node1: 0
+pcmk__primitive_assign: rsc2:2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc2:2 allocation score on node3: -INFINITY
diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-6.scores b/cts/scheduler/scores/clone-recover-no-shuffle-6.scores
new file mode 100644
index 0000000..a7231a7
--- /dev/null
+++ b/cts/scheduler/scores/clone-recover-no-shuffle-6.scores
@@ -0,0 +1,70 @@
+
+pcmk__bundle_assign: base-bundle allocation score on node1: 100
+pcmk__bundle_assign: base-bundle allocation score on node2: 0
+pcmk__bundle_assign: base-bundle allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 100
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 100
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 100
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501
+pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501
+pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 500
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0
+pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000
+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 100
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 100
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 100
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY
diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-7.scores b/cts/scheduler/scores/clone-recover-no-shuffle-7.scores
new file mode 100644
index 0000000..503cbb3
--- /dev/null
+++ b/cts/scheduler/scores/clone-recover-no-shuffle-7.scores
@@ -0,0 +1,34 @@
+
+dummy:0 promotion score on node3: 5
+dummy:1 promotion score on node2: 10
+dummy:2 promotion score on node1: 15
+pcmk__clone_assign: dummy-clone allocation score on node1: 0
+pcmk__clone_assign: dummy-clone allocation score on node2: 0
+pcmk__clone_assign: dummy-clone allocation score on node3: 0
+pcmk__clone_assign: dummy:0 allocation score on node1: 15
+pcmk__clone_assign: dummy:0 allocation score on node2: 0
+pcmk__clone_assign: dummy:0 allocation score on node3: 6
+pcmk__clone_assign: dummy:1 allocation score on node1: 15
+pcmk__clone_assign: dummy:1 allocation score on node2: 11
+pcmk__clone_assign: dummy:1 allocation score on node3: 0
+pcmk__clone_assign: dummy:2 allocation score on node1: 15
+pcmk__clone_assign: dummy:2 allocation score on node2: 10
+pcmk__clone_assign: dummy:2 allocation score on node3: 5
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: dummy:0 allocation score on node1: -INFINITY
+pcmk__primitive_assign: dummy:0 allocation score on node1: 15
+pcmk__primitive_assign: dummy:0 allocation score on node2: 0
+pcmk__primitive_assign: dummy:0 allocation score on node2: 0
+pcmk__primitive_assign: dummy:0 allocation score on node3: 6
+pcmk__primitive_assign: dummy:0 allocation score on node3: 6
+pcmk__primitive_assign: dummy:1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: dummy:1 allocation score on node1: 15
+pcmk__primitive_assign: dummy:1 allocation score on node2: 11
+pcmk__primitive_assign: dummy:1 allocation score on node2: 11
+pcmk__primitive_assign: dummy:1 allocation score on node3: -INFINITY
+pcmk__primitive_assign: dummy:1 allocation score on node3: -INFINITY
+pcmk__primitive_assign: dummy:2 allocation score on node1: 15
+pcmk__primitive_assign: dummy:2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: dummy:2 allocation score on node3: -INFINITY
diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-8.scores b/cts/scheduler/scores/clone-recover-no-shuffle-8.scores
new file mode 100644
index 0000000..56d4cc8
--- /dev/null
+++ b/cts/scheduler/scores/clone-recover-no-shuffle-8.scores
@@ -0,0 +1,82 @@
+
+grp:0 promotion score on node3: 10
+grp:1 promotion score on node2: 20
+grp:2 promotion score on node1: 30
+pcmk__clone_assign: grp-clone allocation score on node1: 0
+pcmk__clone_assign: grp-clone allocation score on node2: 0
+pcmk__clone_assign: grp-clone allocation score on node3: 0
+pcmk__clone_assign: grp:0 allocation score on node1: 30
+pcmk__clone_assign: grp:0 allocation score on node2: 0
+pcmk__clone_assign: grp:0 allocation score on node3: 10
+pcmk__clone_assign: grp:1 allocation score on node1: 30
+pcmk__clone_assign: grp:1 allocation score on node2: 20
+pcmk__clone_assign: grp:1 allocation score on node3: 0
+pcmk__clone_assign: grp:2 allocation score on node1: 30
+pcmk__clone_assign: grp:2 allocation score on node2: 20
+pcmk__clone_assign: grp:2 allocation score on node3: 10
+pcmk__clone_assign: rsc1:0 allocation score on node1: 0
+pcmk__clone_assign: rsc1:0 allocation score on node2: 0
+pcmk__clone_assign: rsc1:0 allocation score on node3: 1
+pcmk__clone_assign: rsc1:1 allocation score on node1: 0
+pcmk__clone_assign: rsc1:1 allocation score on node2: 1
+pcmk__clone_assign: rsc1:1 allocation score on node3: 0
+pcmk__clone_assign: rsc1:2 allocation score on node1: 0
+pcmk__clone_assign: rsc1:2 allocation score on node2: 0
+pcmk__clone_assign: rsc1:2 allocation score on node3: 0
+pcmk__clone_assign: rsc2:0 allocation score on node1: 0
+pcmk__clone_assign: rsc2:0 allocation score on node2: 0
+pcmk__clone_assign: rsc2:0 allocation score on node3: 1
+pcmk__clone_assign: rsc2:1 allocation score on node1: 0
+pcmk__clone_assign: rsc2:1 allocation score on node2: 1
+pcmk__clone_assign: rsc2:1 allocation score on node3: 0
+pcmk__clone_assign: rsc2:2 allocation score on node1: 0
+pcmk__clone_assign: rsc2:2 allocation score on node2: 0
+pcmk__clone_assign: rsc2:2 allocation score on node3: 0
+pcmk__group_assign: grp:0 allocation score on node1: 30
+pcmk__group_assign: grp:0 allocation score on node2: 0
+pcmk__group_assign: grp:0 allocation score on node3: 10
+pcmk__group_assign: grp:1 allocation score on node1: 30
+pcmk__group_assign: grp:1 allocation score on node2: 20
+pcmk__group_assign: grp:1 allocation score on node3: -INFINITY
+pcmk__group_assign: grp:2 allocation score on node1: 30
+pcmk__group_assign: grp:2 allocation score on node2: -INFINITY
+pcmk__group_assign: grp:2 allocation score on node3: -INFINITY
+pcmk__group_assign: rsc1:0 allocation score on node1: 0
+pcmk__group_assign: rsc1:0 allocation score on node2: 0
+pcmk__group_assign: rsc1:0 allocation score on node3: 1
+pcmk__group_assign: rsc1:1 allocation score on node1: 0
+pcmk__group_assign: rsc1:1 allocation score on node2: 1
+pcmk__group_assign: rsc1:1 allocation score on node3: -INFINITY
+pcmk__group_assign: rsc1:2 allocation score on node1: 0
+pcmk__group_assign: rsc1:2 allocation score on node2: -INFINITY
+pcmk__group_assign: rsc1:2 allocation score on node3: -INFINITY
+pcmk__group_assign: rsc2:0 allocation score on node1: 0
+pcmk__group_assign: rsc2:0 allocation score on node2: 0
+pcmk__group_assign: rsc2:0 allocation score on node3: 1
+pcmk__group_assign: rsc2:1 allocation score on node1: 0
+pcmk__group_assign: rsc2:1 allocation score on node2: 1
+pcmk__group_assign: rsc2:1 allocation score on node3: -INFINITY
+pcmk__group_assign: rsc2:2 allocation score on node1: 0
+pcmk__group_assign: rsc2:2 allocation score on node2: -INFINITY
+pcmk__group_assign: rsc2:2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: rsc1:0 allocation score on node1: 0
+pcmk__primitive_assign: rsc1:0 allocation score on node2: 0
+pcmk__primitive_assign: rsc1:0 allocation score on node3: 2
+pcmk__primitive_assign: rsc1:1 allocation score on node1: 0
+pcmk__primitive_assign: rsc1:1 allocation score on node2: 2
+pcmk__primitive_assign: rsc1:1 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc1:2 allocation score on node1: 0
+pcmk__primitive_assign: rsc1:2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc1:2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc2:0 allocation score on node1: -INFINITY
+pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc2:0 allocation score on node3: 1
+pcmk__primitive_assign: rsc2:1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: rsc2:1 allocation score on node2: 1
+pcmk__primitive_assign: rsc2:1 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc2:2 allocation score on node1: 0
+pcmk__primitive_assign: rsc2:2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc2:2 allocation score on node3: -INFINITY
diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-9.scores b/cts/scheduler/scores/clone-recover-no-shuffle-9.scores
new file mode 100644
index 0000000..eb7a941
--- /dev/null
+++ b/cts/scheduler/scores/clone-recover-no-shuffle-9.scores
@@ -0,0 +1,67 @@
+
+base:0 promotion score on base-bundle-0: 5
+base:1 promotion score on base-bundle-1: 10
+base:2 promotion score on base-bundle-2: 15
+pcmk__bundle_assign: base-bundle allocation score on node1: 0
+pcmk__bundle_assign: base-bundle allocation score on node2: 0
+pcmk__bundle_assign: base-bundle allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-2 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-0: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-1: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on base-bundle-2: -INFINITY
+pcmk__bundle_assign: base-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-1 allocation score on node3: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node2: 0
+pcmk__bundle_assign: base-bundle-podman-2 allocation score on node3: 0
+pcmk__bundle_assign: base:0 allocation score on base-bundle-0: 501
+pcmk__bundle_assign: base:1 allocation score on base-bundle-1: 501
+pcmk__bundle_assign: base:2 allocation score on base-bundle-2: 500
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-0: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-1: 0
+pcmk__clone_assign: base-bundle-clone allocation score on base-bundle-2: 0
+pcmk__clone_assign: base-bundle-clone allocation score on node1: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node2: -INFINITY
+pcmk__clone_assign: base-bundle-clone allocation score on node3: -INFINITY
+pcmk__clone_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__clone_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__clone_assign: base:2 allocation score on base-bundle-2: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-0 allocation score on node3: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-1 allocation score on node2: 10000
+pcmk__primitive_assign: base-bundle-1 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node1: 10000
+pcmk__primitive_assign: base-bundle-2 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-2 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-0 allocation score on node3: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node2: 0
+pcmk__primitive_assign: base-bundle-podman-1 allocation score on node3: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node1: 0
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: base-bundle-podman-2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: base:0 allocation score on base-bundle-0: INFINITY
+pcmk__primitive_assign: base:1 allocation score on base-bundle-1: INFINITY
+pcmk__primitive_assign: base:2 allocation score on base-bundle-2: INFINITY
diff --git a/cts/scheduler/scores/cloned-group-stop.scores b/cts/scheduler/scores/cloned-group-stop.scores
index be835fa..7e406c6 100644
--- a/cts/scheduler/scores/cloned-group-stop.scores
+++ b/cts/scheduler/scores/cloned-group-stop.scores
@@ -122,8 +122,12 @@ pcmk__primitive_assign: mysql-fs allocation score on rhos4-node4: -INFINITY
pcmk__primitive_assign: mysql-vip allocation score on rhos4-node3: 300
pcmk__primitive_assign: mysql-vip allocation score on rhos4-node4: -INFINITY
pcmk__primitive_assign: qpidd:0 allocation score on rhos4-node3: -INFINITY
+pcmk__primitive_assign: qpidd:0 allocation score on rhos4-node3: -INFINITY
+pcmk__primitive_assign: qpidd:0 allocation score on rhos4-node4: -INFINITY
pcmk__primitive_assign: qpidd:0 allocation score on rhos4-node4: -INFINITY
pcmk__primitive_assign: qpidd:1 allocation score on rhos4-node3: -INFINITY
+pcmk__primitive_assign: qpidd:1 allocation score on rhos4-node3: -INFINITY
+pcmk__primitive_assign: qpidd:1 allocation score on rhos4-node4: -INFINITY
pcmk__primitive_assign: qpidd:1 allocation score on rhos4-node4: -INFINITY
pcmk__primitive_assign: virt-fencing allocation score on rhos4-node3: 100
pcmk__primitive_assign: virt-fencing allocation score on rhos4-node4: 0
diff --git a/cts/scheduler/scores/coloc-clone-stays-active.scores b/cts/scheduler/scores/coloc-clone-stays-active.scores
index 9b46943..52c6bc5 100644
--- a/cts/scheduler/scores/coloc-clone-stays-active.scores
+++ b/cts/scheduler/scores/coloc-clone-stays-active.scores
@@ -31,7 +31,7 @@ pcmk__clone_assign: cl-drbdlinks-s01-service allocation score on s01-0: 0
pcmk__clone_assign: cl-drbdlinks-s01-service allocation score on s01-1: 0
pcmk__clone_assign: cl-gfs2 allocation score on s01-0: 0
pcmk__clone_assign: cl-gfs2 allocation score on s01-1: 0
-pcmk__clone_assign: cl-ietd allocation score on s01-0: 11001
+pcmk__clone_assign: cl-ietd allocation score on s01-0: 12001
pcmk__clone_assign: cl-ietd allocation score on s01-1: 1000
pcmk__clone_assign: cl-libvirtd allocation score on s01-0: 0
pcmk__clone_assign: cl-libvirtd allocation score on s01-1: 0
@@ -337,16 +337,16 @@ pcmk__primitive_assign: dhcpd:0 allocation score on s01-0: -INFINITY
pcmk__primitive_assign: dhcpd:0 allocation score on s01-1: -INFINITY
pcmk__primitive_assign: dhcpd:1 allocation score on s01-0: -INFINITY
pcmk__primitive_assign: dhcpd:1 allocation score on s01-1: -INFINITY
-pcmk__primitive_assign: dlm:0 allocation score on s01-0: 0
+pcmk__primitive_assign: dlm:0 allocation score on s01-0: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on s01-1: 1
pcmk__primitive_assign: dlm:1 allocation score on s01-0: 1
pcmk__primitive_assign: dlm:1 allocation score on s01-1: -INFINITY
pcmk__primitive_assign: drbd-pool-0:0 allocation score on s01-0: -INFINITY
pcmk__primitive_assign: drbd-pool-0:0 allocation score on s01-1: 10001
-pcmk__primitive_assign: drbd-pool-0:1 allocation score on s01-0: 10001
+pcmk__primitive_assign: drbd-pool-0:1 allocation score on s01-0: 11001
pcmk__primitive_assign: drbd-pool-0:1 allocation score on s01-1: 0
pcmk__primitive_assign: drbd-pool-1:0 allocation score on s01-0: 0
-pcmk__primitive_assign: drbd-pool-1:0 allocation score on s01-1: 10001
+pcmk__primitive_assign: drbd-pool-1:0 allocation score on s01-1: 11001
pcmk__primitive_assign: drbd-pool-1:1 allocation score on s01-0: 10001
pcmk__primitive_assign: drbd-pool-1:1 allocation score on s01-1: -INFINITY
pcmk__primitive_assign: drbd-s01-logs:0 allocation score on s01-0: 0
@@ -357,7 +357,7 @@ pcmk__primitive_assign: drbd-s01-service:0 allocation score on s01-0: 0
pcmk__primitive_assign: drbd-s01-service:0 allocation score on s01-1: 10001
pcmk__primitive_assign: drbd-s01-service:1 allocation score on s01-0: 10001
pcmk__primitive_assign: drbd-s01-service:1 allocation score on s01-1: -INFINITY
-pcmk__primitive_assign: drbd-s01-vm-data:0 allocation score on s01-0: 0
+pcmk__primitive_assign: drbd-s01-vm-data:0 allocation score on s01-0: -INFINITY
pcmk__primitive_assign: drbd-s01-vm-data:0 allocation score on s01-1: 10001
pcmk__primitive_assign: drbd-s01-vm-data:1 allocation score on s01-0: 10001
pcmk__primitive_assign: drbd-s01-vm-data:1 allocation score on s01-1: -INFINITY
@@ -382,22 +382,22 @@ pcmk__primitive_assign: gfs2:0 allocation score on s01-1: 1
pcmk__primitive_assign: gfs2:1 allocation score on s01-0: 1
pcmk__primitive_assign: gfs2:1 allocation score on s01-1: -INFINITY
pcmk__primitive_assign: ietd:0 allocation score on s01-0: -INFINITY
-pcmk__primitive_assign: ietd:0 allocation score on s01-1: 1
-pcmk__primitive_assign: ietd:1 allocation score on s01-0: 1
-pcmk__primitive_assign: ietd:1 allocation score on s01-1: 0
+pcmk__primitive_assign: ietd:0 allocation score on s01-1: 1001
+pcmk__primitive_assign: ietd:1 allocation score on s01-0: 12002
+pcmk__primitive_assign: ietd:1 allocation score on s01-1: -INFINITY
pcmk__primitive_assign: iscsi-pool-0-lun-1 allocation score on s01-0: 0
pcmk__primitive_assign: iscsi-pool-0-lun-1 allocation score on s01-1: -INFINITY
-pcmk__primitive_assign: iscsi-pool-0-target allocation score on s01-0: 11001
+pcmk__primitive_assign: iscsi-pool-0-target allocation score on s01-0: 12001
pcmk__primitive_assign: iscsi-pool-0-target allocation score on s01-1: -INFINITY
pcmk__primitive_assign: iscsi-pool-1-lun-1 allocation score on s01-0: -INFINITY
pcmk__primitive_assign: iscsi-pool-1-lun-1 allocation score on s01-1: 0
pcmk__primitive_assign: iscsi-pool-1-target allocation score on s01-0: -INFINITY
-pcmk__primitive_assign: iscsi-pool-1-target allocation score on s01-1: 11001
+pcmk__primitive_assign: iscsi-pool-1-target allocation score on s01-1: 12001
pcmk__primitive_assign: iscsi-vds-dom0-stateless-0-lun-1 allocation score on s01-0: -INFINITY
pcmk__primitive_assign: iscsi-vds-dom0-stateless-0-lun-1 allocation score on s01-1: -INFINITY
pcmk__primitive_assign: iscsi-vds-dom0-stateless-0-target allocation score on s01-0: -INFINITY
pcmk__primitive_assign: iscsi-vds-dom0-stateless-0-target allocation score on s01-1: -INFINITY
-pcmk__primitive_assign: libvirtd:0 allocation score on s01-0: 0
+pcmk__primitive_assign: libvirtd:0 allocation score on s01-0: -INFINITY
pcmk__primitive_assign: libvirtd:0 allocation score on s01-1: 1
pcmk__primitive_assign: libvirtd:1 allocation score on s01-0: 1
pcmk__primitive_assign: libvirtd:1 allocation score on s01-1: -INFINITY
diff --git a/cts/scheduler/scores/coloc-with-inner-group-member.scores b/cts/scheduler/scores/coloc-with-inner-group-member.scores
new file mode 100644
index 0000000..8d1c6f6
--- /dev/null
+++ b/cts/scheduler/scores/coloc-with-inner-group-member.scores
@@ -0,0 +1,46 @@
+
+pcmk__group_assign: bar allocation score on rhel8-1: 0
+pcmk__group_assign: bar allocation score on rhel8-2: 0
+pcmk__group_assign: bar allocation score on rhel8-3: 0
+pcmk__group_assign: bar allocation score on rhel8-4: 0
+pcmk__group_assign: bar allocation score on rhel8-5: 0
+pcmk__group_assign: foo allocation score on rhel8-1: 0
+pcmk__group_assign: foo allocation score on rhel8-2: 0
+pcmk__group_assign: foo allocation score on rhel8-3: 0
+pcmk__group_assign: foo allocation score on rhel8-4: 0
+pcmk__group_assign: foo allocation score on rhel8-5: 0
+pcmk__group_assign: grp allocation score on rhel8-1: 0
+pcmk__group_assign: grp allocation score on rhel8-2: 0
+pcmk__group_assign: grp allocation score on rhel8-3: 0
+pcmk__group_assign: grp allocation score on rhel8-4: 0
+pcmk__group_assign: grp allocation score on rhel8-5: 0
+pcmk__group_assign: vip allocation score on rhel8-1: 0
+pcmk__group_assign: vip allocation score on rhel8-2: 0
+pcmk__group_assign: vip allocation score on rhel8-3: 0
+pcmk__group_assign: vip allocation score on rhel8-4: 0
+pcmk__group_assign: vip allocation score on rhel8-5: 0
+pcmk__primitive_assign: Fencing allocation score on rhel8-1: 0
+pcmk__primitive_assign: Fencing allocation score on rhel8-2: 0
+pcmk__primitive_assign: Fencing allocation score on rhel8-3: 0
+pcmk__primitive_assign: Fencing allocation score on rhel8-4: 0
+pcmk__primitive_assign: Fencing allocation score on rhel8-5: 0
+pcmk__primitive_assign: bar allocation score on rhel8-1: -INFINITY
+pcmk__primitive_assign: bar allocation score on rhel8-2: -INFINITY
+pcmk__primitive_assign: bar allocation score on rhel8-3: 0
+pcmk__primitive_assign: bar allocation score on rhel8-4: -INFINITY
+pcmk__primitive_assign: bar allocation score on rhel8-5: -INFINITY
+pcmk__primitive_assign: foo allocation score on rhel8-1: -INFINITY
+pcmk__primitive_assign: foo allocation score on rhel8-2: -INFINITY
+pcmk__primitive_assign: foo allocation score on rhel8-3: 0
+pcmk__primitive_assign: foo allocation score on rhel8-4: -INFINITY
+pcmk__primitive_assign: foo allocation score on rhel8-5: -INFINITY
+pcmk__primitive_assign: vip allocation score on rhel8-1: -INFINITY
+pcmk__primitive_assign: vip allocation score on rhel8-2: -INFINITY
+pcmk__primitive_assign: vip allocation score on rhel8-3: 0
+pcmk__primitive_assign: vip allocation score on rhel8-4: -INFINITY
+pcmk__primitive_assign: vip allocation score on rhel8-5: -INFINITY
+pcmk__primitive_assign: vip-dep allocation score on rhel8-1: 0
+pcmk__primitive_assign: vip-dep allocation score on rhel8-2: 0
+pcmk__primitive_assign: vip-dep allocation score on rhel8-3: 0
+pcmk__primitive_assign: vip-dep allocation score on rhel8-4: 0
+pcmk__primitive_assign: vip-dep allocation score on rhel8-5: 0
diff --git a/cts/scheduler/scores/colocate-primitive-with-clone.scores b/cts/scheduler/scores/colocate-primitive-with-clone.scores
index 58b4556..62615f6 100644
--- a/cts/scheduler/scores/colocate-primitive-with-clone.scores
+++ b/cts/scheduler/scores/colocate-primitive-with-clone.scores
@@ -284,65 +284,65 @@ pcmk__primitive_assign: UmVIPcheck allocation score on srv02: -INFINITY
pcmk__primitive_assign: UmVIPcheck allocation score on srv03: -INFINITY
pcmk__primitive_assign: UmVIPcheck allocation score on srv04: 100
pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv01: -INFINITY
-pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv02: 100
-pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv03: 0
+pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv02: 106
+pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv03: -1000
pcmk__primitive_assign: clnG3dummy01:0 allocation score on srv04: -INFINITY
pcmk__primitive_assign: clnG3dummy01:1 allocation score on srv01: -INFINITY
pcmk__primitive_assign: clnG3dummy01:1 allocation score on srv02: -INFINITY
-pcmk__primitive_assign: clnG3dummy01:1 allocation score on srv03: 100
+pcmk__primitive_assign: clnG3dummy01:1 allocation score on srv03: 106
pcmk__primitive_assign: clnG3dummy01:1 allocation score on srv04: -INFINITY
pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv01: -INFINITY
-pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv02: 0
-pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv03: 0
-pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv04: 100
+pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv02: -2994
+pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv03: -2994
+pcmk__primitive_assign: clnG3dummy01:2 allocation score on srv04: 109
pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv01: -INFINITY
pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv02: -INFINITY
pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv03: -INFINITY
pcmk__primitive_assign: clnG3dummy01:3 allocation score on srv04: -INFINITY
pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv01: -INFINITY
-pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv02: 100
-pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv03: 0
+pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv02: 106
+pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv03: -1000
pcmk__primitive_assign: clnG3dummy02:0 allocation score on srv04: -INFINITY
pcmk__primitive_assign: clnG3dummy02:1 allocation score on srv01: -INFINITY
pcmk__primitive_assign: clnG3dummy02:1 allocation score on srv02: -INFINITY
-pcmk__primitive_assign: clnG3dummy02:1 allocation score on srv03: 100
+pcmk__primitive_assign: clnG3dummy02:1 allocation score on srv03: 106
pcmk__primitive_assign: clnG3dummy02:1 allocation score on srv04: -INFINITY
pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv01: -INFINITY
-pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv02: 0
-pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv03: 0
-pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv04: 100
+pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv02: -2994
+pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv03: -2994
+pcmk__primitive_assign: clnG3dummy02:2 allocation score on srv04: 109
pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv01: -INFINITY
pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv02: -INFINITY
pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv03: -INFINITY
pcmk__primitive_assign: clnG3dummy02:3 allocation score on srv04: -INFINITY
pcmk__primitive_assign: clnPrmDiskd1:0 allocation score on srv01: -INFINITY
-pcmk__primitive_assign: clnPrmDiskd1:0 allocation score on srv02: 100
-pcmk__primitive_assign: clnPrmDiskd1:0 allocation score on srv03: 0
+pcmk__primitive_assign: clnPrmDiskd1:0 allocation score on srv02: 106
+pcmk__primitive_assign: clnPrmDiskd1:0 allocation score on srv03: -1000
pcmk__primitive_assign: clnPrmDiskd1:0 allocation score on srv04: -INFINITY
pcmk__primitive_assign: clnPrmDiskd1:1 allocation score on srv01: -INFINITY
pcmk__primitive_assign: clnPrmDiskd1:1 allocation score on srv02: -INFINITY
-pcmk__primitive_assign: clnPrmDiskd1:1 allocation score on srv03: 100
+pcmk__primitive_assign: clnPrmDiskd1:1 allocation score on srv03: 106
pcmk__primitive_assign: clnPrmDiskd1:1 allocation score on srv04: -INFINITY
pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv01: -INFINITY
-pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv02: 0
-pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv03: 0
-pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv04: 100
+pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv02: -2994
+pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv03: -2994
+pcmk__primitive_assign: clnPrmDiskd1:2 allocation score on srv04: 109
pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv01: -INFINITY
pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv02: -INFINITY
pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv03: -INFINITY
pcmk__primitive_assign: clnPrmDiskd1:3 allocation score on srv04: -INFINITY
pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv01: -INFINITY
-pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv02: 100
-pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv03: 0
+pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv02: 106
+pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv03: -1000
pcmk__primitive_assign: clnPrmPingd:0 allocation score on srv04: -INFINITY
pcmk__primitive_assign: clnPrmPingd:1 allocation score on srv01: -INFINITY
pcmk__primitive_assign: clnPrmPingd:1 allocation score on srv02: -INFINITY
-pcmk__primitive_assign: clnPrmPingd:1 allocation score on srv03: 100
+pcmk__primitive_assign: clnPrmPingd:1 allocation score on srv03: 106
pcmk__primitive_assign: clnPrmPingd:1 allocation score on srv04: -INFINITY
pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv01: -INFINITY
-pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv02: 0
-pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv03: 0
-pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv04: 100
+pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv02: -2994
+pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv03: -2994
+pcmk__primitive_assign: clnPrmPingd:2 allocation score on srv04: 109
pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv01: -INFINITY
pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv02: -INFINITY
pcmk__primitive_assign: clnPrmPingd:3 allocation score on srv03: -INFINITY
diff --git a/cts/scheduler/scores/colocation-influence.scores b/cts/scheduler/scores/colocation-influence.scores
index e15bdf5..2eb86ec 100644
--- a/cts/scheduler/scores/colocation-influence.scores
+++ b/cts/scheduler/scores/colocation-influence.scores
@@ -1,136 +1,136 @@
-pcmk__bundle_allocate: bundle10 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: bundle10 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: bundle10 allocation score on rhel7-2: 0
-pcmk__bundle_allocate: bundle10 allocation score on rhel7-2: 0
-pcmk__bundle_allocate: bundle10 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: bundle10 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: bundle10 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: bundle10 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: bundle10 allocation score on rhel7-5: -INFINITY
-pcmk__bundle_allocate: bundle10 allocation score on rhel7-5: -INFINITY
-pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-2: 10
-pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-2: 10010
-pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: bundle10-0 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-2: 0
-pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-2: 0
-pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-3: 10
-pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-3: 10010
-pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: bundle10-1 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: bundle10-clone allocation score on bundle10-0: -INFINITY
-pcmk__bundle_allocate: bundle10-clone allocation score on bundle10-0: 0
-pcmk__bundle_allocate: bundle10-clone allocation score on bundle10-1: -INFINITY
-pcmk__bundle_allocate: bundle10-clone allocation score on bundle10-1: 0
-pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-1: -INFINITY
-pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-1: 0
-pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-2: -INFINITY
-pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-2: 0
-pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-3: -INFINITY
-pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-3: 0
-pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-4: -INFINITY
-pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-4: 0
-pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-5: -INFINITY
-pcmk__bundle_allocate: bundle10-clone allocation score on rhel7-5: 0
-pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-2: 10
-pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-2: 20
-pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-5: -INFINITY
-pcmk__bundle_allocate: bundle10-docker-0 allocation score on rhel7-5: -INFINITY
-pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-2: -INFINITY
-pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-2: 0
-pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-3: 10
-pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-3: 20
-pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-5: -INFINITY
-pcmk__bundle_allocate: bundle10-docker-1 allocation score on rhel7-5: -INFINITY
-pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-1: -INFINITY
-pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-2: 10
-pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-2: 10
-pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-3: -INFINITY
-pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-4: -INFINITY
-pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-5: -INFINITY
-pcmk__bundle_allocate: bundle10-ip-192.168.122.131 allocation score on rhel7-5: -INFINITY
-pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-1: -INFINITY
-pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-2: -INFINITY
-pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-2: 0
-pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-3: 10
-pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-3: 10
-pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-4: -INFINITY
-pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-5: -INFINITY
-pcmk__bundle_allocate: bundle10-ip-192.168.122.132 allocation score on rhel7-5: -INFINITY
-pcmk__bundle_allocate: bundle11 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: bundle11 allocation score on rhel7-2: 0
-pcmk__bundle_allocate: bundle11 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: bundle11 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: bundle11 allocation score on rhel7-5: -INFINITY
-pcmk__bundle_allocate: bundle11-0 allocation score on rhel7-1: 10
-pcmk__bundle_allocate: bundle11-0 allocation score on rhel7-2: 0
-pcmk__bundle_allocate: bundle11-0 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: bundle11-0 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: bundle11-0 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: bundle11-1 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: bundle11-1 allocation score on rhel7-2: 0
-pcmk__bundle_allocate: bundle11-1 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: bundle11-1 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: bundle11-1 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: bundle11-clone allocation score on bundle11-0: -INFINITY
-pcmk__bundle_allocate: bundle11-clone allocation score on bundle11-1: -INFINITY
-pcmk__bundle_allocate: bundle11-clone allocation score on rhel7-1: 0
-pcmk__bundle_allocate: bundle11-clone allocation score on rhel7-2: 0
-pcmk__bundle_allocate: bundle11-clone allocation score on rhel7-3: 0
-pcmk__bundle_allocate: bundle11-clone allocation score on rhel7-4: 0
-pcmk__bundle_allocate: bundle11-clone allocation score on rhel7-5: 0
-pcmk__bundle_allocate: bundle11-docker-0 allocation score on rhel7-1: 10
-pcmk__bundle_allocate: bundle11-docker-0 allocation score on rhel7-2: 0
-pcmk__bundle_allocate: bundle11-docker-0 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: bundle11-docker-0 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: bundle11-docker-0 allocation score on rhel7-5: -INFINITY
-pcmk__bundle_allocate: bundle11-docker-1 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: bundle11-docker-1 allocation score on rhel7-2: 0
-pcmk__bundle_allocate: bundle11-docker-1 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: bundle11-docker-1 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: bundle11-docker-1 allocation score on rhel7-5: -INFINITY
-pcmk__bundle_allocate: bundle11-ip-192.168.122.134 allocation score on rhel7-1: 10
-pcmk__bundle_allocate: bundle11-ip-192.168.122.134 allocation score on rhel7-2: 0
-pcmk__bundle_allocate: bundle11-ip-192.168.122.134 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: bundle11-ip-192.168.122.134 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: bundle11-ip-192.168.122.134 allocation score on rhel7-5: -INFINITY
-pcmk__bundle_allocate: bundle11-ip-192.168.122.135 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: bundle11-ip-192.168.122.135 allocation score on rhel7-2: 0
-pcmk__bundle_allocate: bundle11-ip-192.168.122.135 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: bundle11-ip-192.168.122.135 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: bundle11-ip-192.168.122.135 allocation score on rhel7-5: -INFINITY
-pcmk__bundle_allocate: bundle11a:0 allocation score on bundle11-0: 510
-pcmk__bundle_allocate: bundle11a:1 allocation score on bundle11-1: 500
-pcmk__bundle_allocate: httpd:0 allocation score on bundle10-0: 510
-pcmk__bundle_allocate: httpd:0 allocation score on bundle10-0: INFINITY
-pcmk__bundle_allocate: httpd:1 allocation score on bundle10-1: 510
-pcmk__bundle_allocate: httpd:1 allocation score on bundle10-1: INFINITY
+pcmk__bundle_assign: bundle10 allocation score on rhel7-1: 0
+pcmk__bundle_assign: bundle10 allocation score on rhel7-1: 0
+pcmk__bundle_assign: bundle10 allocation score on rhel7-2: 0
+pcmk__bundle_assign: bundle10 allocation score on rhel7-2: 0
+pcmk__bundle_assign: bundle10 allocation score on rhel7-3: 0
+pcmk__bundle_assign: bundle10 allocation score on rhel7-3: 0
+pcmk__bundle_assign: bundle10 allocation score on rhel7-4: 0
+pcmk__bundle_assign: bundle10 allocation score on rhel7-4: 0
+pcmk__bundle_assign: bundle10 allocation score on rhel7-5: -INFINITY
+pcmk__bundle_assign: bundle10 allocation score on rhel7-5: -INFINITY
+pcmk__bundle_assign: bundle10-0 allocation score on rhel7-1: 0
+pcmk__bundle_assign: bundle10-0 allocation score on rhel7-1: 0
+pcmk__bundle_assign: bundle10-0 allocation score on rhel7-2: 10
+pcmk__bundle_assign: bundle10-0 allocation score on rhel7-2: 10010
+pcmk__bundle_assign: bundle10-0 allocation score on rhel7-3: 0
+pcmk__bundle_assign: bundle10-0 allocation score on rhel7-3: 0
+pcmk__bundle_assign: bundle10-0 allocation score on rhel7-4: 0
+pcmk__bundle_assign: bundle10-0 allocation score on rhel7-4: 0
+pcmk__bundle_assign: bundle10-0 allocation score on rhel7-5: 0
+pcmk__bundle_assign: bundle10-0 allocation score on rhel7-5: 0
+pcmk__bundle_assign: bundle10-1 allocation score on rhel7-1: 0
+pcmk__bundle_assign: bundle10-1 allocation score on rhel7-1: 0
+pcmk__bundle_assign: bundle10-1 allocation score on rhel7-2: 0
+pcmk__bundle_assign: bundle10-1 allocation score on rhel7-2: 0
+pcmk__bundle_assign: bundle10-1 allocation score on rhel7-3: 10
+pcmk__bundle_assign: bundle10-1 allocation score on rhel7-3: 10010
+pcmk__bundle_assign: bundle10-1 allocation score on rhel7-4: 0
+pcmk__bundle_assign: bundle10-1 allocation score on rhel7-4: 0
+pcmk__bundle_assign: bundle10-1 allocation score on rhel7-5: 0
+pcmk__bundle_assign: bundle10-1 allocation score on rhel7-5: 0
+pcmk__bundle_assign: bundle10-clone allocation score on bundle10-0: -INFINITY
+pcmk__bundle_assign: bundle10-clone allocation score on bundle10-0: 0
+pcmk__bundle_assign: bundle10-clone allocation score on bundle10-1: -INFINITY
+pcmk__bundle_assign: bundle10-clone allocation score on bundle10-1: 0
+pcmk__bundle_assign: bundle10-clone allocation score on rhel7-1: -INFINITY
+pcmk__bundle_assign: bundle10-clone allocation score on rhel7-1: 0
+pcmk__bundle_assign: bundle10-clone allocation score on rhel7-2: -INFINITY
+pcmk__bundle_assign: bundle10-clone allocation score on rhel7-2: 0
+pcmk__bundle_assign: bundle10-clone allocation score on rhel7-3: -INFINITY
+pcmk__bundle_assign: bundle10-clone allocation score on rhel7-3: 0
+pcmk__bundle_assign: bundle10-clone allocation score on rhel7-4: -INFINITY
+pcmk__bundle_assign: bundle10-clone allocation score on rhel7-4: 0
+pcmk__bundle_assign: bundle10-clone allocation score on rhel7-5: -INFINITY
+pcmk__bundle_assign: bundle10-clone allocation score on rhel7-5: 0
+pcmk__bundle_assign: bundle10-docker-0 allocation score on rhel7-1: 0
+pcmk__bundle_assign: bundle10-docker-0 allocation score on rhel7-1: 0
+pcmk__bundle_assign: bundle10-docker-0 allocation score on rhel7-2: 10
+pcmk__bundle_assign: bundle10-docker-0 allocation score on rhel7-2: 20
+pcmk__bundle_assign: bundle10-docker-0 allocation score on rhel7-3: 0
+pcmk__bundle_assign: bundle10-docker-0 allocation score on rhel7-3: 0
+pcmk__bundle_assign: bundle10-docker-0 allocation score on rhel7-4: 0
+pcmk__bundle_assign: bundle10-docker-0 allocation score on rhel7-4: 0
+pcmk__bundle_assign: bundle10-docker-0 allocation score on rhel7-5: -INFINITY
+pcmk__bundle_assign: bundle10-docker-0 allocation score on rhel7-5: -INFINITY
+pcmk__bundle_assign: bundle10-docker-1 allocation score on rhel7-1: 0
+pcmk__bundle_assign: bundle10-docker-1 allocation score on rhel7-1: 0
+pcmk__bundle_assign: bundle10-docker-1 allocation score on rhel7-2: -INFINITY
+pcmk__bundle_assign: bundle10-docker-1 allocation score on rhel7-2: 0
+pcmk__bundle_assign: bundle10-docker-1 allocation score on rhel7-3: 10
+pcmk__bundle_assign: bundle10-docker-1 allocation score on rhel7-3: 20
+pcmk__bundle_assign: bundle10-docker-1 allocation score on rhel7-4: 0
+pcmk__bundle_assign: bundle10-docker-1 allocation score on rhel7-4: 0
+pcmk__bundle_assign: bundle10-docker-1 allocation score on rhel7-5: -INFINITY
+pcmk__bundle_assign: bundle10-docker-1 allocation score on rhel7-5: -INFINITY
+pcmk__bundle_assign: bundle10-ip-192.168.122.131 allocation score on rhel7-1: -INFINITY
+pcmk__bundle_assign: bundle10-ip-192.168.122.131 allocation score on rhel7-1: 0
+pcmk__bundle_assign: bundle10-ip-192.168.122.131 allocation score on rhel7-2: 10
+pcmk__bundle_assign: bundle10-ip-192.168.122.131 allocation score on rhel7-2: 10
+pcmk__bundle_assign: bundle10-ip-192.168.122.131 allocation score on rhel7-3: -INFINITY
+pcmk__bundle_assign: bundle10-ip-192.168.122.131 allocation score on rhel7-3: 0
+pcmk__bundle_assign: bundle10-ip-192.168.122.131 allocation score on rhel7-4: -INFINITY
+pcmk__bundle_assign: bundle10-ip-192.168.122.131 allocation score on rhel7-4: 0
+pcmk__bundle_assign: bundle10-ip-192.168.122.131 allocation score on rhel7-5: -INFINITY
+pcmk__bundle_assign: bundle10-ip-192.168.122.131 allocation score on rhel7-5: -INFINITY
+pcmk__bundle_assign: bundle10-ip-192.168.122.132 allocation score on rhel7-1: -INFINITY
+pcmk__bundle_assign: bundle10-ip-192.168.122.132 allocation score on rhel7-1: 0
+pcmk__bundle_assign: bundle10-ip-192.168.122.132 allocation score on rhel7-2: -INFINITY
+pcmk__bundle_assign: bundle10-ip-192.168.122.132 allocation score on rhel7-2: 0
+pcmk__bundle_assign: bundle10-ip-192.168.122.132 allocation score on rhel7-3: 10
+pcmk__bundle_assign: bundle10-ip-192.168.122.132 allocation score on rhel7-3: 10
+pcmk__bundle_assign: bundle10-ip-192.168.122.132 allocation score on rhel7-4: -INFINITY
+pcmk__bundle_assign: bundle10-ip-192.168.122.132 allocation score on rhel7-4: 0
+pcmk__bundle_assign: bundle10-ip-192.168.122.132 allocation score on rhel7-5: -INFINITY
+pcmk__bundle_assign: bundle10-ip-192.168.122.132 allocation score on rhel7-5: -INFINITY
+pcmk__bundle_assign: bundle11 allocation score on rhel7-1: 0
+pcmk__bundle_assign: bundle11 allocation score on rhel7-2: 0
+pcmk__bundle_assign: bundle11 allocation score on rhel7-3: 0
+pcmk__bundle_assign: bundle11 allocation score on rhel7-4: 0
+pcmk__bundle_assign: bundle11 allocation score on rhel7-5: -INFINITY
+pcmk__bundle_assign: bundle11-0 allocation score on rhel7-1: 10
+pcmk__bundle_assign: bundle11-0 allocation score on rhel7-2: 0
+pcmk__bundle_assign: bundle11-0 allocation score on rhel7-3: 0
+pcmk__bundle_assign: bundle11-0 allocation score on rhel7-4: 0
+pcmk__bundle_assign: bundle11-0 allocation score on rhel7-5: 0
+pcmk__bundle_assign: bundle11-1 allocation score on rhel7-1: 0
+pcmk__bundle_assign: bundle11-1 allocation score on rhel7-2: 0
+pcmk__bundle_assign: bundle11-1 allocation score on rhel7-3: 0
+pcmk__bundle_assign: bundle11-1 allocation score on rhel7-4: 0
+pcmk__bundle_assign: bundle11-1 allocation score on rhel7-5: 0
+pcmk__bundle_assign: bundle11-clone allocation score on bundle11-0: -INFINITY
+pcmk__bundle_assign: bundle11-clone allocation score on bundle11-1: -INFINITY
+pcmk__bundle_assign: bundle11-clone allocation score on rhel7-1: 0
+pcmk__bundle_assign: bundle11-clone allocation score on rhel7-2: 0
+pcmk__bundle_assign: bundle11-clone allocation score on rhel7-3: 0
+pcmk__bundle_assign: bundle11-clone allocation score on rhel7-4: 0
+pcmk__bundle_assign: bundle11-clone allocation score on rhel7-5: 0
+pcmk__bundle_assign: bundle11-docker-0 allocation score on rhel7-1: 10
+pcmk__bundle_assign: bundle11-docker-0 allocation score on rhel7-2: 0
+pcmk__bundle_assign: bundle11-docker-0 allocation score on rhel7-3: 0
+pcmk__bundle_assign: bundle11-docker-0 allocation score on rhel7-4: 0
+pcmk__bundle_assign: bundle11-docker-0 allocation score on rhel7-5: -INFINITY
+pcmk__bundle_assign: bundle11-docker-1 allocation score on rhel7-1: 0
+pcmk__bundle_assign: bundle11-docker-1 allocation score on rhel7-2: 0
+pcmk__bundle_assign: bundle11-docker-1 allocation score on rhel7-3: 0
+pcmk__bundle_assign: bundle11-docker-1 allocation score on rhel7-4: 0
+pcmk__bundle_assign: bundle11-docker-1 allocation score on rhel7-5: -INFINITY
+pcmk__bundle_assign: bundle11-ip-192.168.122.134 allocation score on rhel7-1: 10
+pcmk__bundle_assign: bundle11-ip-192.168.122.134 allocation score on rhel7-2: 0
+pcmk__bundle_assign: bundle11-ip-192.168.122.134 allocation score on rhel7-3: 0
+pcmk__bundle_assign: bundle11-ip-192.168.122.134 allocation score on rhel7-4: 0
+pcmk__bundle_assign: bundle11-ip-192.168.122.134 allocation score on rhel7-5: -INFINITY
+pcmk__bundle_assign: bundle11-ip-192.168.122.135 allocation score on rhel7-1: 0
+pcmk__bundle_assign: bundle11-ip-192.168.122.135 allocation score on rhel7-2: 0
+pcmk__bundle_assign: bundle11-ip-192.168.122.135 allocation score on rhel7-3: 0
+pcmk__bundle_assign: bundle11-ip-192.168.122.135 allocation score on rhel7-4: 0
+pcmk__bundle_assign: bundle11-ip-192.168.122.135 allocation score on rhel7-5: -INFINITY
+pcmk__bundle_assign: bundle11a:0 allocation score on bundle11-0: 510
+pcmk__bundle_assign: bundle11a:1 allocation score on bundle11-1: 500
+pcmk__bundle_assign: httpd:0 allocation score on bundle10-0: 510
+pcmk__bundle_assign: httpd:0 allocation score on bundle10-0: INFINITY
+pcmk__bundle_assign: httpd:1 allocation score on bundle10-1: 510
+pcmk__bundle_assign: httpd:1 allocation score on bundle10-1: INFINITY
pcmk__clone_assign: bundle10-clone allocation score on bundle10-0: 0
pcmk__clone_assign: bundle10-clone allocation score on bundle10-1: 0
pcmk__clone_assign: bundle10-clone allocation score on rhel7-1: -INFINITY
diff --git a/cts/scheduler/scores/complex_enforce_colo.scores b/cts/scheduler/scores/complex_enforce_colo.scores
index 9968e10..a5d0b2b 100644
--- a/cts/scheduler/scores/complex_enforce_colo.scores
+++ b/cts/scheduler/scores/complex_enforce_colo.scores
@@ -588,13 +588,22 @@ pcmk__primitive_assign: horizon:2 allocation score on rhos6-node1: -INFINITY
pcmk__primitive_assign: horizon:2 allocation score on rhos6-node2: -INFINITY
pcmk__primitive_assign: horizon:2 allocation score on rhos6-node3: 1
pcmk__primitive_assign: keystone:0 allocation score on rhos6-node1: -INFINITY
+pcmk__primitive_assign: keystone:0 allocation score on rhos6-node1: -INFINITY
+pcmk__primitive_assign: keystone:0 allocation score on rhos6-node2: -INFINITY
pcmk__primitive_assign: keystone:0 allocation score on rhos6-node2: -INFINITY
pcmk__primitive_assign: keystone:0 allocation score on rhos6-node3: -INFINITY
+pcmk__primitive_assign: keystone:0 allocation score on rhos6-node3: -INFINITY
pcmk__primitive_assign: keystone:1 allocation score on rhos6-node1: -INFINITY
+pcmk__primitive_assign: keystone:1 allocation score on rhos6-node1: -INFINITY
+pcmk__primitive_assign: keystone:1 allocation score on rhos6-node2: -INFINITY
pcmk__primitive_assign: keystone:1 allocation score on rhos6-node2: -INFINITY
pcmk__primitive_assign: keystone:1 allocation score on rhos6-node3: -INFINITY
+pcmk__primitive_assign: keystone:1 allocation score on rhos6-node3: -INFINITY
+pcmk__primitive_assign: keystone:2 allocation score on rhos6-node1: -INFINITY
pcmk__primitive_assign: keystone:2 allocation score on rhos6-node1: -INFINITY
pcmk__primitive_assign: keystone:2 allocation score on rhos6-node2: -INFINITY
+pcmk__primitive_assign: keystone:2 allocation score on rhos6-node2: -INFINITY
+pcmk__primitive_assign: keystone:2 allocation score on rhos6-node3: -INFINITY
pcmk__primitive_assign: keystone:2 allocation score on rhos6-node3: -INFINITY
pcmk__primitive_assign: lb-haproxy:0 allocation score on rhos6-node1: 1
pcmk__primitive_assign: lb-haproxy:0 allocation score on rhos6-node2: 0
diff --git a/cts/scheduler/scores/enforce-colo1.scores b/cts/scheduler/scores/enforce-colo1.scores
index 8194789..262cbd9 100644
--- a/cts/scheduler/scores/enforce-colo1.scores
+++ b/cts/scheduler/scores/enforce-colo1.scores
@@ -18,13 +18,22 @@ pcmk__primitive_assign: engine allocation score on rhel7-auto1: -INFINITY
pcmk__primitive_assign: engine allocation score on rhel7-auto2: -INFINITY
pcmk__primitive_assign: engine allocation score on rhel7-auto3: 0
pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto1: -INFINITY
+pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto1: -INFINITY
+pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto2: -INFINITY
pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto2: -INFINITY
pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto3: -INFINITY
+pcmk__primitive_assign: keystone:0 allocation score on rhel7-auto3: -INFINITY
pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto1: -INFINITY
+pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto1: -INFINITY
+pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto2: -INFINITY
pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto2: -INFINITY
pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto3: -INFINITY
+pcmk__primitive_assign: keystone:1 allocation score on rhel7-auto3: -INFINITY
+pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto1: -INFINITY
pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto1: -INFINITY
pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto2: -INFINITY
+pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto2: -INFINITY
+pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto3: -INFINITY
pcmk__primitive_assign: keystone:2 allocation score on rhel7-auto3: -INFINITY
pcmk__primitive_assign: shooter allocation score on rhel7-auto1: 0
pcmk__primitive_assign: shooter allocation score on rhel7-auto2: 0
diff --git a/cts/scheduler/scores/group-anticolocation-2.scores b/cts/scheduler/scores/group-anticolocation-2.scores
new file mode 100644
index 0000000..ab0a4c9
--- /dev/null
+++ b/cts/scheduler/scores/group-anticolocation-2.scores
@@ -0,0 +1,23 @@
+
+pcmk__group_assign: group1 allocation score on node1: 0
+pcmk__group_assign: group1 allocation score on node2: 0
+pcmk__group_assign: group2 allocation score on node1: 0
+pcmk__group_assign: group2 allocation score on node2: 0
+pcmk__group_assign: member1a allocation score on node1: 0
+pcmk__group_assign: member1a allocation score on node2: INFINITY
+pcmk__group_assign: member1b allocation score on node1: 0
+pcmk__group_assign: member1b allocation score on node2: INFINITY
+pcmk__group_assign: member2a allocation score on node1: INFINITY
+pcmk__group_assign: member2a allocation score on node2: 0
+pcmk__group_assign: member2b allocation score on node1: -INFINITY
+pcmk__group_assign: member2b allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node1: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: member1a allocation score on node1: -5000
+pcmk__primitive_assign: member1a allocation score on node2: INFINITY
+pcmk__primitive_assign: member1b allocation score on node1: -INFINITY
+pcmk__primitive_assign: member1b allocation score on node2: INFINITY
+pcmk__primitive_assign: member2a allocation score on node1: -INFINITY
+pcmk__primitive_assign: member2a allocation score on node2: 0
+pcmk__primitive_assign: member2b allocation score on node1: -INFINITY
+pcmk__primitive_assign: member2b allocation score on node2: 0
diff --git a/cts/scheduler/scores/group-anticolocation-3.scores b/cts/scheduler/scores/group-anticolocation-3.scores
new file mode 100644
index 0000000..5b2b8e4
--- /dev/null
+++ b/cts/scheduler/scores/group-anticolocation-3.scores
@@ -0,0 +1,23 @@
+
+pcmk__group_assign: group1 allocation score on node1: 0
+pcmk__group_assign: group1 allocation score on node2: 0
+pcmk__group_assign: group2 allocation score on node1: 0
+pcmk__group_assign: group2 allocation score on node2: 0
+pcmk__group_assign: member1a allocation score on node1: 0
+pcmk__group_assign: member1a allocation score on node2: INFINITY
+pcmk__group_assign: member1b allocation score on node1: 0
+pcmk__group_assign: member1b allocation score on node2: INFINITY
+pcmk__group_assign: member2a allocation score on node1: INFINITY
+pcmk__group_assign: member2a allocation score on node2: 0
+pcmk__group_assign: member2b allocation score on node1: -INFINITY
+pcmk__group_assign: member2b allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node1: INFINITY
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: member1a allocation score on node1: -INFINITY
+pcmk__primitive_assign: member1a allocation score on node2: INFINITY
+pcmk__primitive_assign: member1b allocation score on node1: -INFINITY
+pcmk__primitive_assign: member1b allocation score on node2: INFINITY
+pcmk__primitive_assign: member2a allocation score on node1: INFINITY
+pcmk__primitive_assign: member2a allocation score on node2: -INFINITY
+pcmk__primitive_assign: member2b allocation score on node1: -INFINITY
+pcmk__primitive_assign: member2b allocation score on node2: -INFINITY
diff --git a/cts/scheduler/scores/group-anticolocation-4.scores b/cts/scheduler/scores/group-anticolocation-4.scores
new file mode 100644
index 0000000..4449511
--- /dev/null
+++ b/cts/scheduler/scores/group-anticolocation-4.scores
@@ -0,0 +1,23 @@
+
+pcmk__group_assign: group1 allocation score on node1: 0
+pcmk__group_assign: group1 allocation score on node2: 0
+pcmk__group_assign: group2 allocation score on node1: 0
+pcmk__group_assign: group2 allocation score on node2: 0
+pcmk__group_assign: member1a allocation score on node1: 0
+pcmk__group_assign: member1a allocation score on node2: 0
+pcmk__group_assign: member1b allocation score on node1: 0
+pcmk__group_assign: member1b allocation score on node2: 0
+pcmk__group_assign: member2a allocation score on node1: 0
+pcmk__group_assign: member2a allocation score on node2: 0
+pcmk__group_assign: member2b allocation score on node1: -INFINITY
+pcmk__group_assign: member2b allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: member1a allocation score on node1: 0
+pcmk__primitive_assign: member1a allocation score on node2: 0
+pcmk__primitive_assign: member1b allocation score on node1: -INFINITY
+pcmk__primitive_assign: member1b allocation score on node2: 0
+pcmk__primitive_assign: member2a allocation score on node1: -INFINITY
+pcmk__primitive_assign: member2a allocation score on node2: 0
+pcmk__primitive_assign: member2b allocation score on node1: -INFINITY
+pcmk__primitive_assign: member2b allocation score on node2: 0
diff --git a/cts/scheduler/scores/group-anticolocation-5.scores b/cts/scheduler/scores/group-anticolocation-5.scores
new file mode 100644
index 0000000..2af165f
--- /dev/null
+++ b/cts/scheduler/scores/group-anticolocation-5.scores
@@ -0,0 +1,34 @@
+
+pcmk__group_assign: group1 allocation score on node1: 0
+pcmk__group_assign: group1 allocation score on node2: 0
+pcmk__group_assign: group1 allocation score on node3: 0
+pcmk__group_assign: group2 allocation score on node1: 0
+pcmk__group_assign: group2 allocation score on node2: 0
+pcmk__group_assign: group2 allocation score on node3: 0
+pcmk__group_assign: member1a allocation score on node1: 0
+pcmk__group_assign: member1a allocation score on node2: 0
+pcmk__group_assign: member1a allocation score on node3: 0
+pcmk__group_assign: member1b allocation score on node1: 0
+pcmk__group_assign: member1b allocation score on node2: 0
+pcmk__group_assign: member1b allocation score on node3: 0
+pcmk__group_assign: member2a allocation score on node1: 0
+pcmk__group_assign: member2a allocation score on node2: 0
+pcmk__group_assign: member2a allocation score on node3: 0
+pcmk__group_assign: member2b allocation score on node1: -INFINITY
+pcmk__group_assign: member2b allocation score on node2: 0
+pcmk__group_assign: member2b allocation score on node3: 0
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: member1a allocation score on node1: 0
+pcmk__primitive_assign: member1a allocation score on node2: 0
+pcmk__primitive_assign: member1a allocation score on node3: 0
+pcmk__primitive_assign: member1b allocation score on node1: -INFINITY
+pcmk__primitive_assign: member1b allocation score on node2: 0
+pcmk__primitive_assign: member1b allocation score on node3: -INFINITY
+pcmk__primitive_assign: member2a allocation score on node1: -INFINITY
+pcmk__primitive_assign: member2a allocation score on node2: -5000
+pcmk__primitive_assign: member2a allocation score on node3: 0
+pcmk__primitive_assign: member2b allocation score on node1: -INFINITY
+pcmk__primitive_assign: member2b allocation score on node2: -INFINITY
+pcmk__primitive_assign: member2b allocation score on node3: 0
diff --git a/cts/scheduler/scores/group-anticolocation.scores b/cts/scheduler/scores/group-anticolocation.scores
index 4449511..5d38fa1 100644
--- a/cts/scheduler/scores/group-anticolocation.scores
+++ b/cts/scheduler/scores/group-anticolocation.scores
@@ -13,10 +13,10 @@ pcmk__group_assign: member2b allocation score on node1: -INFINITY
pcmk__group_assign: member2b allocation score on node2: 0
pcmk__primitive_assign: Fencing allocation score on node1: 0
pcmk__primitive_assign: Fencing allocation score on node2: 0
-pcmk__primitive_assign: member1a allocation score on node1: 0
+pcmk__primitive_assign: member1a allocation score on node1: 5000
pcmk__primitive_assign: member1a allocation score on node2: 0
-pcmk__primitive_assign: member1b allocation score on node1: -INFINITY
-pcmk__primitive_assign: member1b allocation score on node2: 0
+pcmk__primitive_assign: member1b allocation score on node1: 5000
+pcmk__primitive_assign: member1b allocation score on node2: -INFINITY
pcmk__primitive_assign: member2a allocation score on node1: -INFINITY
pcmk__primitive_assign: member2a allocation score on node2: 0
pcmk__primitive_assign: member2b allocation score on node1: -INFINITY
diff --git a/cts/scheduler/scores/group-dependents.scores b/cts/scheduler/scores/group-dependents.scores
index 22ed3e5..ece06a3 100644
--- a/cts/scheduler/scores/group-dependents.scores
+++ b/cts/scheduler/scores/group-dependents.scores
@@ -57,14 +57,16 @@ pcmk__primitive_assign: asterisk allocation score on asttest1: -INFINITY
pcmk__primitive_assign: asterisk allocation score on asttest2: 0
pcmk__primitive_assign: dahdi allocation score on asttest1: -INFINITY
pcmk__primitive_assign: dahdi allocation score on asttest2: 0
-pcmk__primitive_assign: drbd:0 allocation score on asttest1: 6
-pcmk__primitive_assign: drbd:0 allocation score on asttest2: 0
+pcmk__primitive_assign: drbd:0 allocation score on asttest1: -INFINITY
+pcmk__primitive_assign: drbd:0 allocation score on asttest1: 8
+pcmk__primitive_assign: drbd:0 allocation score on asttest2: -INFINITY
+pcmk__primitive_assign: drbd:0 allocation score on asttest2: 1
pcmk__primitive_assign: drbd:1 allocation score on asttest1: -INFINITY
-pcmk__primitive_assign: drbd:1 allocation score on asttest2: 6
+pcmk__primitive_assign: drbd:1 allocation score on asttest2: 7
pcmk__primitive_assign: fonulator allocation score on asttest1: -INFINITY
pcmk__primitive_assign: fonulator allocation score on asttest2: 0
pcmk__primitive_assign: fs_drbd allocation score on asttest1: -INFINITY
-pcmk__primitive_assign: fs_drbd allocation score on asttest2: 7
+pcmk__primitive_assign: fs_drbd allocation score on asttest2: 8
pcmk__primitive_assign: httpd allocation score on asttest1: -INFINITY
pcmk__primitive_assign: httpd allocation score on asttest2: 0
pcmk__primitive_assign: iax2_mon allocation score on asttest1: -INFINITY
diff --git a/cts/scheduler/scores/guest-host-not-fenceable.scores b/cts/scheduler/scores/guest-host-not-fenceable.scores
index e4c7fc2..21f5daa 100644
--- a/cts/scheduler/scores/guest-host-not-fenceable.scores
+++ b/cts/scheduler/scores/guest-host-not-fenceable.scores
@@ -1,67 +1,67 @@
galera:0 promotion score on galera-bundle-0: 100
-galera:1 promotion score on galera-bundle-1: 100
+galera:1 promotion score on galera-bundle-1: -1
galera:2 promotion score on galera-bundle-2: -1
-pcmk__bundle_allocate: galera-bundle allocation score on node1: 0
-pcmk__bundle_allocate: galera-bundle allocation score on node2: 0
-pcmk__bundle_allocate: galera-bundle allocation score on node3: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on node1: INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on node2: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on node3: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on node1: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on node2: INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on node3: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on node1: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on node2: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on node3: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on node1: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on node2: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on node3: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on node1: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on node2: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on node3: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on node1: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on node2: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on node3: INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on node1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on node2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on node3: 0
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: INFINITY
-pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: INFINITY
-pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on node1: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on node2: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on node3: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on node1: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on node2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on node3: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on node1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on node2: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on node3: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on node1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on node2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on node3: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on node1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on node2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on node3: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on node1: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on node2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on node3: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on node1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on node2: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on node3: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on node1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on node2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on node3: INFINITY
-pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
-pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY
-pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on node1: 0
+pcmk__bundle_assign: galera-bundle allocation score on node2: 0
+pcmk__bundle_assign: galera-bundle allocation score on node3: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on node1: INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on node2: INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on node3: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on node1: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on node2: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on node3: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on node1: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on node2: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on node3: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on node1: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on node2: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on node3: INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on node1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on node2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on node3: 0
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: INFINITY
+pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: INFINITY
+pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on node1: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on node2: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on node3: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on node1: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on node2: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on node3: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on node1: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on node2: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on node3: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on node1: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on node2: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on node3: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on node1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on node2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on node3: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on node1: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on node2: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on node3: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on node1: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on node2: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on node3: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on node1: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on node2: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on node3: INFINITY
+pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
+pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY
+pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-0: 0
pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-1: 0
pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-2: 0
diff --git a/cts/scheduler/scores/load-stopped-loop-2.scores b/cts/scheduler/scores/load-stopped-loop-2.scores
index 0b28a72..704ca03 100644
--- a/cts/scheduler/scores/load-stopped-loop-2.scores
+++ b/cts/scheduler/scores/load-stopped-loop-2.scores
@@ -42,7 +42,7 @@ pcmk__clone_assign: p_glusterd:3 allocation score on xfc3: 0
pcmk__primitive_assign: p_bl_glusterfs:0 allocation score on xfc0: -INFINITY
pcmk__primitive_assign: p_bl_glusterfs:0 allocation score on xfc1: -INFINITY
pcmk__primitive_assign: p_bl_glusterfs:0 allocation score on xfc2: -INFINITY
-pcmk__primitive_assign: p_bl_glusterfs:0 allocation score on xfc3: 1
+pcmk__primitive_assign: p_bl_glusterfs:0 allocation score on xfc3: INFINITY
pcmk__primitive_assign: p_bl_glusterfs:1 allocation score on xfc0: 1
pcmk__primitive_assign: p_bl_glusterfs:1 allocation score on xfc1: -INFINITY
pcmk__primitive_assign: p_bl_glusterfs:1 allocation score on xfc2: -INFINITY
@@ -58,7 +58,7 @@ pcmk__primitive_assign: p_bl_glusterfs:3 allocation score on xfc3: -INFINITY
pcmk__primitive_assign: p_glusterd:0 allocation score on xfc0: 0
pcmk__primitive_assign: p_glusterd:0 allocation score on xfc1: 0
pcmk__primitive_assign: p_glusterd:0 allocation score on xfc2: 0
-pcmk__primitive_assign: p_glusterd:0 allocation score on xfc3: 1
+pcmk__primitive_assign: p_glusterd:0 allocation score on xfc3: INFINITY
pcmk__primitive_assign: p_glusterd:1 allocation score on xfc0: 1
pcmk__primitive_assign: p_glusterd:1 allocation score on xfc1: 0
pcmk__primitive_assign: p_glusterd:1 allocation score on xfc2: 0
diff --git a/cts/scheduler/scores/load-stopped-loop.scores b/cts/scheduler/scores/load-stopped-loop.scores
index 1a35119..17f8b48 100644
--- a/cts/scheduler/scores/load-stopped-loop.scores
+++ b/cts/scheduler/scores/load-stopped-loop.scores
@@ -881,8 +881,14 @@ pcmk__primitive_assign: dist.express-consult.org-vm allocation score on v03-b: -
pcmk__primitive_assign: dist.fly-uni.org-vm allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: dist.fly-uni.org-vm allocation score on v03-a: -INFINITY
pcmk__primitive_assign: dist.fly-uni.org-vm allocation score on v03-b: -INFINITY
+pcmk__primitive_assign: dlm:0 allocation score on mgmt01: -INFINITY
+pcmk__primitive_assign: dlm:0 allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on mgmt01: 1
+pcmk__primitive_assign: dlm:0 allocation score on v03-a: -INFINITY
+pcmk__primitive_assign: dlm:0 allocation score on v03-a: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on v03-a: 0
+pcmk__primitive_assign: dlm:0 allocation score on v03-b: -INFINITY
+pcmk__primitive_assign: dlm:0 allocation score on v03-b: 0
pcmk__primitive_assign: dlm:0 allocation score on v03-b: 0
pcmk__primitive_assign: dlm:1 allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: dlm:1 allocation score on v03-a: 0
@@ -932,8 +938,14 @@ pcmk__primitive_assign: gw.gleb.vds-ok.com-vm allocation score on v03-b: -INFINI
pcmk__primitive_assign: gw.gotin.vds-ok.com-vm allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: gw.gotin.vds-ok.com-vm allocation score on v03-a: -INFINITY
pcmk__primitive_assign: gw.gotin.vds-ok.com-vm allocation score on v03-b: -INFINITY
+pcmk__primitive_assign: iscsid:0 allocation score on mgmt01: -INFINITY
+pcmk__primitive_assign: iscsid:0 allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: iscsid:0 allocation score on mgmt01: 1
+pcmk__primitive_assign: iscsid:0 allocation score on v03-a: -INFINITY
+pcmk__primitive_assign: iscsid:0 allocation score on v03-a: -INFINITY
pcmk__primitive_assign: iscsid:0 allocation score on v03-a: 0
+pcmk__primitive_assign: iscsid:0 allocation score on v03-b: -INFINITY
+pcmk__primitive_assign: iscsid:0 allocation score on v03-b: 0
pcmk__primitive_assign: iscsid:0 allocation score on v03-b: 0
pcmk__primitive_assign: iscsid:1 allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: iscsid:1 allocation score on v03-a: 0
@@ -965,8 +977,14 @@ pcmk__primitive_assign: ktstudio.net-vm allocation score on v03-b: 0
pcmk__primitive_assign: lenny-x32-devel-vm allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: lenny-x32-devel-vm allocation score on v03-a: 0
pcmk__primitive_assign: lenny-x32-devel-vm allocation score on v03-b: 0
+pcmk__primitive_assign: libvirt-images-fs:0 allocation score on mgmt01: -INFINITY
+pcmk__primitive_assign: libvirt-images-fs:0 allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: libvirt-images-fs:0 allocation score on mgmt01: 1
+pcmk__primitive_assign: libvirt-images-fs:0 allocation score on v03-a: -INFINITY
+pcmk__primitive_assign: libvirt-images-fs:0 allocation score on v03-a: -INFINITY
pcmk__primitive_assign: libvirt-images-fs:0 allocation score on v03-a: 0
+pcmk__primitive_assign: libvirt-images-fs:0 allocation score on v03-b: -INFINITY
+pcmk__primitive_assign: libvirt-images-fs:0 allocation score on v03-b: 0
pcmk__primitive_assign: libvirt-images-fs:0 allocation score on v03-b: 0
pcmk__primitive_assign: libvirt-images-fs:1 allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: libvirt-images-fs:1 allocation score on v03-a: 0
@@ -1017,7 +1035,13 @@ pcmk__primitive_assign: libvirt-images-pool:7 allocation score on mgmt01: -INFIN
pcmk__primitive_assign: libvirt-images-pool:7 allocation score on v03-a: -INFINITY
pcmk__primitive_assign: libvirt-images-pool:7 allocation score on v03-b: -INFINITY
pcmk__primitive_assign: libvirt-install-fs:0 allocation score on mgmt01: 1
+pcmk__primitive_assign: libvirt-install-fs:0 allocation score on mgmt01: 1
+pcmk__primitive_assign: libvirt-install-fs:0 allocation score on mgmt01: 1
+pcmk__primitive_assign: libvirt-install-fs:0 allocation score on v03-a: -INFINITY
+pcmk__primitive_assign: libvirt-install-fs:0 allocation score on v03-a: -INFINITY
pcmk__primitive_assign: libvirt-install-fs:0 allocation score on v03-a: 0
+pcmk__primitive_assign: libvirt-install-fs:0 allocation score on v03-b: -INFINITY
+pcmk__primitive_assign: libvirt-install-fs:0 allocation score on v03-b: 0
pcmk__primitive_assign: libvirt-install-fs:0 allocation score on v03-b: 0
pcmk__primitive_assign: libvirt-install-fs:1 allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: libvirt-install-fs:1 allocation score on v03-a: 0
@@ -1196,8 +1220,14 @@ pcmk__primitive_assign: mcast-test-net:7 allocation score on v03-b: -INFINITY
pcmk__primitive_assign: metae.ru-vm allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: metae.ru-vm allocation score on v03-a: -INFINITY
pcmk__primitive_assign: metae.ru-vm allocation score on v03-b: -INFINITY
+pcmk__primitive_assign: multipathd:0 allocation score on mgmt01: -INFINITY
+pcmk__primitive_assign: multipathd:0 allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: multipathd:0 allocation score on mgmt01: 1
+pcmk__primitive_assign: multipathd:0 allocation score on v03-a: -INFINITY
+pcmk__primitive_assign: multipathd:0 allocation score on v03-a: -INFINITY
pcmk__primitive_assign: multipathd:0 allocation score on v03-a: 0
+pcmk__primitive_assign: multipathd:0 allocation score on v03-b: -INFINITY
+pcmk__primitive_assign: multipathd:0 allocation score on v03-b: 0
pcmk__primitive_assign: multipathd:0 allocation score on v03-b: 0
pcmk__primitive_assign: multipathd:1 allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: multipathd:1 allocation score on v03-a: 0
diff --git a/cts/scheduler/scores/migrate-begin.scores b/cts/scheduler/scores/migrate-begin.scores
index 4763646..7d0c5c3 100644
--- a/cts/scheduler/scores/migrate-begin.scores
+++ b/cts/scheduler/scores/migrate-begin.scores
@@ -5,7 +5,9 @@ pcmk__clone_assign: dlm:0 allocation score on hex-13: 0
pcmk__clone_assign: dlm:0 allocation score on hex-14: 1
pcmk__clone_assign: dlm:1 allocation score on hex-13: 1
pcmk__clone_assign: dlm:1 allocation score on hex-14: 0
+pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-13: 0
+pcmk__primitive_assign: dlm:0 allocation score on hex-14: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1
pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1
pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY
diff --git a/cts/scheduler/scores/migrate-fail-2.scores b/cts/scheduler/scores/migrate-fail-2.scores
index 4763646..7d0c5c3 100644
--- a/cts/scheduler/scores/migrate-fail-2.scores
+++ b/cts/scheduler/scores/migrate-fail-2.scores
@@ -5,7 +5,9 @@ pcmk__clone_assign: dlm:0 allocation score on hex-13: 0
pcmk__clone_assign: dlm:0 allocation score on hex-14: 1
pcmk__clone_assign: dlm:1 allocation score on hex-13: 1
pcmk__clone_assign: dlm:1 allocation score on hex-14: 0
+pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-13: 0
+pcmk__primitive_assign: dlm:0 allocation score on hex-14: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1
pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1
pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY
diff --git a/cts/scheduler/scores/migrate-fail-3.scores b/cts/scheduler/scores/migrate-fail-3.scores
index 159b82b..b75abc0 100644
--- a/cts/scheduler/scores/migrate-fail-3.scores
+++ b/cts/scheduler/scores/migrate-fail-3.scores
@@ -7,7 +7,7 @@ pcmk__clone_assign: dlm:1 allocation score on hex-13: 1
pcmk__clone_assign: dlm:1 allocation score on hex-14: 0
pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1
-pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1
-pcmk__primitive_assign: dlm:1 allocation score on hex-14: 0
+pcmk__primitive_assign: dlm:1 allocation score on hex-13: 2
+pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY
pcmk__primitive_assign: test-vm allocation score on hex-13: 1
pcmk__primitive_assign: test-vm allocation score on hex-14: -INFINITY
diff --git a/cts/scheduler/scores/migrate-fail-4.scores b/cts/scheduler/scores/migrate-fail-4.scores
index 4763646..7d0c5c3 100644
--- a/cts/scheduler/scores/migrate-fail-4.scores
+++ b/cts/scheduler/scores/migrate-fail-4.scores
@@ -5,7 +5,9 @@ pcmk__clone_assign: dlm:0 allocation score on hex-13: 0
pcmk__clone_assign: dlm:0 allocation score on hex-14: 1
pcmk__clone_assign: dlm:1 allocation score on hex-13: 1
pcmk__clone_assign: dlm:1 allocation score on hex-14: 0
+pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-13: 0
+pcmk__primitive_assign: dlm:0 allocation score on hex-14: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1
pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1
pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY
diff --git a/cts/scheduler/scores/migrate-fail-5.scores b/cts/scheduler/scores/migrate-fail-5.scores
index 4763646..7d0c5c3 100644
--- a/cts/scheduler/scores/migrate-fail-5.scores
+++ b/cts/scheduler/scores/migrate-fail-5.scores
@@ -5,7 +5,9 @@ pcmk__clone_assign: dlm:0 allocation score on hex-13: 0
pcmk__clone_assign: dlm:0 allocation score on hex-14: 1
pcmk__clone_assign: dlm:1 allocation score on hex-13: 1
pcmk__clone_assign: dlm:1 allocation score on hex-14: 0
+pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-13: 0
+pcmk__primitive_assign: dlm:0 allocation score on hex-14: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1
pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1
pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY
diff --git a/cts/scheduler/scores/migrate-fail-6.scores b/cts/scheduler/scores/migrate-fail-6.scores
index 4763646..7d0c5c3 100644
--- a/cts/scheduler/scores/migrate-fail-6.scores
+++ b/cts/scheduler/scores/migrate-fail-6.scores
@@ -5,7 +5,9 @@ pcmk__clone_assign: dlm:0 allocation score on hex-13: 0
pcmk__clone_assign: dlm:0 allocation score on hex-14: 1
pcmk__clone_assign: dlm:1 allocation score on hex-13: 1
pcmk__clone_assign: dlm:1 allocation score on hex-14: 0
+pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-13: 0
+pcmk__primitive_assign: dlm:0 allocation score on hex-14: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1
pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1
pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY
diff --git a/cts/scheduler/scores/migrate-fail-7.scores b/cts/scheduler/scores/migrate-fail-7.scores
index 159b82b..b75abc0 100644
--- a/cts/scheduler/scores/migrate-fail-7.scores
+++ b/cts/scheduler/scores/migrate-fail-7.scores
@@ -7,7 +7,7 @@ pcmk__clone_assign: dlm:1 allocation score on hex-13: 1
pcmk__clone_assign: dlm:1 allocation score on hex-14: 0
pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1
-pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1
-pcmk__primitive_assign: dlm:1 allocation score on hex-14: 0
+pcmk__primitive_assign: dlm:1 allocation score on hex-13: 2
+pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY
pcmk__primitive_assign: test-vm allocation score on hex-13: 1
pcmk__primitive_assign: test-vm allocation score on hex-14: -INFINITY
diff --git a/cts/scheduler/scores/migrate-fail-8.scores b/cts/scheduler/scores/migrate-fail-8.scores
index 4763646..7d0c5c3 100644
--- a/cts/scheduler/scores/migrate-fail-8.scores
+++ b/cts/scheduler/scores/migrate-fail-8.scores
@@ -5,7 +5,9 @@ pcmk__clone_assign: dlm:0 allocation score on hex-13: 0
pcmk__clone_assign: dlm:0 allocation score on hex-14: 1
pcmk__clone_assign: dlm:1 allocation score on hex-13: 1
pcmk__clone_assign: dlm:1 allocation score on hex-14: 0
+pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-13: 0
+pcmk__primitive_assign: dlm:0 allocation score on hex-14: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1
pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1
pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY
diff --git a/cts/scheduler/scores/migrate-fail-9.scores b/cts/scheduler/scores/migrate-fail-9.scores
index 4763646..7d0c5c3 100644
--- a/cts/scheduler/scores/migrate-fail-9.scores
+++ b/cts/scheduler/scores/migrate-fail-9.scores
@@ -5,7 +5,9 @@ pcmk__clone_assign: dlm:0 allocation score on hex-13: 0
pcmk__clone_assign: dlm:0 allocation score on hex-14: 1
pcmk__clone_assign: dlm:1 allocation score on hex-13: 1
pcmk__clone_assign: dlm:1 allocation score on hex-14: 0
+pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-13: 0
+pcmk__primitive_assign: dlm:0 allocation score on hex-14: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1
pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1
pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY
diff --git a/cts/scheduler/scores/migrate-partial-1.scores b/cts/scheduler/scores/migrate-partial-1.scores
index 159b82b..b75abc0 100644
--- a/cts/scheduler/scores/migrate-partial-1.scores
+++ b/cts/scheduler/scores/migrate-partial-1.scores
@@ -7,7 +7,7 @@ pcmk__clone_assign: dlm:1 allocation score on hex-13: 1
pcmk__clone_assign: dlm:1 allocation score on hex-14: 0
pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1
-pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1
-pcmk__primitive_assign: dlm:1 allocation score on hex-14: 0
+pcmk__primitive_assign: dlm:1 allocation score on hex-13: 2
+pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY
pcmk__primitive_assign: test-vm allocation score on hex-13: 1
pcmk__primitive_assign: test-vm allocation score on hex-14: -INFINITY
diff --git a/cts/scheduler/scores/migrate-partial-2.scores b/cts/scheduler/scores/migrate-partial-2.scores
index 4763646..7d0c5c3 100644
--- a/cts/scheduler/scores/migrate-partial-2.scores
+++ b/cts/scheduler/scores/migrate-partial-2.scores
@@ -5,7 +5,9 @@ pcmk__clone_assign: dlm:0 allocation score on hex-13: 0
pcmk__clone_assign: dlm:0 allocation score on hex-14: 1
pcmk__clone_assign: dlm:1 allocation score on hex-13: 1
pcmk__clone_assign: dlm:1 allocation score on hex-14: 0
+pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-13: 0
+pcmk__primitive_assign: dlm:0 allocation score on hex-14: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1
pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1
pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY
diff --git a/cts/scheduler/scores/migrate-partial-3.scores b/cts/scheduler/scores/migrate-partial-3.scores
index cfcd402..cec2f31 100644
--- a/cts/scheduler/scores/migrate-partial-3.scores
+++ b/cts/scheduler/scores/migrate-partial-3.scores
@@ -11,9 +11,12 @@ pcmk__clone_assign: dlm:1 allocation score on hex-15: 0
pcmk__clone_assign: dlm:2 allocation score on hex-13: 0
pcmk__clone_assign: dlm:2 allocation score on hex-14: 0
pcmk__clone_assign: dlm:2 allocation score on hex-15: 0
+pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-13: 0
+pcmk__primitive_assign: dlm:0 allocation score on hex-14: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1
pcmk__primitive_assign: dlm:0 allocation score on hex-15: -INFINITY
+pcmk__primitive_assign: dlm:0 allocation score on hex-15: -INFINITY
pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1
pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY
pcmk__primitive_assign: dlm:1 allocation score on hex-15: -INFINITY
diff --git a/cts/scheduler/scores/migrate-start-complex.scores b/cts/scheduler/scores/migrate-start-complex.scores
index 31f46d3..859664c 100644
--- a/cts/scheduler/scores/migrate-start-complex.scores
+++ b/cts/scheduler/scores/migrate-start-complex.scores
@@ -15,21 +15,27 @@ pcmk__clone_assign: dom0-iscsi1:0 allocation score on dom0-01: 0
pcmk__clone_assign: dom0-iscsi1:0 allocation score on dom0-02: 0
pcmk__clone_assign: dom0-iscsi1:1 allocation score on dom0-01: 0
pcmk__clone_assign: dom0-iscsi1:1 allocation score on dom0-02: 0
+pcmk__group_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: -INFINITY
pcmk__group_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: 0
pcmk__group_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-02: 5000
+pcmk__group_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-02: 5000
pcmk__group_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-01: 0
pcmk__group_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-02: -INFINITY
+pcmk__group_assign: dom0-iscsi1:0 allocation score on dom0-01: -INFINITY
pcmk__group_assign: dom0-iscsi1:0 allocation score on dom0-01: 0
pcmk__group_assign: dom0-iscsi1:0 allocation score on dom0-02: 0
+pcmk__group_assign: dom0-iscsi1:0 allocation score on dom0-02: 0
pcmk__group_assign: dom0-iscsi1:1 allocation score on dom0-01: 0
pcmk__group_assign: dom0-iscsi1:1 allocation score on dom0-02: -INFINITY
-pcmk__primitive_assign: bottom:0 allocation score on dom0-01: 0
-pcmk__primitive_assign: bottom:0 allocation score on dom0-02: 0
+pcmk__primitive_assign: bottom:0 allocation score on dom0-01: INFINITY
+pcmk__primitive_assign: bottom:0 allocation score on dom0-02: 10000
pcmk__primitive_assign: bottom:1 allocation score on dom0-01: -INFINITY
-pcmk__primitive_assign: bottom:1 allocation score on dom0-02: 0
-pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: 0
-pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-02: 5000
-pcmk__primitive_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-01: 0
+pcmk__primitive_assign: bottom:1 allocation score on dom0-02: 10000
+pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: -INFINITY
+pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: INFINITY
+pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-02: 15000
+pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-02: 15000
+pcmk__primitive_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-01: INFINITY
pcmk__primitive_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-02: -INFINITY
pcmk__primitive_assign: domU-test01 allocation score on dom0-01: INFINITY
pcmk__primitive_assign: domU-test01 allocation score on dom0-02: 10000
diff --git a/cts/scheduler/scores/migrate-start.scores b/cts/scheduler/scores/migrate-start.scores
index 277e152..3cc2f29 100644
--- a/cts/scheduler/scores/migrate-start.scores
+++ b/cts/scheduler/scores/migrate-start.scores
@@ -9,17 +9,23 @@ pcmk__clone_assign: dom0-iscsi1:0 allocation score on dom0-01: 0
pcmk__clone_assign: dom0-iscsi1:0 allocation score on dom0-02: 0
pcmk__clone_assign: dom0-iscsi1:1 allocation score on dom0-01: 0
pcmk__clone_assign: dom0-iscsi1:1 allocation score on dom0-02: 0
+pcmk__group_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: -INFINITY
pcmk__group_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: 0
pcmk__group_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-02: 5000
+pcmk__group_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-02: 5000
pcmk__group_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-01: 0
pcmk__group_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-02: -INFINITY
+pcmk__group_assign: dom0-iscsi1:0 allocation score on dom0-01: -INFINITY
pcmk__group_assign: dom0-iscsi1:0 allocation score on dom0-01: 0
pcmk__group_assign: dom0-iscsi1:0 allocation score on dom0-02: 0
+pcmk__group_assign: dom0-iscsi1:0 allocation score on dom0-02: 0
pcmk__group_assign: dom0-iscsi1:1 allocation score on dom0-01: 0
pcmk__group_assign: dom0-iscsi1:1 allocation score on dom0-02: -INFINITY
-pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: 0
-pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-02: 5000
-pcmk__primitive_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-01: 0
+pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: -INFINITY
+pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: INFINITY
+pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-02: 10000
+pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-02: 10000
+pcmk__primitive_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-01: INFINITY
pcmk__primitive_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-02: -INFINITY
pcmk__primitive_assign: domU-test01 allocation score on dom0-01: INFINITY
pcmk__primitive_assign: domU-test01 allocation score on dom0-02: 5000
diff --git a/cts/scheduler/scores/migrate-stop-start-complex.scores b/cts/scheduler/scores/migrate-stop-start-complex.scores
index 78a5dc3..62ff5f2 100644
--- a/cts/scheduler/scores/migrate-stop-start-complex.scores
+++ b/cts/scheduler/scores/migrate-stop-start-complex.scores
@@ -23,11 +23,11 @@ pcmk__group_assign: dom0-iscsi1:0 allocation score on dom0-01: 0
pcmk__group_assign: dom0-iscsi1:0 allocation score on dom0-02: -INFINITY
pcmk__group_assign: dom0-iscsi1:1 allocation score on dom0-01: -INFINITY
pcmk__group_assign: dom0-iscsi1:1 allocation score on dom0-02: -INFINITY
-pcmk__primitive_assign: bottom:0 allocation score on dom0-01: 0
+pcmk__primitive_assign: bottom:0 allocation score on dom0-01: 5000
pcmk__primitive_assign: bottom:0 allocation score on dom0-02: -INFINITY
pcmk__primitive_assign: bottom:1 allocation score on dom0-01: -INFINITY
pcmk__primitive_assign: bottom:1 allocation score on dom0-02: -INFINITY
-pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: 5000
+pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-01: 10000
pcmk__primitive_assign: dom0-iscsi1-cnx1:0 allocation score on dom0-02: -INFINITY
pcmk__primitive_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-01: -INFINITY
pcmk__primitive_assign: dom0-iscsi1-cnx1:1 allocation score on dom0-02: -INFINITY
diff --git a/cts/scheduler/scores/migrate-success.scores b/cts/scheduler/scores/migrate-success.scores
index 159b82b..b75abc0 100644
--- a/cts/scheduler/scores/migrate-success.scores
+++ b/cts/scheduler/scores/migrate-success.scores
@@ -7,7 +7,7 @@ pcmk__clone_assign: dlm:1 allocation score on hex-13: 1
pcmk__clone_assign: dlm:1 allocation score on hex-14: 0
pcmk__primitive_assign: dlm:0 allocation score on hex-13: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on hex-14: 1
-pcmk__primitive_assign: dlm:1 allocation score on hex-13: 1
-pcmk__primitive_assign: dlm:1 allocation score on hex-14: 0
+pcmk__primitive_assign: dlm:1 allocation score on hex-13: 2
+pcmk__primitive_assign: dlm:1 allocation score on hex-14: -INFINITY
pcmk__primitive_assign: test-vm allocation score on hex-13: 1
pcmk__primitive_assign: test-vm allocation score on hex-14: -INFINITY
diff --git a/cts/scheduler/scores/nested-remote-recovery.scores b/cts/scheduler/scores/nested-remote-recovery.scores
index bfbd8ba..e872849 100644
--- a/cts/scheduler/scores/nested-remote-recovery.scores
+++ b/cts/scheduler/scores/nested-remote-recovery.scores
@@ -2,330 +2,330 @@
galera:0 promotion score on galera-bundle-0: 100
galera:1 promotion score on galera-bundle-1: 100
galera:2 promotion score on galera-bundle-2: 100
-pcmk__bundle_allocate: galera-bundle allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on database-0: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on database-1: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on database-2: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-0: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-2: 0
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: INFINITY
-pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: INFINITY
-pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-0: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-1: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-2: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on messaging-0: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on messaging-1: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on messaging-2: INFINITY
-pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
-pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY
-pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on database-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on database-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on database-2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
-pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: INFINITY
-pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: INFINITY
-pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on database-0: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on database-1: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on database-2: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on messaging-0: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on messaging-1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on messaging-2: 0
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: INFINITY
+pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: INFINITY
+pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-0: 0
+pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-1: 0
+pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-2: 0
+pcmk__bundle_assign: openstack-cinder-volume allocation score on database-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on database-1: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on database-2: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on database-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on database-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on database-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on messaging-0: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on messaging-1: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on messaging-2: INFINITY
+pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
+pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY
+pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle allocation score on database-0: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on database-1: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on database-2: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on database-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on database-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on database-2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on messaging-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on messaging-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on messaging-2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
+pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: INFINITY
+pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: INFINITY
+pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on controller-0: -INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on controller-1: -INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on controller-2: -INFINITY
diff --git a/cts/scheduler/scores/no-promote-on-unrunnable-guest.scores b/cts/scheduler/scores/no-promote-on-unrunnable-guest.scores
index 7923cdc..12f4c7f 100644
--- a/cts/scheduler/scores/no-promote-on-unrunnable-guest.scores
+++ b/cts/scheduler/scores/no-promote-on-unrunnable-guest.scores
@@ -2,135 +2,135 @@
galera:0 promotion score on galera-bundle-0: 100
galera:1 promotion score on galera-bundle-1: 100
galera:2 promotion score on galera-bundle-2: 100
-ovndb_servers:0 promotion score on ovn-dbs-bundle-0: 5
+ovndb_servers:0 promotion score on ovn-dbs-bundle-0: -1
ovndb_servers:1 promotion score on ovn-dbs-bundle-1: 5
ovndb_servers:2 promotion score on ovn-dbs-bundle-2: 5
-pcmk__bundle_allocate: galera-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: INFINITY
-pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: INFINITY
-pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-0: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-1: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-2: 0
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-2: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: INFINITY
-pcmk__bundle_allocate: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: INFINITY
-pcmk__bundle_allocate: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
-pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY
-pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: INFINITY
-pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: INFINITY
-pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: INFINITY
+pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: INFINITY
+pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-0: 0
+pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-1: 0
+pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-2: 0
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on controller-1: 0
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on controller-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on controller-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on controller-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on controller-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on controller-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on controller-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on controller-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on controller-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: INFINITY
+pcmk__bundle_assign: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: INFINITY
+pcmk__bundle_assign: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
+pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY
+pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: INFINITY
+pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: INFINITY
+pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on controller-0: -INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on controller-1: -INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on controller-2: -INFINITY
diff --git a/cts/scheduler/scores/node-pending-timeout.scores b/cts/scheduler/scores/node-pending-timeout.scores
new file mode 100644
index 0000000..90a7c8b
--- /dev/null
+++ b/cts/scheduler/scores/node-pending-timeout.scores
@@ -0,0 +1,3 @@
+
+pcmk__primitive_assign: st-sbd allocation score on node-1: 0
+pcmk__primitive_assign: st-sbd allocation score on node-2: 0
diff --git a/cts/scheduler/scores/notifs-for-unrunnable.scores b/cts/scheduler/scores/notifs-for-unrunnable.scores
index dd823a4..95d0f7b 100644
--- a/cts/scheduler/scores/notifs-for-unrunnable.scores
+++ b/cts/scheduler/scores/notifs-for-unrunnable.scores
@@ -2,120 +2,120 @@
galera:0 promotion score on galera-bundle-0: -1
galera:1 promotion score on galera-bundle-1: 100
galera:2 promotion score on galera-bundle-2: 100
-pcmk__bundle_allocate: galera-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 500
-pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: INFINITY
-pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: 500
-pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY
-pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
-pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: 500
-pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: INFINITY
-pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 500
+pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: INFINITY
+pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: 500
+pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY
+pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
+pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: 500
+pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: INFINITY
+pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on controller-0: -INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on controller-1: -INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on controller-2: -INFINITY
@@ -169,7 +169,7 @@ pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on controller-1
pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY
pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY
pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY
-pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on controller-2: 0
+pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on controller-2: INFINITY
pcmk__primitive_assign: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY
pcmk__primitive_assign: haproxy-bundle-docker-2 allocation score on controller-1: -INFINITY
pcmk__primitive_assign: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
diff --git a/cts/scheduler/scores/notify-behind-stopping-remote.scores b/cts/scheduler/scores/notify-behind-stopping-remote.scores
index 015404a..17a5ca9 100644
--- a/cts/scheduler/scores/notify-behind-stopping-remote.scores
+++ b/cts/scheduler/scores/notify-behind-stopping-remote.scores
@@ -1,34 +1,34 @@
-pcmk__bundle_allocate: redis-bundle allocation score on ra1: 0
-pcmk__bundle_allocate: redis-bundle allocation score on ra2: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on ra3: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on ra1: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on ra2: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on ra3: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on ra1: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on ra2: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on ra3: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on ra1: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on ra2: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on ra3: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on ra1: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on ra2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on ra3: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on ra1: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on ra2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on ra3: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on ra1: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on ra2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on ra3: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on ra1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on ra2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on ra3: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
-pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: 501
-pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: 500
-pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: 501
+pcmk__bundle_assign: redis-bundle allocation score on ra1: 0
+pcmk__bundle_assign: redis-bundle allocation score on ra2: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on ra3: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on ra1: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on ra2: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on ra3: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on ra1: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on ra2: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on ra3: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on ra1: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on ra2: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on ra3: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on ra1: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on ra2: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on ra3: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on ra1: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on ra2: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on ra3: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on ra1: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on ra2: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on ra3: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on ra1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on ra2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on ra3: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
+pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: 501
+pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: 500
+pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: 501
pcmk__clone_assign: redis-bundle-master allocation score on ra1: -INFINITY
pcmk__clone_assign: redis-bundle-master allocation score on ra2: -INFINITY
pcmk__clone_assign: redis-bundle-master allocation score on ra3: -INFINITY
diff --git a/cts/scheduler/scores/novell-239087.scores b/cts/scheduler/scores/novell-239087.scores
index 7da4f35..34dbc1d 100644
--- a/cts/scheduler/scores/novell-239087.scores
+++ b/cts/scheduler/scores/novell-239087.scores
@@ -9,7 +9,7 @@ pcmk__clone_assign: ms-drbd0 allocation score on xen-1: 100
pcmk__clone_assign: ms-drbd0 allocation score on xen-2: 0
pcmk__primitive_assign: drbd0:0 allocation score on xen-1: -INFINITY
pcmk__primitive_assign: drbd0:0 allocation score on xen-2: 110
-pcmk__primitive_assign: drbd0:1 allocation score on xen-1: 110
+pcmk__primitive_assign: drbd0:1 allocation score on xen-1: 210
pcmk__primitive_assign: drbd0:1 allocation score on xen-2: 0
-pcmk__primitive_assign: fs_1 allocation score on xen-1: 210
+pcmk__primitive_assign: fs_1 allocation score on xen-1: 310
pcmk__primitive_assign: fs_1 allocation score on xen-2: -INFINITY
diff --git a/cts/scheduler/scores/on_fail_demote1.scores b/cts/scheduler/scores/on_fail_demote1.scores
index 8810211..2a22478 100644
--- a/cts/scheduler/scores/on_fail_demote1.scores
+++ b/cts/scheduler/scores/on_fail_demote1.scores
@@ -4,89 +4,89 @@ bundled:1 promotion score on stateful-bundle-1: 5
bundled:2 promotion score on stateful-bundle-2: 5
lxc-ms:0 promotion score on lxc2: INFINITY
lxc-ms:1 promotion score on lxc1: INFINITY
-pcmk__bundle_allocate: bundled:0 allocation score on stateful-bundle-0: 501
-pcmk__bundle_allocate: bundled:1 allocation score on stateful-bundle-1: 501
-pcmk__bundle_allocate: bundled:2 allocation score on stateful-bundle-2: 501
-pcmk__bundle_allocate: stateful-bundle allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-0 allocation score on lxc1: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-0 allocation score on lxc2: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-0 allocation score on remote-rhel7-2: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-1 allocation score on lxc1: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-1 allocation score on lxc2: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-1 allocation score on remote-rhel7-2: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-2 allocation score on lxc1: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-2 allocation score on lxc2: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-2 allocation score on remote-rhel7-2: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-0: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-1: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-2: -INFINITY
+pcmk__bundle_assign: bundled:0 allocation score on stateful-bundle-0: 501
+pcmk__bundle_assign: bundled:1 allocation score on stateful-bundle-1: 501
+pcmk__bundle_assign: bundled:2 allocation score on stateful-bundle-2: 501
+pcmk__bundle_assign: stateful-bundle allocation score on lxc1: 0
+pcmk__bundle_assign: stateful-bundle allocation score on lxc2: 0
+pcmk__bundle_assign: stateful-bundle allocation score on remote-rhel7-2: 0
+pcmk__bundle_assign: stateful-bundle allocation score on rhel7-1: 0
+pcmk__bundle_assign: stateful-bundle allocation score on rhel7-3: 0
+pcmk__bundle_assign: stateful-bundle allocation score on rhel7-4: 0
+pcmk__bundle_assign: stateful-bundle allocation score on rhel7-5: 0
+pcmk__bundle_assign: stateful-bundle-0 allocation score on lxc1: -INFINITY
+pcmk__bundle_assign: stateful-bundle-0 allocation score on lxc2: -INFINITY
+pcmk__bundle_assign: stateful-bundle-0 allocation score on remote-rhel7-2: -INFINITY
+pcmk__bundle_assign: stateful-bundle-0 allocation score on rhel7-1: 0
+pcmk__bundle_assign: stateful-bundle-0 allocation score on rhel7-3: 0
+pcmk__bundle_assign: stateful-bundle-0 allocation score on rhel7-4: 0
+pcmk__bundle_assign: stateful-bundle-0 allocation score on rhel7-5: 0
+pcmk__bundle_assign: stateful-bundle-1 allocation score on lxc1: -INFINITY
+pcmk__bundle_assign: stateful-bundle-1 allocation score on lxc2: -INFINITY
+pcmk__bundle_assign: stateful-bundle-1 allocation score on remote-rhel7-2: -INFINITY
+pcmk__bundle_assign: stateful-bundle-1 allocation score on rhel7-1: 0
+pcmk__bundle_assign: stateful-bundle-1 allocation score on rhel7-3: 0
+pcmk__bundle_assign: stateful-bundle-1 allocation score on rhel7-4: 0
+pcmk__bundle_assign: stateful-bundle-1 allocation score on rhel7-5: 0
+pcmk__bundle_assign: stateful-bundle-2 allocation score on lxc1: -INFINITY
+pcmk__bundle_assign: stateful-bundle-2 allocation score on lxc2: -INFINITY
+pcmk__bundle_assign: stateful-bundle-2 allocation score on remote-rhel7-2: -INFINITY
+pcmk__bundle_assign: stateful-bundle-2 allocation score on rhel7-1: 0
+pcmk__bundle_assign: stateful-bundle-2 allocation score on rhel7-3: 0
+pcmk__bundle_assign: stateful-bundle-2 allocation score on rhel7-4: 0
+pcmk__bundle_assign: stateful-bundle-2 allocation score on rhel7-5: 0
+pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on lxc1: 0
+pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on lxc2: 0
+pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on remote-rhel7-2: 0
+pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on rhel7-1: 0
+pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on rhel7-3: 0
+pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on rhel7-4: 0
+pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on rhel7-5: 0
+pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on lxc1: 0
+pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on lxc2: 0
+pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on remote-rhel7-2: 0
+pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on rhel7-1: 0
+pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on rhel7-3: 0
+pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on rhel7-4: 0
+pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on rhel7-5: 0
+pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on lxc1: 0
+pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on lxc2: 0
+pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on remote-rhel7-2: 0
+pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on rhel7-1: 0
+pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on rhel7-3: 0
+pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on rhel7-4: 0
+pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on rhel7-5: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on lxc1: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on lxc2: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on remote-rhel7-2: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-1: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-3: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-4: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-5: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on lxc1: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on lxc2: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on remote-rhel7-2: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-1: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-3: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-4: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-5: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on lxc1: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on lxc2: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on remote-rhel7-2: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-1: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-3: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-4: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-5: 0
+pcmk__bundle_assign: stateful-bundle-master allocation score on lxc1: 0
+pcmk__bundle_assign: stateful-bundle-master allocation score on lxc2: 0
+pcmk__bundle_assign: stateful-bundle-master allocation score on remote-rhel7-2: 0
+pcmk__bundle_assign: stateful-bundle-master allocation score on rhel7-1: 0
+pcmk__bundle_assign: stateful-bundle-master allocation score on rhel7-3: 0
+pcmk__bundle_assign: stateful-bundle-master allocation score on rhel7-4: 0
+pcmk__bundle_assign: stateful-bundle-master allocation score on rhel7-5: 0
+pcmk__bundle_assign: stateful-bundle-master allocation score on stateful-bundle-0: -INFINITY
+pcmk__bundle_assign: stateful-bundle-master allocation score on stateful-bundle-1: -INFINITY
+pcmk__bundle_assign: stateful-bundle-master allocation score on stateful-bundle-2: -INFINITY
pcmk__clone_assign: bundled:0 allocation score on stateful-bundle-0: INFINITY
pcmk__clone_assign: bundled:1 allocation score on stateful-bundle-1: INFINITY
pcmk__clone_assign: bundled:2 allocation score on stateful-bundle-2: INFINITY
diff --git a/cts/scheduler/scores/on_fail_demote4.scores b/cts/scheduler/scores/on_fail_demote4.scores
index cff13e7..b4896e1 100644
--- a/cts/scheduler/scores/on_fail_demote4.scores
+++ b/cts/scheduler/scores/on_fail_demote4.scores
@@ -4,89 +4,89 @@ bundled:1 promotion score on stateful-bundle-1: 5
bundled:2 promotion score on stateful-bundle-2: 5
lxc-ms:0 promotion score on lxc2: INFINITY
lxc-ms:1 promotion score on lxc1: INFINITY
-pcmk__bundle_allocate: bundled:0 allocation score on stateful-bundle-0: 501
-pcmk__bundle_allocate: bundled:1 allocation score on stateful-bundle-1: 501
-pcmk__bundle_allocate: bundled:2 allocation score on stateful-bundle-2: 501
-pcmk__bundle_allocate: stateful-bundle allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-0 allocation score on lxc1: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-0 allocation score on lxc2: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-0 allocation score on remote-rhel7-2: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-0 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-1 allocation score on lxc1: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-1 allocation score on lxc2: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-1 allocation score on remote-rhel7-2: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-1 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-2 allocation score on lxc1: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-2 allocation score on lxc2: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-2 allocation score on remote-rhel7-2: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-2 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-docker-0 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-docker-1 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-docker-2 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on lxc1: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on lxc2: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on remote-rhel7-2: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-1: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-3: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-4: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on rhel7-5: 0
-pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-0: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-1: -INFINITY
-pcmk__bundle_allocate: stateful-bundle-master allocation score on stateful-bundle-2: -INFINITY
+pcmk__bundle_assign: bundled:0 allocation score on stateful-bundle-0: 501
+pcmk__bundle_assign: bundled:1 allocation score on stateful-bundle-1: 501
+pcmk__bundle_assign: bundled:2 allocation score on stateful-bundle-2: 501
+pcmk__bundle_assign: stateful-bundle allocation score on lxc1: 0
+pcmk__bundle_assign: stateful-bundle allocation score on lxc2: 0
+pcmk__bundle_assign: stateful-bundle allocation score on remote-rhel7-2: 0
+pcmk__bundle_assign: stateful-bundle allocation score on rhel7-1: 0
+pcmk__bundle_assign: stateful-bundle allocation score on rhel7-3: 0
+pcmk__bundle_assign: stateful-bundle allocation score on rhel7-4: 0
+pcmk__bundle_assign: stateful-bundle allocation score on rhel7-5: 0
+pcmk__bundle_assign: stateful-bundle-0 allocation score on lxc1: -INFINITY
+pcmk__bundle_assign: stateful-bundle-0 allocation score on lxc2: -INFINITY
+pcmk__bundle_assign: stateful-bundle-0 allocation score on remote-rhel7-2: -INFINITY
+pcmk__bundle_assign: stateful-bundle-0 allocation score on rhel7-1: 0
+pcmk__bundle_assign: stateful-bundle-0 allocation score on rhel7-3: 0
+pcmk__bundle_assign: stateful-bundle-0 allocation score on rhel7-4: 0
+pcmk__bundle_assign: stateful-bundle-0 allocation score on rhel7-5: 0
+pcmk__bundle_assign: stateful-bundle-1 allocation score on lxc1: -INFINITY
+pcmk__bundle_assign: stateful-bundle-1 allocation score on lxc2: -INFINITY
+pcmk__bundle_assign: stateful-bundle-1 allocation score on remote-rhel7-2: -INFINITY
+pcmk__bundle_assign: stateful-bundle-1 allocation score on rhel7-1: 0
+pcmk__bundle_assign: stateful-bundle-1 allocation score on rhel7-3: 0
+pcmk__bundle_assign: stateful-bundle-1 allocation score on rhel7-4: 0
+pcmk__bundle_assign: stateful-bundle-1 allocation score on rhel7-5: 0
+pcmk__bundle_assign: stateful-bundle-2 allocation score on lxc1: -INFINITY
+pcmk__bundle_assign: stateful-bundle-2 allocation score on lxc2: -INFINITY
+pcmk__bundle_assign: stateful-bundle-2 allocation score on remote-rhel7-2: -INFINITY
+pcmk__bundle_assign: stateful-bundle-2 allocation score on rhel7-1: 0
+pcmk__bundle_assign: stateful-bundle-2 allocation score on rhel7-3: 0
+pcmk__bundle_assign: stateful-bundle-2 allocation score on rhel7-4: 0
+pcmk__bundle_assign: stateful-bundle-2 allocation score on rhel7-5: 0
+pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on lxc1: 0
+pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on lxc2: 0
+pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on remote-rhel7-2: 0
+pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on rhel7-1: 0
+pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on rhel7-3: 0
+pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on rhel7-4: 0
+pcmk__bundle_assign: stateful-bundle-docker-0 allocation score on rhel7-5: 0
+pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on lxc1: 0
+pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on lxc2: 0
+pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on remote-rhel7-2: 0
+pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on rhel7-1: 0
+pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on rhel7-3: 0
+pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on rhel7-4: 0
+pcmk__bundle_assign: stateful-bundle-docker-1 allocation score on rhel7-5: 0
+pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on lxc1: 0
+pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on lxc2: 0
+pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on remote-rhel7-2: 0
+pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on rhel7-1: 0
+pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on rhel7-3: 0
+pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on rhel7-4: 0
+pcmk__bundle_assign: stateful-bundle-docker-2 allocation score on rhel7-5: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on lxc1: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on lxc2: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on remote-rhel7-2: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-1: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-3: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-4: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.131 allocation score on rhel7-5: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on lxc1: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on lxc2: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on remote-rhel7-2: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-1: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-3: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-4: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.132 allocation score on rhel7-5: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on lxc1: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on lxc2: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on remote-rhel7-2: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-1: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-3: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-4: 0
+pcmk__bundle_assign: stateful-bundle-ip-192.168.122.133 allocation score on rhel7-5: 0
+pcmk__bundle_assign: stateful-bundle-master allocation score on lxc1: 0
+pcmk__bundle_assign: stateful-bundle-master allocation score on lxc2: 0
+pcmk__bundle_assign: stateful-bundle-master allocation score on remote-rhel7-2: 0
+pcmk__bundle_assign: stateful-bundle-master allocation score on rhel7-1: 0
+pcmk__bundle_assign: stateful-bundle-master allocation score on rhel7-3: 0
+pcmk__bundle_assign: stateful-bundle-master allocation score on rhel7-4: 0
+pcmk__bundle_assign: stateful-bundle-master allocation score on rhel7-5: 0
+pcmk__bundle_assign: stateful-bundle-master allocation score on stateful-bundle-0: -INFINITY
+pcmk__bundle_assign: stateful-bundle-master allocation score on stateful-bundle-1: -INFINITY
+pcmk__bundle_assign: stateful-bundle-master allocation score on stateful-bundle-2: -INFINITY
pcmk__clone_assign: bundled:0 allocation score on stateful-bundle-0: INFINITY
pcmk__clone_assign: bundled:1 allocation score on stateful-bundle-1: INFINITY
pcmk__clone_assign: bundled:2 allocation score on stateful-bundle-2: INFINITY
diff --git a/cts/scheduler/scores/order-expired-failure.scores b/cts/scheduler/scores/order-expired-failure.scores
index 1605ec0..a2fe598 100644
--- a/cts/scheduler/scores/order-expired-failure.scores
+++ b/cts/scheduler/scores/order-expired-failure.scores
@@ -2,194 +2,194 @@
galera:0 promotion score on galera-bundle-0: 100
galera:1 promotion score on galera-bundle-1: 100
galera:2 promotion score on galera-bundle-2: 100
-pcmk__bundle_allocate: galera-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on overcloud-novacompute-0: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on overcloud-novacompute-1: 0
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 501
-pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: 501
-pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: 501
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-0: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-1: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-2: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-novacompute-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-novacompute-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: 501
-pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: 501
-pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: 501
-pcmk__bundle_allocate: redis-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on overcloud-novacompute-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on overcloud-novacompute-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
-pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: 501
-pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: 501
-pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: 501
+pcmk__bundle_assign: galera-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on overcloud-novacompute-0: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on overcloud-novacompute-1: 0
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 501
+pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: 501
+pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: 501
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-0: 0
+pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-1: 0
+pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-2: 0
+pcmk__bundle_assign: openstack-cinder-volume allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-0: 0
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-novacompute-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-novacompute-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: 501
+pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: 501
+pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: 501
+pcmk__bundle_assign: redis-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on overcloud-novacompute-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on overcloud-novacompute-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
+pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: 501
+pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: 501
+pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: 501
pcmk__clone_assign: compute-unfence-trigger-clone allocation score on controller-0: -INFINITY
pcmk__clone_assign: compute-unfence-trigger-clone allocation score on controller-1: -INFINITY
pcmk__clone_assign: compute-unfence-trigger-clone allocation score on controller-2: -INFINITY
diff --git a/cts/scheduler/scores/params-6.scores b/cts/scheduler/scores/params-6.scores
index 39d172c..b6d28af 100644
--- a/cts/scheduler/scores/params-6.scores
+++ b/cts/scheduler/scores/params-6.scores
@@ -827,11 +827,14 @@ pcmk__primitive_assign: dist.express-consult.org-vm allocation score on v03-b: -
pcmk__primitive_assign: dist.fly-uni.org-vm allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: dist.fly-uni.org-vm allocation score on v03-a: -INFINITY
pcmk__primitive_assign: dist.fly-uni.org-vm allocation score on v03-b: -INFINITY
-pcmk__primitive_assign: dlm:0 allocation score on mgmt01: 0
+pcmk__primitive_assign: dlm:0 allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: dlm:0 allocation score on v03-a: 1
pcmk__primitive_assign: dlm:0 allocation score on v03-b: 0
+pcmk__primitive_assign: dlm:1 allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: dlm:1 allocation score on mgmt01: 1
pcmk__primitive_assign: dlm:1 allocation score on v03-a: -INFINITY
+pcmk__primitive_assign: dlm:1 allocation score on v03-a: -INFINITY
+pcmk__primitive_assign: dlm:1 allocation score on v03-b: -INFINITY
pcmk__primitive_assign: dlm:1 allocation score on v03-b: 0
pcmk__primitive_assign: dlm:2 allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: dlm:2 allocation score on v03-a: -INFINITY
@@ -869,11 +872,14 @@ pcmk__primitive_assign: git.vds-ok.com-vm allocation score on v03-b: -INFINITY
pcmk__primitive_assign: gotin-bbb-vm allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: gotin-bbb-vm allocation score on v03-a: -INFINITY
pcmk__primitive_assign: gotin-bbb-vm allocation score on v03-b: -INFINITY
-pcmk__primitive_assign: iscsid:0 allocation score on mgmt01: 0
+pcmk__primitive_assign: iscsid:0 allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: iscsid:0 allocation score on v03-a: 1
pcmk__primitive_assign: iscsid:0 allocation score on v03-b: 0
+pcmk__primitive_assign: iscsid:1 allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: iscsid:1 allocation score on mgmt01: 1
pcmk__primitive_assign: iscsid:1 allocation score on v03-a: -INFINITY
+pcmk__primitive_assign: iscsid:1 allocation score on v03-a: -INFINITY
+pcmk__primitive_assign: iscsid:1 allocation score on v03-b: -INFINITY
pcmk__primitive_assign: iscsid:1 allocation score on v03-b: 0
pcmk__primitive_assign: iscsid:2 allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: iscsid:2 allocation score on v03-a: -INFINITY
@@ -899,11 +905,14 @@ pcmk__primitive_assign: iscsid:8 allocation score on v03-b: -INFINITY
pcmk__primitive_assign: lenny-x32-devel-vm allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: lenny-x32-devel-vm allocation score on v03-a: 0
pcmk__primitive_assign: lenny-x32-devel-vm allocation score on v03-b: 0
-pcmk__primitive_assign: libvirt-images-fs:0 allocation score on mgmt01: 0
+pcmk__primitive_assign: libvirt-images-fs:0 allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: libvirt-images-fs:0 allocation score on v03-a: 1
pcmk__primitive_assign: libvirt-images-fs:0 allocation score on v03-b: 0
+pcmk__primitive_assign: libvirt-images-fs:1 allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: libvirt-images-fs:1 allocation score on mgmt01: 1
pcmk__primitive_assign: libvirt-images-fs:1 allocation score on v03-a: -INFINITY
+pcmk__primitive_assign: libvirt-images-fs:1 allocation score on v03-a: -INFINITY
+pcmk__primitive_assign: libvirt-images-fs:1 allocation score on v03-b: -INFINITY
pcmk__primitive_assign: libvirt-images-fs:1 allocation score on v03-b: 0
pcmk__primitive_assign: libvirt-images-fs:2 allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: libvirt-images-fs:2 allocation score on v03-a: -INFINITY
@@ -1055,11 +1064,14 @@ pcmk__primitive_assign: mcast-test-net:7 allocation score on v03-b: -INFINITY
pcmk__primitive_assign: metae.ru-vm allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: metae.ru-vm allocation score on v03-a: -INFINITY
pcmk__primitive_assign: metae.ru-vm allocation score on v03-b: -INFINITY
-pcmk__primitive_assign: multipathd:0 allocation score on mgmt01: 0
+pcmk__primitive_assign: multipathd:0 allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: multipathd:0 allocation score on v03-a: 1
pcmk__primitive_assign: multipathd:0 allocation score on v03-b: 0
+pcmk__primitive_assign: multipathd:1 allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: multipathd:1 allocation score on mgmt01: 1
pcmk__primitive_assign: multipathd:1 allocation score on v03-a: -INFINITY
+pcmk__primitive_assign: multipathd:1 allocation score on v03-a: -INFINITY
+pcmk__primitive_assign: multipathd:1 allocation score on v03-b: -INFINITY
pcmk__primitive_assign: multipathd:1 allocation score on v03-b: 0
pcmk__primitive_assign: multipathd:2 allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: multipathd:2 allocation score on v03-a: -INFINITY
@@ -1083,7 +1095,10 @@ pcmk__primitive_assign: multipathd:8 allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: multipathd:8 allocation score on v03-a: -INFINITY
pcmk__primitive_assign: multipathd:8 allocation score on v03-b: -INFINITY
pcmk__primitive_assign: node-params:0 allocation score on mgmt01: -INFINITY
+pcmk__primitive_assign: node-params:0 allocation score on mgmt01: -INFINITY
+pcmk__primitive_assign: node-params:0 allocation score on v03-a: -INFINITY
pcmk__primitive_assign: node-params:0 allocation score on v03-a: 1
+pcmk__primitive_assign: node-params:0 allocation score on v03-b: -INFINITY
pcmk__primitive_assign: node-params:0 allocation score on v03-b: 0
pcmk__primitive_assign: node-params:1 allocation score on mgmt01: -INFINITY
pcmk__primitive_assign: node-params:1 allocation score on v03-a: -INFINITY
diff --git a/cts/scheduler/scores/pending-node-no-uname.scores b/cts/scheduler/scores/pending-node-no-uname.scores
new file mode 100644
index 0000000..90a7c8b
--- /dev/null
+++ b/cts/scheduler/scores/pending-node-no-uname.scores
@@ -0,0 +1,3 @@
+
+pcmk__primitive_assign: st-sbd allocation score on node-1: 0
+pcmk__primitive_assign: st-sbd allocation score on node-2: 0
diff --git a/cts/scheduler/scores/probe-2.scores b/cts/scheduler/scores/probe-2.scores
index d396171..d3b50ce 100644
--- a/cts/scheduler/scores/probe-2.scores
+++ b/cts/scheduler/scores/probe-2.scores
@@ -137,7 +137,7 @@ pcmk__primitive_assign: mysql-proxy:1 allocation score on wc01: -INFINITY
pcmk__primitive_assign: mysql-proxy:1 allocation score on wc02: -INFINITY
pcmk__primitive_assign: mysql-server allocation score on wc01: 0
pcmk__primitive_assign: mysql-server allocation score on wc02: -INFINITY
-pcmk__primitive_assign: nfs-common:0 allocation score on wc01: 1
+pcmk__primitive_assign: nfs-common:0 allocation score on wc01: 77
pcmk__primitive_assign: nfs-common:0 allocation score on wc02: -INFINITY
pcmk__primitive_assign: nfs-common:1 allocation score on wc01: -INFINITY
pcmk__primitive_assign: nfs-common:1 allocation score on wc02: -INFINITY
diff --git a/cts/scheduler/scores/promoted-13.scores b/cts/scheduler/scores/promoted-13.scores
index 5ee6994..19b299c 100644
--- a/cts/scheduler/scores/promoted-13.scores
+++ b/cts/scheduler/scores/promoted-13.scores
@@ -18,7 +18,7 @@ pcmk__primitive_assign: IPaddr0 allocation score on frigg: -INFINITY
pcmk__primitive_assign: IPaddr0 allocation score on odin: INFINITY
pcmk__primitive_assign: MailTo allocation score on frigg: -INFINITY
pcmk__primitive_assign: MailTo allocation score on odin: 0
-pcmk__primitive_assign: drbd0:0 allocation score on frigg: 0
+pcmk__primitive_assign: drbd0:0 allocation score on frigg: -INFINITY
pcmk__primitive_assign: drbd0:0 allocation score on odin: INFINITY
pcmk__primitive_assign: drbd0:1 allocation score on frigg: INFINITY
pcmk__primitive_assign: drbd0:1 allocation score on odin: -INFINITY
diff --git a/cts/scheduler/scores/promoted-asymmetrical-order.scores b/cts/scheduler/scores/promoted-asymmetrical-order.scores
index 382e0eb..18bc704 100644
--- a/cts/scheduler/scores/promoted-asymmetrical-order.scores
+++ b/cts/scheduler/scores/promoted-asymmetrical-order.scores
@@ -12,8 +12,12 @@ pcmk__clone_assign: rsc2:0 allocation score on node2: 0
pcmk__clone_assign: rsc2:1 allocation score on node1: 0
pcmk__clone_assign: rsc2:1 allocation score on node2: 1
pcmk__primitive_assign: rsc1:0 allocation score on node1: -INFINITY
+pcmk__primitive_assign: rsc1:0 allocation score on node1: -INFINITY
+pcmk__primitive_assign: rsc1:0 allocation score on node2: -INFINITY
pcmk__primitive_assign: rsc1:0 allocation score on node2: -INFINITY
pcmk__primitive_assign: rsc1:1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: rsc1:1 allocation score on node1: -INFINITY
+pcmk__primitive_assign: rsc1:1 allocation score on node2: -INFINITY
pcmk__primitive_assign: rsc1:1 allocation score on node2: -INFINITY
pcmk__primitive_assign: rsc2:0 allocation score on node1: 1
pcmk__primitive_assign: rsc2:0 allocation score on node2: 0
diff --git a/cts/scheduler/scores/promoted-demote.scores b/cts/scheduler/scores/promoted-demote.scores
index 0a04576..a0ddf9a 100644
--- a/cts/scheduler/scores/promoted-demote.scores
+++ b/cts/scheduler/scores/promoted-demote.scores
@@ -30,10 +30,10 @@ pcmk__clone_assign: pingd_node:1 allocation score on cxa1: 0
pcmk__clone_assign: pingd_node:1 allocation score on cxb1: 1
pcmk__primitive_assign: cyrus_address allocation score on cxa1: 210
pcmk__primitive_assign: cyrus_address allocation score on cxb1: 200
-pcmk__primitive_assign: cyrus_drbd_node:0 allocation score on cxa1: 76
-pcmk__primitive_assign: cyrus_drbd_node:0 allocation score on cxb1: 0
+pcmk__primitive_assign: cyrus_drbd_node:0 allocation score on cxa1: 286
+pcmk__primitive_assign: cyrus_drbd_node:0 allocation score on cxb1: 200
pcmk__primitive_assign: cyrus_drbd_node:1 allocation score on cxa1: -INFINITY
-pcmk__primitive_assign: cyrus_drbd_node:1 allocation score on cxb1: 76
+pcmk__primitive_assign: cyrus_drbd_node:1 allocation score on cxb1: 276
pcmk__primitive_assign: cyrus_filesys allocation score on cxa1: -INFINITY
pcmk__primitive_assign: cyrus_filesys allocation score on cxb1: -INFINITY
pcmk__primitive_assign: cyrus_master allocation score on cxa1: -INFINITY
@@ -50,10 +50,10 @@ pcmk__primitive_assign: named_address allocation score on cxa1: 200
pcmk__primitive_assign: named_address allocation score on cxb1: 210
pcmk__primitive_assign: named_daemon allocation score on cxa1: -INFINITY
pcmk__primitive_assign: named_daemon allocation score on cxb1: -INFINITY
-pcmk__primitive_assign: named_drbd_node:0 allocation score on cxa1: 76
-pcmk__primitive_assign: named_drbd_node:0 allocation score on cxb1: 0
+pcmk__primitive_assign: named_drbd_node:0 allocation score on cxa1: 276
+pcmk__primitive_assign: named_drbd_node:0 allocation score on cxb1: 210
pcmk__primitive_assign: named_drbd_node:1 allocation score on cxa1: -INFINITY
-pcmk__primitive_assign: named_drbd_node:1 allocation score on cxb1: 76
+pcmk__primitive_assign: named_drbd_node:1 allocation score on cxb1: 286
pcmk__primitive_assign: named_filesys allocation score on cxa1: -INFINITY
pcmk__primitive_assign: named_filesys allocation score on cxb1: -INFINITY
pcmk__primitive_assign: named_syslogd allocation score on cxa1: -INFINITY
diff --git a/cts/scheduler/scores/promoted-failed-demote-2.scores b/cts/scheduler/scores/promoted-failed-demote-2.scores
index 2a85ae6..39399d9 100644
--- a/cts/scheduler/scores/promoted-failed-demote-2.scores
+++ b/cts/scheduler/scores/promoted-failed-demote-2.scores
@@ -16,14 +16,20 @@ pcmk__clone_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY
pcmk__clone_assign: stateful-2:1 allocation score on dl380g5a: INFINITY
pcmk__clone_assign: stateful-2:1 allocation score on dl380g5b: 0
pcmk__group_assign: group:0 allocation score on dl380g5a: -INFINITY
+pcmk__group_assign: group:0 allocation score on dl380g5a: -INFINITY
+pcmk__group_assign: group:0 allocation score on dl380g5b: 0
pcmk__group_assign: group:0 allocation score on dl380g5b: 0
pcmk__group_assign: group:1 allocation score on dl380g5a: 0
pcmk__group_assign: group:1 allocation score on dl380g5b: 0
pcmk__group_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY
+pcmk__group_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY
+pcmk__group_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY
pcmk__group_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY
pcmk__group_assign: stateful-1:1 allocation score on dl380g5a: INFINITY
pcmk__group_assign: stateful-1:1 allocation score on dl380g5b: 0
pcmk__group_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY
+pcmk__group_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY
+pcmk__group_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY
pcmk__group_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY
pcmk__group_assign: stateful-2:1 allocation score on dl380g5a: INFINITY
pcmk__group_assign: stateful-2:1 allocation score on dl380g5b: 0
diff --git a/cts/scheduler/scores/promoted-failed-demote.scores b/cts/scheduler/scores/promoted-failed-demote.scores
index 2a85ae6..39399d9 100644
--- a/cts/scheduler/scores/promoted-failed-demote.scores
+++ b/cts/scheduler/scores/promoted-failed-demote.scores
@@ -16,14 +16,20 @@ pcmk__clone_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY
pcmk__clone_assign: stateful-2:1 allocation score on dl380g5a: INFINITY
pcmk__clone_assign: stateful-2:1 allocation score on dl380g5b: 0
pcmk__group_assign: group:0 allocation score on dl380g5a: -INFINITY
+pcmk__group_assign: group:0 allocation score on dl380g5a: -INFINITY
+pcmk__group_assign: group:0 allocation score on dl380g5b: 0
pcmk__group_assign: group:0 allocation score on dl380g5b: 0
pcmk__group_assign: group:1 allocation score on dl380g5a: 0
pcmk__group_assign: group:1 allocation score on dl380g5b: 0
pcmk__group_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY
+pcmk__group_assign: stateful-1:0 allocation score on dl380g5a: -INFINITY
+pcmk__group_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY
pcmk__group_assign: stateful-1:0 allocation score on dl380g5b: -INFINITY
pcmk__group_assign: stateful-1:1 allocation score on dl380g5a: INFINITY
pcmk__group_assign: stateful-1:1 allocation score on dl380g5b: 0
pcmk__group_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY
+pcmk__group_assign: stateful-2:0 allocation score on dl380g5a: -INFINITY
+pcmk__group_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY
pcmk__group_assign: stateful-2:0 allocation score on dl380g5b: -INFINITY
pcmk__group_assign: stateful-2:1 allocation score on dl380g5a: INFINITY
pcmk__group_assign: stateful-2:1 allocation score on dl380g5b: 0
diff --git a/cts/scheduler/scores/promoted-move.scores b/cts/scheduler/scores/promoted-move.scores
index 7006cda..5ed2b60 100644
--- a/cts/scheduler/scores/promoted-move.scores
+++ b/cts/scheduler/scores/promoted-move.scores
@@ -19,7 +19,9 @@ pcmk__primitive_assign: dummy02 allocation score on bl460g1n13: -INFINITY
pcmk__primitive_assign: dummy02 allocation score on bl460g1n14: 0
pcmk__primitive_assign: dummy03 allocation score on bl460g1n13: -INFINITY
pcmk__primitive_assign: dummy03 allocation score on bl460g1n14: 0
+pcmk__primitive_assign: prmDRBD:0 allocation score on bl460g1n13: -INFINITY
pcmk__primitive_assign: prmDRBD:0 allocation score on bl460g1n13: INFINITY
+pcmk__primitive_assign: prmDRBD:0 allocation score on bl460g1n14: -INFINITY
pcmk__primitive_assign: prmDRBD:0 allocation score on bl460g1n14: 0
pcmk__primitive_assign: prmDRBD:1 allocation score on bl460g1n13: -INFINITY
pcmk__primitive_assign: prmDRBD:1 allocation score on bl460g1n14: INFINITY
diff --git a/cts/scheduler/scores/promoted-ordering.scores b/cts/scheduler/scores/promoted-ordering.scores
index 46dac66..5162abf 100644
--- a/cts/scheduler/scores/promoted-ordering.scores
+++ b/cts/scheduler/scores/promoted-ordering.scores
@@ -55,7 +55,7 @@ pcmk__primitive_assign: apache2:0 allocation score on webcluster01: -INFINITY
pcmk__primitive_assign: apache2:0 allocation score on webcluster02: -INFINITY
pcmk__primitive_assign: apache2:1 allocation score on webcluster01: -INFINITY
pcmk__primitive_assign: apache2:1 allocation score on webcluster02: -INFINITY
-pcmk__primitive_assign: drbd_mysql:0 allocation score on webcluster01: 0
+pcmk__primitive_assign: drbd_mysql:0 allocation score on webcluster01: 100
pcmk__primitive_assign: drbd_mysql:0 allocation score on webcluster02: -INFINITY
pcmk__primitive_assign: drbd_mysql:1 allocation score on webcluster01: -INFINITY
pcmk__primitive_assign: drbd_mysql:1 allocation score on webcluster02: -INFINITY
@@ -71,10 +71,10 @@ pcmk__primitive_assign: fs_mysql allocation score on webcluster01: -INFINITY
pcmk__primitive_assign: fs_mysql allocation score on webcluster02: -INFINITY
pcmk__primitive_assign: intip_0_main allocation score on webcluster01: -INFINITY
pcmk__primitive_assign: intip_0_main allocation score on webcluster02: -INFINITY
-pcmk__primitive_assign: intip_1_master allocation score on webcluster01: 200
-pcmk__primitive_assign: intip_1_master allocation score on webcluster02: 0
-pcmk__primitive_assign: intip_2_slave allocation score on webcluster01: 0
-pcmk__primitive_assign: intip_2_slave allocation score on webcluster02: 100
+pcmk__primitive_assign: intip_1_active allocation score on webcluster01: 200
+pcmk__primitive_assign: intip_1_active allocation score on webcluster02: 0
+pcmk__primitive_assign: intip_2_passive allocation score on webcluster01: 0
+pcmk__primitive_assign: intip_2_passive allocation score on webcluster02: 100
pcmk__primitive_assign: mysql-proxy:0 allocation score on webcluster01: -INFINITY
pcmk__primitive_assign: mysql-proxy:0 allocation score on webcluster02: -INFINITY
pcmk__primitive_assign: mysql-proxy:1 allocation score on webcluster01: -INFINITY
diff --git a/cts/scheduler/scores/promoted-partially-demoted-group.scores b/cts/scheduler/scores/promoted-partially-demoted-group.scores
index 5205aa5..f266c64 100644
--- a/cts/scheduler/scores/promoted-partially-demoted-group.scores
+++ b/cts/scheduler/scores/promoted-partially-demoted-group.scores
@@ -60,17 +60,17 @@ pcmk__group_assign: vip-165-fw:0 allocation score on sd01-1: 100
pcmk__group_assign: vip-165-fw:1 allocation score on sd01-0: 100
pcmk__group_assign: vip-165-fw:1 allocation score on sd01-1: 0
pcmk__primitive_assign: cdev-pool-0-drbd:0 allocation score on sd01-0: -INFINITY
-pcmk__primitive_assign: cdev-pool-0-drbd:0 allocation score on sd01-1: 10100
-pcmk__primitive_assign: cdev-pool-0-drbd:1 allocation score on sd01-0: 10100
-pcmk__primitive_assign: cdev-pool-0-drbd:1 allocation score on sd01-1: 0
+pcmk__primitive_assign: cdev-pool-0-drbd:0 allocation score on sd01-1: 10500
+pcmk__primitive_assign: cdev-pool-0-drbd:1 allocation score on sd01-0: INFINITY
+pcmk__primitive_assign: cdev-pool-0-drbd:1 allocation score on sd01-1: 400
pcmk__primitive_assign: cdev-pool-0-iscsi-lun-1 allocation score on sd01-0: 0
pcmk__primitive_assign: cdev-pool-0-iscsi-lun-1 allocation score on sd01-1: -INFINITY
pcmk__primitive_assign: cdev-pool-0-iscsi-target allocation score on sd01-0: INFINITY
pcmk__primitive_assign: cdev-pool-0-iscsi-target allocation score on sd01-1: -INFINITY
pcmk__primitive_assign: ietd:0 allocation score on sd01-0: -INFINITY
pcmk__primitive_assign: ietd:0 allocation score on sd01-1: 100
-pcmk__primitive_assign: ietd:1 allocation score on sd01-0: 100
-pcmk__primitive_assign: ietd:1 allocation score on sd01-1: 0
+pcmk__primitive_assign: ietd:1 allocation score on sd01-0: INFINITY
+pcmk__primitive_assign: ietd:1 allocation score on sd01-1: -INFINITY
pcmk__primitive_assign: stonith-xvm-sd01-0 allocation score on sd01-0: -INFINITY
pcmk__primitive_assign: stonith-xvm-sd01-0 allocation score on sd01-1: 100
pcmk__primitive_assign: stonith-xvm-sd01-1 allocation score on sd01-0: 100
diff --git a/cts/scheduler/scores/promoted-probed-score.scores b/cts/scheduler/scores/promoted-probed-score.scores
index 1a01a5b..bf6d7fc 100644
--- a/cts/scheduler/scores/promoted-probed-score.scores
+++ b/cts/scheduler/scores/promoted-probed-score.scores
@@ -1,11 +1,11 @@
-AdminDrbd:0 promotion score on hypatia-corosync.nevis.columbia.edu: 5
-AdminDrbd:1 promotion score on orestes-corosync.nevis.columbia.edu: INFINITY
+AdminDrbd:0 promotion score on orestes-corosync.nevis.columbia.edu: INFINITY
+AdminDrbd:1 promotion score on hypatia-corosync.nevis.columbia.edu: 5
pcmk__clone_assign: AdminClone allocation score on hypatia-corosync.nevis.columbia.edu: 0
pcmk__clone_assign: AdminClone allocation score on orestes-corosync.nevis.columbia.edu: INFINITY
pcmk__clone_assign: AdminDrbd:0 allocation score on hypatia-corosync.nevis.columbia.edu: 5
-pcmk__clone_assign: AdminDrbd:0 allocation score on orestes-corosync.nevis.columbia.edu: 0
-pcmk__clone_assign: AdminDrbd:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0
+pcmk__clone_assign: AdminDrbd:0 allocation score on orestes-corosync.nevis.columbia.edu: 5
+pcmk__clone_assign: AdminDrbd:1 allocation score on hypatia-corosync.nevis.columbia.edu: 5
pcmk__clone_assign: AdminDrbd:1 allocation score on orestes-corosync.nevis.columbia.edu: 5
pcmk__clone_assign: AdminLvm:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0
pcmk__clone_assign: AdminLvm:0 allocation score on orestes-corosync.nevis.columbia.edu: 0
@@ -141,8 +141,8 @@ pcmk__clone_assign: Xinetd:1 allocation score on hypatia-corosync.nevis.columbia
pcmk__clone_assign: Xinetd:1 allocation score on orestes-corosync.nevis.columbia.edu: 0
pcmk__group_assign: AdminLvm:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0
pcmk__group_assign: AdminLvm:0 allocation score on orestes-corosync.nevis.columbia.edu: 0
-pcmk__group_assign: AdminLvm:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
-pcmk__group_assign: AdminLvm:1 allocation score on orestes-corosync.nevis.columbia.edu: 0
+pcmk__group_assign: AdminLvm:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0
+pcmk__group_assign: AdminLvm:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY
pcmk__group_assign: ClusterIP:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0
pcmk__group_assign: ClusterIP:0 allocation score on orestes-corosync.nevis.columbia.edu: 0
pcmk__group_assign: ClusterIP:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0
@@ -207,28 +207,28 @@ pcmk__group_assign: ExportsGroup:1 allocation score on hypatia-corosync.nevis.co
pcmk__group_assign: ExportsGroup:1 allocation score on orestes-corosync.nevis.columbia.edu: 0
pcmk__group_assign: FSMail:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0
pcmk__group_assign: FSMail:0 allocation score on orestes-corosync.nevis.columbia.edu: 0
-pcmk__group_assign: FSMail:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
-pcmk__group_assign: FSMail:1 allocation score on orestes-corosync.nevis.columbia.edu: 0
+pcmk__group_assign: FSMail:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0
+pcmk__group_assign: FSMail:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY
pcmk__group_assign: FSUsrNevis:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0
pcmk__group_assign: FSUsrNevis:0 allocation score on orestes-corosync.nevis.columbia.edu: 0
-pcmk__group_assign: FSUsrNevis:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
-pcmk__group_assign: FSUsrNevis:1 allocation score on orestes-corosync.nevis.columbia.edu: 0
+pcmk__group_assign: FSUsrNevis:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0
+pcmk__group_assign: FSUsrNevis:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY
pcmk__group_assign: FSVarNevis:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0
pcmk__group_assign: FSVarNevis:0 allocation score on orestes-corosync.nevis.columbia.edu: 0
-pcmk__group_assign: FSVarNevis:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
-pcmk__group_assign: FSVarNevis:1 allocation score on orestes-corosync.nevis.columbia.edu: 0
+pcmk__group_assign: FSVarNevis:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0
+pcmk__group_assign: FSVarNevis:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY
pcmk__group_assign: FSVirtualMachines:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0
pcmk__group_assign: FSVirtualMachines:0 allocation score on orestes-corosync.nevis.columbia.edu: 0
-pcmk__group_assign: FSVirtualMachines:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
-pcmk__group_assign: FSVirtualMachines:1 allocation score on orestes-corosync.nevis.columbia.edu: 0
+pcmk__group_assign: FSVirtualMachines:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0
+pcmk__group_assign: FSVirtualMachines:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY
pcmk__group_assign: FSWork:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0
pcmk__group_assign: FSWork:0 allocation score on orestes-corosync.nevis.columbia.edu: 0
-pcmk__group_assign: FSWork:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
-pcmk__group_assign: FSWork:1 allocation score on orestes-corosync.nevis.columbia.edu: 0
+pcmk__group_assign: FSWork:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0
+pcmk__group_assign: FSWork:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY
pcmk__group_assign: FilesystemGroup:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0
pcmk__group_assign: FilesystemGroup:0 allocation score on orestes-corosync.nevis.columbia.edu: 0
-pcmk__group_assign: FilesystemGroup:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
-pcmk__group_assign: FilesystemGroup:1 allocation score on orestes-corosync.nevis.columbia.edu: 0
+pcmk__group_assign: FilesystemGroup:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0
+pcmk__group_assign: FilesystemGroup:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY
pcmk__group_assign: IPGroup:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0
pcmk__group_assign: IPGroup:0 allocation score on orestes-corosync.nevis.columbia.edu: 0
pcmk__group_assign: IPGroup:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0
@@ -272,13 +272,13 @@ pcmk__group_assign: Xinetd:0 allocation score on orestes-corosync.nevis.columbia
pcmk__group_assign: Xinetd:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
pcmk__group_assign: Xinetd:1 allocation score on orestes-corosync.nevis.columbia.edu: 0
pcmk__primitive_assign: AdminDrbd:0 allocation score on hypatia-corosync.nevis.columbia.edu: 5
-pcmk__primitive_assign: AdminDrbd:0 allocation score on orestes-corosync.nevis.columbia.edu: 0
-pcmk__primitive_assign: AdminDrbd:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
-pcmk__primitive_assign: AdminDrbd:1 allocation score on orestes-corosync.nevis.columbia.edu: 5
+pcmk__primitive_assign: AdminDrbd:0 allocation score on orestes-corosync.nevis.columbia.edu: INFINITY
+pcmk__primitive_assign: AdminDrbd:1 allocation score on hypatia-corosync.nevis.columbia.edu: 5
+pcmk__primitive_assign: AdminDrbd:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY
pcmk__primitive_assign: AdminLvm:0 allocation score on hypatia-corosync.nevis.columbia.edu: 5
-pcmk__primitive_assign: AdminLvm:0 allocation score on orestes-corosync.nevis.columbia.edu: 5
-pcmk__primitive_assign: AdminLvm:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
-pcmk__primitive_assign: AdminLvm:1 allocation score on orestes-corosync.nevis.columbia.edu: 5
+pcmk__primitive_assign: AdminLvm:0 allocation score on orestes-corosync.nevis.columbia.edu: INFINITY
+pcmk__primitive_assign: AdminLvm:1 allocation score on hypatia-corosync.nevis.columbia.edu: 5
+pcmk__primitive_assign: AdminLvm:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY
pcmk__primitive_assign: ClusterIP:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0
pcmk__primitive_assign: ClusterIP:0 allocation score on orestes-corosync.nevis.columbia.edu: 0
pcmk__primitive_assign: ClusterIP:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0
@@ -333,32 +333,32 @@ pcmk__primitive_assign: ExportWWW:0 allocation score on hypatia-corosync.nevis.c
pcmk__primitive_assign: ExportWWW:0 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY
pcmk__primitive_assign: ExportWWW:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
pcmk__primitive_assign: ExportWWW:1 allocation score on orestes-corosync.nevis.columbia.edu: 0
-pcmk__primitive_assign: FSMail:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0
-pcmk__primitive_assign: FSMail:0 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY
-pcmk__primitive_assign: FSMail:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
-pcmk__primitive_assign: FSMail:1 allocation score on orestes-corosync.nevis.columbia.edu: 0
-pcmk__primitive_assign: FSUsrNevis:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0
-pcmk__primitive_assign: FSUsrNevis:0 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY
-pcmk__primitive_assign: FSUsrNevis:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
-pcmk__primitive_assign: FSUsrNevis:1 allocation score on orestes-corosync.nevis.columbia.edu: 0
-pcmk__primitive_assign: FSVarNevis:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0
-pcmk__primitive_assign: FSVarNevis:0 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY
-pcmk__primitive_assign: FSVarNevis:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
-pcmk__primitive_assign: FSVarNevis:1 allocation score on orestes-corosync.nevis.columbia.edu: 0
-pcmk__primitive_assign: FSVirtualMachines:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0
-pcmk__primitive_assign: FSVirtualMachines:0 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY
-pcmk__primitive_assign: FSVirtualMachines:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
-pcmk__primitive_assign: FSVirtualMachines:1 allocation score on orestes-corosync.nevis.columbia.edu: 0
-pcmk__primitive_assign: FSWork:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0
-pcmk__primitive_assign: FSWork:0 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY
-pcmk__primitive_assign: FSWork:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
-pcmk__primitive_assign: FSWork:1 allocation score on orestes-corosync.nevis.columbia.edu: 0
+pcmk__primitive_assign: FSMail:0 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
+pcmk__primitive_assign: FSMail:0 allocation score on orestes-corosync.nevis.columbia.edu: INFINITY
+pcmk__primitive_assign: FSMail:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0
+pcmk__primitive_assign: FSMail:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY
+pcmk__primitive_assign: FSUsrNevis:0 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
+pcmk__primitive_assign: FSUsrNevis:0 allocation score on orestes-corosync.nevis.columbia.edu: INFINITY
+pcmk__primitive_assign: FSUsrNevis:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0
+pcmk__primitive_assign: FSUsrNevis:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY
+pcmk__primitive_assign: FSVarNevis:0 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
+pcmk__primitive_assign: FSVarNevis:0 allocation score on orestes-corosync.nevis.columbia.edu: INFINITY
+pcmk__primitive_assign: FSVarNevis:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0
+pcmk__primitive_assign: FSVarNevis:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY
+pcmk__primitive_assign: FSVirtualMachines:0 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
+pcmk__primitive_assign: FSVirtualMachines:0 allocation score on orestes-corosync.nevis.columbia.edu: INFINITY
+pcmk__primitive_assign: FSVirtualMachines:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0
+pcmk__primitive_assign: FSVirtualMachines:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY
+pcmk__primitive_assign: FSWork:0 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
+pcmk__primitive_assign: FSWork:0 allocation score on orestes-corosync.nevis.columbia.edu: INFINITY
+pcmk__primitive_assign: FSWork:1 allocation score on hypatia-corosync.nevis.columbia.edu: 0
+pcmk__primitive_assign: FSWork:1 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY
pcmk__primitive_assign: KVM-guest allocation score on hypatia-corosync.nevis.columbia.edu: 0
pcmk__primitive_assign: KVM-guest allocation score on orestes-corosync.nevis.columbia.edu: 0
pcmk__primitive_assign: Libvirtd:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0
pcmk__primitive_assign: Libvirtd:0 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY
pcmk__primitive_assign: Libvirtd:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
-pcmk__primitive_assign: Libvirtd:1 allocation score on orestes-corosync.nevis.columbia.edu: 0
+pcmk__primitive_assign: Libvirtd:1 allocation score on orestes-corosync.nevis.columbia.edu: INFINITY
pcmk__primitive_assign: Proxy allocation score on hypatia-corosync.nevis.columbia.edu: 0
pcmk__primitive_assign: Proxy allocation score on orestes-corosync.nevis.columbia.edu: INFINITY
pcmk__primitive_assign: StonithHypatia allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
@@ -376,7 +376,7 @@ pcmk__primitive_assign: SymlinkDhcpdLeases allocation score on orestes-corosync.
pcmk__primitive_assign: SymlinkEtcLibvirt:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0
pcmk__primitive_assign: SymlinkEtcLibvirt:0 allocation score on orestes-corosync.nevis.columbia.edu: -INFINITY
pcmk__primitive_assign: SymlinkEtcLibvirt:1 allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
-pcmk__primitive_assign: SymlinkEtcLibvirt:1 allocation score on orestes-corosync.nevis.columbia.edu: 0
+pcmk__primitive_assign: SymlinkEtcLibvirt:1 allocation score on orestes-corosync.nevis.columbia.edu: INFINITY
pcmk__primitive_assign: SymlinkSysconfigDhcpd allocation score on hypatia-corosync.nevis.columbia.edu: -INFINITY
pcmk__primitive_assign: SymlinkSysconfigDhcpd allocation score on orestes-corosync.nevis.columbia.edu: 0
pcmk__primitive_assign: SymlinkTftp:0 allocation score on hypatia-corosync.nevis.columbia.edu: 0
diff --git a/cts/scheduler/scores/remote-connection-shutdown.scores b/cts/scheduler/scores/remote-connection-shutdown.scores
index 176580b..c1d43ec 100644
--- a/cts/scheduler/scores/remote-connection-shutdown.scores
+++ b/cts/scheduler/scores/remote-connection-shutdown.scores
@@ -2,495 +2,495 @@
galera:0 promotion score on galera-bundle-0: 100
galera:1 promotion score on galera-bundle-1: 100
galera:2 promotion score on galera-bundle-2: 100
-ovndb_servers:0 promotion score on ovn-dbs-bundle-0: 10
+ovndb_servers:0 promotion score on ovn-dbs-bundle-0: INFINITY
ovndb_servers:1 promotion score on ovn-dbs-bundle-1: 5
ovndb_servers:2 promotion score on ovn-dbs-bundle-2: 5
-pcmk__bundle_allocate: galera-bundle allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-0: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-1: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on messaging-2: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-0: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-1: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on messaging-2: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-0: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-1: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on messaging-2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on compute-0: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on compute-1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-0: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on messaging-2: 0
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on database-0: 0
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on database-1: 0
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on database-2: 0
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-podman-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 501
-pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: 501
-pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: 501
-pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-0: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-1: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-2: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-podman-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on database-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-0 allocation score on messaging-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on database-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-1 allocation score on messaging-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on database-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-2 allocation score on messaging-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on compute-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on compute-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on controller-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on database-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on messaging-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-2: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: ovn-dbs-bundle-podman-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: 501
-pcmk__bundle_allocate: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: 501
-pcmk__bundle_allocate: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: 501
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on database-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on database-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on database-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on compute-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on compute-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on database-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-podman-0 allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-podman-1 allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-podman-2 allocation score on messaging-2: 0
-pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: 501
-pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: 501
-pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: 501
-pcmk__bundle_allocate: redis-bundle allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on database-0: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on database-1: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on database-2: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-0: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-1: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on messaging-2: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on database-0: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on database-1: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on database-2: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-0: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-1: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on messaging-2: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on database-0: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on database-1: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on database-2: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-0: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-1: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on messaging-2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on compute-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on compute-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on database-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on database-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on database-2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on messaging-2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-0 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-1 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on compute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on compute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on database-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on database-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on database-2: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on messaging-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on messaging-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-podman-2 allocation score on messaging-2: -INFINITY
-pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: 501
-pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: 501
-pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: 501
+pcmk__bundle_assign: galera-bundle allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-0: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-1: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on messaging-2: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-0: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-1: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on messaging-2: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-0: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-1: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on messaging-2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on compute-0: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on compute-1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on messaging-0: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on messaging-1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on messaging-2: 0
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on database-0: 0
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on database-1: 0
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on database-2: 0
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-podman-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 501
+pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: 501
+pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: 501
+pcmk__bundle_assign: haproxy-bundle allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-podman-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-0: 0
+pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-1: 0
+pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-2: 0
+pcmk__bundle_assign: openstack-cinder-volume allocation score on database-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on database-1: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on database-2: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on controller-0: 0
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on controller-1: 0
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on controller-2: 0
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-podman-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on database-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on database-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on database-2: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on controller-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on database-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on database-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on database-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on messaging-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on messaging-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-0 allocation score on messaging-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on controller-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on database-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on database-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on database-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on messaging-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on messaging-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-1 allocation score on messaging-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on controller-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on database-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on database-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on database-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on messaging-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on messaging-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-2 allocation score on messaging-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on compute-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on compute-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on controller-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on controller-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on controller-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on database-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on database-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on database-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on messaging-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on messaging-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on messaging-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-master allocation score on ovn-dbs-bundle-2: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on controller-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on controller-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on controller-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on controller-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on controller-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on controller-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on controller-0: 0
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on controller-1: 0
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on controller-2: 0
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: ovn-dbs-bundle-podman-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: ovndb_servers:0 allocation score on ovn-dbs-bundle-0: 501
+pcmk__bundle_assign: ovndb_servers:1 allocation score on ovn-dbs-bundle-1: 501
+pcmk__bundle_assign: ovndb_servers:2 allocation score on ovn-dbs-bundle-2: 501
+pcmk__bundle_assign: rabbitmq-bundle allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on database-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on database-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on database-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on database-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on database-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on database-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on database-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on database-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on compute-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on compute-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on database-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-podman-0 allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-podman-1 allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on messaging-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on messaging-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-podman-2 allocation score on messaging-2: 0
+pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: 501
+pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: 501
+pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: 501
+pcmk__bundle_assign: redis-bundle allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle allocation score on database-0: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on database-1: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on database-2: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on database-0: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on database-1: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on database-2: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-0: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-1: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on messaging-2: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on database-0: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on database-1: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on database-2: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on messaging-0: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on messaging-1: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on messaging-2: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on database-0: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on database-1: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on database-2: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-0: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-1: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on messaging-2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on compute-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on compute-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on database-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on database-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on database-2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on messaging-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on messaging-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on messaging-2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-0 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-1 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on compute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on compute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on database-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on database-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on database-2: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on messaging-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on messaging-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-podman-2 allocation score on messaging-2: -INFINITY
+pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: 501
+pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: 501
+pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: 501
pcmk__clone_assign: compute-unfence-trigger-clone allocation score on compute-0: 0
pcmk__clone_assign: compute-unfence-trigger-clone allocation score on compute-1: 0
pcmk__clone_assign: compute-unfence-trigger-clone allocation score on controller-0: -INFINITY
diff --git a/cts/scheduler/scores/remote-fence-unclean-3.scores b/cts/scheduler/scores/remote-fence-unclean-3.scores
index dc157a3..b3bcf0c 100644
--- a/cts/scheduler/scores/remote-fence-unclean-3.scores
+++ b/cts/scheduler/scores/remote-fence-unclean-3.scores
@@ -2,177 +2,177 @@
galera:0 promotion score on galera-bundle-0: 100
galera:1 promotion score on galera-bundle-1: 100
galera:2 promotion score on galera-bundle-2: 100
-pcmk__bundle_allocate: galera-bundle allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: galera-bundle allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: galera-bundle allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: galera-bundle allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on overcloud-controller-0: INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on overcloud-controller-1: INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on overcloud-controller-2: INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-controller-0: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-controller-1: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-controller-2: INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on overcloud-novacompute-0: 0
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: INFINITY
-pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: INFINITY
-pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-controller-0: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-controller-0: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-controller-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-controller-1: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-controller-1: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-controller-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-controller-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-controller-2: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-controller-2: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-backup allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: openstack-cinder-backup allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: openstack-cinder-backup allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: openstack-cinder-backup allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-backup-docker-0 allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: openstack-cinder-backup-docker-0 allocation score on overcloud-controller-1: INFINITY
-pcmk__bundle_allocate: openstack-cinder-backup-docker-0 allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: openstack-cinder-backup-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on overcloud-controller-0: INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-controller-0: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on rabbitmq-bundle-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on rabbitmq-bundle-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-controller-1: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on rabbitmq-bundle-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-controller-2: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-novacompute-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-0: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-1: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on rabbitmq-bundle-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on rabbitmq-bundle-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-controller-2: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on rabbitmq-bundle-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
-pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY
-pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: redis-bundle allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: redis-bundle allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: redis-bundle allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on overcloud-controller-0: INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on overcloud-controller-1: INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on overcloud-controller-2: INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on overcloud-controller-0: INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on overcloud-controller-1: INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on overcloud-controller-2: INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on overcloud-controller-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on overcloud-controller-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on overcloud-controller-2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on overcloud-novacompute-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
-pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: INFINITY
-pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: INFINITY
-pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: galera-bundle allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: galera-bundle allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: galera-bundle allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on overcloud-controller-0: INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on overcloud-controller-1: INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on overcloud-controller-2: INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-controller-0: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-controller-1: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-controller-2: INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on overcloud-novacompute-0: 0
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: INFINITY
+pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: INFINITY
+pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-controller-0: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-controller-0: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-controller-1: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-controller-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-controller-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-controller-1: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-controller-1: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-controller-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-controller-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-controller-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-controller-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-controller-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-backup allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: openstack-cinder-backup allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: openstack-cinder-backup allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: openstack-cinder-backup allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-backup-docker-0 allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: openstack-cinder-backup-docker-0 allocation score on overcloud-controller-1: INFINITY
+pcmk__bundle_assign: openstack-cinder-backup-docker-0 allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: openstack-cinder-backup-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: openstack-cinder-volume allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: openstack-cinder-volume allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: openstack-cinder-volume allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on overcloud-controller-0: INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-controller-0: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-controller-1: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-controller-2: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-novacompute-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-0: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-1: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-controller-2: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
+pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY
+pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: redis-bundle allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: redis-bundle allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: redis-bundle allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on overcloud-controller-0: INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on overcloud-controller-1: INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on overcloud-controller-2: INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on overcloud-controller-0: INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on overcloud-controller-1: INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on overcloud-controller-2: INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on overcloud-controller-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on overcloud-controller-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on overcloud-controller-2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on overcloud-novacompute-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
+pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: INFINITY
+pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: INFINITY
+pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: INFINITY
pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-0: 0
pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-1: 0
pcmk__clone_assign: galera-bundle-master allocation score on galera-bundle-2: 0
@@ -238,12 +238,12 @@ pcmk__primitive_assign: galera:0 allocation score on galera-bundle-0: INFINITY
pcmk__primitive_assign: galera:1 allocation score on galera-bundle-1: INFINITY
pcmk__primitive_assign: galera:2 allocation score on galera-bundle-2: INFINITY
pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on overcloud-controller-0: INFINITY
-pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on overcloud-controller-1: 0
-pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on overcloud-controller-2: 0
+pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on overcloud-controller-1: INFINITY
+pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on overcloud-controller-2: INFINITY
pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on overcloud-controller-0: -INFINITY
pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on overcloud-controller-1: INFINITY
-pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on overcloud-controller-2: 0
+pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on overcloud-controller-2: INFINITY
pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
pcmk__primitive_assign: haproxy-bundle-docker-2 allocation score on overcloud-controller-0: -INFINITY
pcmk__primitive_assign: haproxy-bundle-docker-2 allocation score on overcloud-controller-1: -INFINITY
diff --git a/cts/scheduler/scores/route-remote-notify.scores b/cts/scheduler/scores/route-remote-notify.scores
index e6fc549..e25fe64 100644
--- a/cts/scheduler/scores/route-remote-notify.scores
+++ b/cts/scheduler/scores/route-remote-notify.scores
@@ -1,64 +1,64 @@
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-0: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-1: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-2: 0
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-0: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-1: INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-2: INFINITY
-pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
-pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY
-pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-0: 0
+pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-1: 0
+pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-2: 0
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-0: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-1: INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-2: INFINITY
+pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: INFINITY
+pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: INFINITY
+pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: INFINITY
pcmk__clone_assign: rabbitmq-bundle-clone allocation score on controller-0: -INFINITY
pcmk__clone_assign: rabbitmq-bundle-clone allocation score on controller-1: -INFINITY
pcmk__clone_assign: rabbitmq-bundle-clone allocation score on controller-2: -INFINITY
@@ -73,7 +73,7 @@ pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on controller-1
pcmk__primitive_assign: haproxy-bundle-docker-0 allocation score on controller-2: -INFINITY
pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on controller-0: -INFINITY
pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on controller-1: INFINITY
-pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on controller-2: 0
+pcmk__primitive_assign: haproxy-bundle-docker-1 allocation score on controller-2: INFINITY
pcmk__primitive_assign: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY
pcmk__primitive_assign: haproxy-bundle-docker-2 allocation score on controller-1: -INFINITY
pcmk__primitive_assign: haproxy-bundle-docker-2 allocation score on controller-2: INFINITY
diff --git a/cts/scheduler/scores/rsc-sets-clone-1.scores b/cts/scheduler/scores/rsc-sets-clone-1.scores
index 8552073..4dc2187 100644
--- a/cts/scheduler/scores/rsc-sets-clone-1.scores
+++ b/cts/scheduler/scores/rsc-sets-clone-1.scores
@@ -82,23 +82,23 @@ pcmk__group_assign: vg2:1 allocation score on sys3: 0
pcmk__primitive_assign: clvmd:0 allocation score on sys2: INFINITY
pcmk__primitive_assign: clvmd:0 allocation score on sys3: -INFINITY
pcmk__primitive_assign: clvmd:1 allocation score on sys2: -INFINITY
-pcmk__primitive_assign: clvmd:1 allocation score on sys3: 0
+pcmk__primitive_assign: clvmd:1 allocation score on sys3: 10000
pcmk__primitive_assign: controld:0 allocation score on sys2: INFINITY
-pcmk__primitive_assign: controld:0 allocation score on sys3: 0
+pcmk__primitive_assign: controld:0 allocation score on sys3: 10000
pcmk__primitive_assign: controld:1 allocation score on sys2: -INFINITY
-pcmk__primitive_assign: controld:1 allocation score on sys3: 0
+pcmk__primitive_assign: controld:1 allocation score on sys3: 10000
pcmk__primitive_assign: fs2:0 allocation score on sys2: INFINITY
pcmk__primitive_assign: fs2:0 allocation score on sys3: -INFINITY
pcmk__primitive_assign: fs2:1 allocation score on sys2: -INFINITY
-pcmk__primitive_assign: fs2:1 allocation score on sys3: 0
+pcmk__primitive_assign: fs2:1 allocation score on sys3: 10000
pcmk__primitive_assign: iscsi1:0 allocation score on sys2: INFINITY
pcmk__primitive_assign: iscsi1:0 allocation score on sys3: -INFINITY
pcmk__primitive_assign: iscsi1:1 allocation score on sys2: -INFINITY
-pcmk__primitive_assign: iscsi1:1 allocation score on sys3: 0
+pcmk__primitive_assign: iscsi1:1 allocation score on sys3: 10000
pcmk__primitive_assign: iscsi2:0 allocation score on sys2: INFINITY
pcmk__primitive_assign: iscsi2:0 allocation score on sys3: -INFINITY
pcmk__primitive_assign: iscsi2:1 allocation score on sys2: -INFINITY
-pcmk__primitive_assign: iscsi2:1 allocation score on sys3: 0
+pcmk__primitive_assign: iscsi2:1 allocation score on sys3: 10000
pcmk__primitive_assign: nfs1:0 allocation score on sys2: -INFINITY
pcmk__primitive_assign: nfs1:0 allocation score on sys3: -INFINITY
pcmk__primitive_assign: nfs1:1 allocation score on sys2: -INFINITY
@@ -106,7 +106,7 @@ pcmk__primitive_assign: nfs1:1 allocation score on sys3: -INFINITY
pcmk__primitive_assign: o2cb:0 allocation score on sys2: INFINITY
pcmk__primitive_assign: o2cb:0 allocation score on sys3: -INFINITY
pcmk__primitive_assign: o2cb:1 allocation score on sys2: -INFINITY
-pcmk__primitive_assign: o2cb:1 allocation score on sys3: 0
+pcmk__primitive_assign: o2cb:1 allocation score on sys3: 10000
pcmk__primitive_assign: stonithsys2 allocation score on sys2: -INFINITY
pcmk__primitive_assign: stonithsys2 allocation score on sys3: 0
pcmk__primitive_assign: stonithsys3 allocation score on sys2: INFINITY
@@ -114,11 +114,11 @@ pcmk__primitive_assign: stonithsys3 allocation score on sys3: -INFINITY
pcmk__primitive_assign: vg1:0 allocation score on sys2: INFINITY
pcmk__primitive_assign: vg1:0 allocation score on sys3: -INFINITY
pcmk__primitive_assign: vg1:1 allocation score on sys2: -INFINITY
-pcmk__primitive_assign: vg1:1 allocation score on sys3: 0
+pcmk__primitive_assign: vg1:1 allocation score on sys3: 10000
pcmk__primitive_assign: vg2:0 allocation score on sys2: INFINITY
pcmk__primitive_assign: vg2:0 allocation score on sys3: -INFINITY
pcmk__primitive_assign: vg2:1 allocation score on sys2: -INFINITY
-pcmk__primitive_assign: vg2:1 allocation score on sys3: 0
+pcmk__primitive_assign: vg2:1 allocation score on sys3: 10000
pcmk__primitive_assign: vm1 allocation score on sys2: INFINITY
pcmk__primitive_assign: vm1 allocation score on sys3: 0
pcmk__primitive_assign: vm2 allocation score on sys2: -INFINITY
diff --git a/cts/scheduler/scores/start-then-stop-with-unfence.scores b/cts/scheduler/scores/start-then-stop-with-unfence.scores
index 5cc77e5..d8cd4ac 100644
--- a/cts/scheduler/scores/start-then-stop-with-unfence.scores
+++ b/cts/scheduler/scores/start-then-stop-with-unfence.scores
@@ -9,9 +9,9 @@ pcmk__primitive_assign: ip1 allocation score on rhel7-node1.example.com: 500
pcmk__primitive_assign: ip1 allocation score on rhel7-node2.example.com: 0
pcmk__primitive_assign: ip2 allocation score on rhel7-node1.example.com: 0
pcmk__primitive_assign: ip2 allocation score on rhel7-node2.example.com: 500
-pcmk__primitive_assign: jrummy:0 allocation score on rhel7-node1.example.com: 0
-pcmk__primitive_assign: jrummy:0 allocation score on rhel7-node2.example.com: 1
-pcmk__primitive_assign: jrummy:1 allocation score on rhel7-node1.example.com: 0
+pcmk__primitive_assign: jrummy:0 allocation score on rhel7-node1.example.com: 500
+pcmk__primitive_assign: jrummy:0 allocation score on rhel7-node2.example.com: 501
+pcmk__primitive_assign: jrummy:1 allocation score on rhel7-node1.example.com: 500
pcmk__primitive_assign: jrummy:1 allocation score on rhel7-node2.example.com: -INFINITY
pcmk__primitive_assign: mpath-node1 allocation score on rhel7-node1.example.com: 0
pcmk__primitive_assign: mpath-node1 allocation score on rhel7-node2.example.com: 0
diff --git a/cts/scheduler/scores/stop-all-resources.scores b/cts/scheduler/scores/stop-all-resources.scores
index 119ac99..d471564 100644
--- a/cts/scheduler/scores/stop-all-resources.scores
+++ b/cts/scheduler/scores/stop-all-resources.scores
@@ -1,32 +1,32 @@
-pcmk__bundle_allocate: httpd-bundle allocation score on cluster01: 0
-pcmk__bundle_allocate: httpd-bundle allocation score on cluster02: 0
-pcmk__bundle_allocate: httpd-bundle-0 allocation score on cluster01: 0
-pcmk__bundle_allocate: httpd-bundle-0 allocation score on cluster02: 0
-pcmk__bundle_allocate: httpd-bundle-1 allocation score on cluster01: 0
-pcmk__bundle_allocate: httpd-bundle-1 allocation score on cluster02: 0
-pcmk__bundle_allocate: httpd-bundle-2 allocation score on cluster01: 0
-pcmk__bundle_allocate: httpd-bundle-2 allocation score on cluster02: 0
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on cluster01: 0
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on cluster02: 0
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-0: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-1: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-2: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-docker-0 allocation score on cluster01: 0
-pcmk__bundle_allocate: httpd-bundle-docker-0 allocation score on cluster02: 0
-pcmk__bundle_allocate: httpd-bundle-docker-1 allocation score on cluster01: 0
-pcmk__bundle_allocate: httpd-bundle-docker-1 allocation score on cluster02: 0
-pcmk__bundle_allocate: httpd-bundle-docker-2 allocation score on cluster01: 0
-pcmk__bundle_allocate: httpd-bundle-docker-2 allocation score on cluster02: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on cluster01: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on cluster02: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on cluster01: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on cluster02: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on cluster01: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on cluster02: 0
-pcmk__bundle_allocate: httpd:0 allocation score on httpd-bundle-0: 500
-pcmk__bundle_allocate: httpd:1 allocation score on httpd-bundle-1: 500
-pcmk__bundle_allocate: httpd:2 allocation score on httpd-bundle-2: 500
+pcmk__bundle_assign: httpd-bundle allocation score on cluster01: 0
+pcmk__bundle_assign: httpd-bundle allocation score on cluster02: 0
+pcmk__bundle_assign: httpd-bundle-0 allocation score on cluster01: 0
+pcmk__bundle_assign: httpd-bundle-0 allocation score on cluster02: 0
+pcmk__bundle_assign: httpd-bundle-1 allocation score on cluster01: 0
+pcmk__bundle_assign: httpd-bundle-1 allocation score on cluster02: 0
+pcmk__bundle_assign: httpd-bundle-2 allocation score on cluster01: 0
+pcmk__bundle_assign: httpd-bundle-2 allocation score on cluster02: 0
+pcmk__bundle_assign: httpd-bundle-clone allocation score on cluster01: 0
+pcmk__bundle_assign: httpd-bundle-clone allocation score on cluster02: 0
+pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-0: -INFINITY
+pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-1: -INFINITY
+pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-2: -INFINITY
+pcmk__bundle_assign: httpd-bundle-docker-0 allocation score on cluster01: 0
+pcmk__bundle_assign: httpd-bundle-docker-0 allocation score on cluster02: 0
+pcmk__bundle_assign: httpd-bundle-docker-1 allocation score on cluster01: 0
+pcmk__bundle_assign: httpd-bundle-docker-1 allocation score on cluster02: 0
+pcmk__bundle_assign: httpd-bundle-docker-2 allocation score on cluster01: 0
+pcmk__bundle_assign: httpd-bundle-docker-2 allocation score on cluster02: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on cluster01: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on cluster02: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on cluster01: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on cluster02: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on cluster01: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on cluster02: 0
+pcmk__bundle_assign: httpd:0 allocation score on httpd-bundle-0: 500
+pcmk__bundle_assign: httpd:1 allocation score on httpd-bundle-1: 500
+pcmk__bundle_assign: httpd:2 allocation score on httpd-bundle-2: 500
pcmk__clone_assign: httpd-bundle-clone allocation score on cluster01: -INFINITY
pcmk__clone_assign: httpd-bundle-clone allocation score on cluster02: -INFINITY
pcmk__clone_assign: httpd-bundle-clone allocation score on httpd-bundle-0: 0
diff --git a/cts/scheduler/scores/timeout-by-node.scores b/cts/scheduler/scores/timeout-by-node.scores
new file mode 100644
index 0000000..adb96a5
--- /dev/null
+++ b/cts/scheduler/scores/timeout-by-node.scores
@@ -0,0 +1,61 @@
+
+pcmk__clone_assign: rsc1-clone allocation score on node1: 0
+pcmk__clone_assign: rsc1-clone allocation score on node2: 0
+pcmk__clone_assign: rsc1-clone allocation score on node3: 0
+pcmk__clone_assign: rsc1-clone allocation score on node4: 0
+pcmk__clone_assign: rsc1-clone allocation score on node5: 0
+pcmk__clone_assign: rsc1:0 allocation score on node1: 0
+pcmk__clone_assign: rsc1:0 allocation score on node2: 0
+pcmk__clone_assign: rsc1:0 allocation score on node3: 0
+pcmk__clone_assign: rsc1:0 allocation score on node4: 0
+pcmk__clone_assign: rsc1:0 allocation score on node5: 0
+pcmk__clone_assign: rsc1:1 allocation score on node1: 0
+pcmk__clone_assign: rsc1:1 allocation score on node2: 0
+pcmk__clone_assign: rsc1:1 allocation score on node3: 0
+pcmk__clone_assign: rsc1:1 allocation score on node4: 0
+pcmk__clone_assign: rsc1:1 allocation score on node5: 0
+pcmk__clone_assign: rsc1:2 allocation score on node1: 0
+pcmk__clone_assign: rsc1:2 allocation score on node2: 0
+pcmk__clone_assign: rsc1:2 allocation score on node3: 0
+pcmk__clone_assign: rsc1:2 allocation score on node4: 0
+pcmk__clone_assign: rsc1:2 allocation score on node5: 0
+pcmk__clone_assign: rsc1:3 allocation score on node1: 0
+pcmk__clone_assign: rsc1:3 allocation score on node2: 0
+pcmk__clone_assign: rsc1:3 allocation score on node3: 0
+pcmk__clone_assign: rsc1:3 allocation score on node4: 0
+pcmk__clone_assign: rsc1:3 allocation score on node5: 0
+pcmk__clone_assign: rsc1:4 allocation score on node1: 0
+pcmk__clone_assign: rsc1:4 allocation score on node2: 0
+pcmk__clone_assign: rsc1:4 allocation score on node3: 0
+pcmk__clone_assign: rsc1:4 allocation score on node4: 0
+pcmk__clone_assign: rsc1:4 allocation score on node5: 0
+pcmk__primitive_assign: Fencing allocation score on node1: 0
+pcmk__primitive_assign: Fencing allocation score on node2: 0
+pcmk__primitive_assign: Fencing allocation score on node3: 0
+pcmk__primitive_assign: Fencing allocation score on node4: 0
+pcmk__primitive_assign: Fencing allocation score on node5: 0
+pcmk__primitive_assign: rsc1:0 allocation score on node1: 0
+pcmk__primitive_assign: rsc1:0 allocation score on node2: 0
+pcmk__primitive_assign: rsc1:0 allocation score on node3: 0
+pcmk__primitive_assign: rsc1:0 allocation score on node4: 0
+pcmk__primitive_assign: rsc1:0 allocation score on node5: 0
+pcmk__primitive_assign: rsc1:1 allocation score on node1: 0
+pcmk__primitive_assign: rsc1:1 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc1:1 allocation score on node3: 0
+pcmk__primitive_assign: rsc1:1 allocation score on node4: 0
+pcmk__primitive_assign: rsc1:1 allocation score on node5: 0
+pcmk__primitive_assign: rsc1:2 allocation score on node1: 0
+pcmk__primitive_assign: rsc1:2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc1:2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc1:2 allocation score on node4: 0
+pcmk__primitive_assign: rsc1:2 allocation score on node5: 0
+pcmk__primitive_assign: rsc1:3 allocation score on node1: 0
+pcmk__primitive_assign: rsc1:3 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc1:3 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc1:3 allocation score on node4: -INFINITY
+pcmk__primitive_assign: rsc1:3 allocation score on node5: 0
+pcmk__primitive_assign: rsc1:4 allocation score on node1: 0
+pcmk__primitive_assign: rsc1:4 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc1:4 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc1:4 allocation score on node4: -INFINITY
+pcmk__primitive_assign: rsc1:4 allocation score on node5: -INFINITY
diff --git a/cts/scheduler/scores/unrunnable-2.scores b/cts/scheduler/scores/unrunnable-2.scores
index 52b7ffd..50ecabc 100644
--- a/cts/scheduler/scores/unrunnable-2.scores
+++ b/cts/scheduler/scores/unrunnable-2.scores
@@ -476,7 +476,7 @@ pcmk__primitive_assign: galera:1 allocation score on overcloud-controller-2: 0
pcmk__primitive_assign: galera:2 allocation score on overcloud-controller-0: -INFINITY
pcmk__primitive_assign: galera:2 allocation score on overcloud-controller-1: -INFINITY
pcmk__primitive_assign: galera:2 allocation score on overcloud-controller-2: INFINITY
-pcmk__primitive_assign: haproxy:0 allocation score on overcloud-controller-0: 0
+pcmk__primitive_assign: haproxy:0 allocation score on overcloud-controller-0: INFINITY
pcmk__primitive_assign: haproxy:0 allocation score on overcloud-controller-1: INFINITY
pcmk__primitive_assign: haproxy:0 allocation score on overcloud-controller-2: 0
pcmk__primitive_assign: haproxy:1 allocation score on overcloud-controller-0: INFINITY
diff --git a/cts/scheduler/scores/utilization-complex.scores b/cts/scheduler/scores/utilization-complex.scores
index 29bc92c..c37023d 100644
--- a/cts/scheduler/scores/utilization-complex.scores
+++ b/cts/scheduler/scores/utilization-complex.scores
@@ -1,80 +1,80 @@
-pcmk__bundle_allocate: httpd-bundle allocation score on rhel8-1: 0
-pcmk__bundle_allocate: httpd-bundle allocation score on rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle allocation score on rhel8-3: 0
-pcmk__bundle_allocate: httpd-bundle allocation score on rhel8-4: 0
-pcmk__bundle_allocate: httpd-bundle allocation score on rhel8-5: 0
-pcmk__bundle_allocate: httpd-bundle-0 allocation score on httpd-bundle-1: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-0 allocation score on httpd-bundle-2: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-0 allocation score on rhel8-1: 0
-pcmk__bundle_allocate: httpd-bundle-0 allocation score on rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-0 allocation score on rhel8-3: 0
-pcmk__bundle_allocate: httpd-bundle-0 allocation score on rhel8-4: 0
-pcmk__bundle_allocate: httpd-bundle-0 allocation score on rhel8-5: 0
-pcmk__bundle_allocate: httpd-bundle-1 allocation score on httpd-bundle-2: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-1 allocation score on rhel8-1: 0
-pcmk__bundle_allocate: httpd-bundle-1 allocation score on rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-1 allocation score on rhel8-3: 0
-pcmk__bundle_allocate: httpd-bundle-1 allocation score on rhel8-4: 0
-pcmk__bundle_allocate: httpd-bundle-1 allocation score on rhel8-5: 0
-pcmk__bundle_allocate: httpd-bundle-2 allocation score on rhel8-1: 0
-pcmk__bundle_allocate: httpd-bundle-2 allocation score on rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-2 allocation score on rhel8-3: 0
-pcmk__bundle_allocate: httpd-bundle-2 allocation score on rhel8-4: 0
-pcmk__bundle_allocate: httpd-bundle-2 allocation score on rhel8-5: 0
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-0: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-1: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on httpd-bundle-2: -INFINITY
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on rhel8-1: 0
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on rhel8-3: 0
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on rhel8-4: 0
-pcmk__bundle_allocate: httpd-bundle-clone allocation score on rhel8-5: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on httpd-bundle-0: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on httpd-bundle-1: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on httpd-bundle-2: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-1: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-3: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-4: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-5: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on httpd-bundle-1: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on httpd-bundle-2: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-1: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-3: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-4: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-5: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on httpd-bundle-2: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-1: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-3: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-4: 0
-pcmk__bundle_allocate: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-5: 0
-pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on httpd-bundle-0: 0
-pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on httpd-bundle-1: 0
-pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on httpd-bundle-2: 0
-pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on rhel8-1: 0
-pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on rhel8-3: 0
-pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on rhel8-4: 0
-pcmk__bundle_allocate: httpd-bundle-podman-0 allocation score on rhel8-5: 0
-pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on httpd-bundle-1: 0
-pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on httpd-bundle-2: 0
-pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on rhel8-1: 0
-pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on rhel8-3: 0
-pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on rhel8-4: 0
-pcmk__bundle_allocate: httpd-bundle-podman-1 allocation score on rhel8-5: 0
-pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on httpd-bundle-2: 0
-pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on rhel8-1: 0
-pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on rhel8-2: 0
-pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on rhel8-3: 0
-pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on rhel8-4: 0
-pcmk__bundle_allocate: httpd-bundle-podman-2 allocation score on rhel8-5: 0
-pcmk__bundle_allocate: httpd:0 allocation score on httpd-bundle-0: 501
-pcmk__bundle_allocate: httpd:1 allocation score on httpd-bundle-1: 500
-pcmk__bundle_allocate: httpd:2 allocation score on httpd-bundle-2: 500
+pcmk__bundle_assign: httpd-bundle allocation score on rhel8-1: 0
+pcmk__bundle_assign: httpd-bundle allocation score on rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle allocation score on rhel8-3: 0
+pcmk__bundle_assign: httpd-bundle allocation score on rhel8-4: 0
+pcmk__bundle_assign: httpd-bundle allocation score on rhel8-5: 0
+pcmk__bundle_assign: httpd-bundle-0 allocation score on httpd-bundle-1: -INFINITY
+pcmk__bundle_assign: httpd-bundle-0 allocation score on httpd-bundle-2: -INFINITY
+pcmk__bundle_assign: httpd-bundle-0 allocation score on rhel8-1: 0
+pcmk__bundle_assign: httpd-bundle-0 allocation score on rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-0 allocation score on rhel8-3: 0
+pcmk__bundle_assign: httpd-bundle-0 allocation score on rhel8-4: 0
+pcmk__bundle_assign: httpd-bundle-0 allocation score on rhel8-5: 0
+pcmk__bundle_assign: httpd-bundle-1 allocation score on httpd-bundle-2: -INFINITY
+pcmk__bundle_assign: httpd-bundle-1 allocation score on rhel8-1: 0
+pcmk__bundle_assign: httpd-bundle-1 allocation score on rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-1 allocation score on rhel8-3: 0
+pcmk__bundle_assign: httpd-bundle-1 allocation score on rhel8-4: 0
+pcmk__bundle_assign: httpd-bundle-1 allocation score on rhel8-5: 0
+pcmk__bundle_assign: httpd-bundle-2 allocation score on rhel8-1: 0
+pcmk__bundle_assign: httpd-bundle-2 allocation score on rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-2 allocation score on rhel8-3: 0
+pcmk__bundle_assign: httpd-bundle-2 allocation score on rhel8-4: 0
+pcmk__bundle_assign: httpd-bundle-2 allocation score on rhel8-5: 0
+pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-0: -INFINITY
+pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-1: -INFINITY
+pcmk__bundle_assign: httpd-bundle-clone allocation score on httpd-bundle-2: -INFINITY
+pcmk__bundle_assign: httpd-bundle-clone allocation score on rhel8-1: 0
+pcmk__bundle_assign: httpd-bundle-clone allocation score on rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-clone allocation score on rhel8-3: 0
+pcmk__bundle_assign: httpd-bundle-clone allocation score on rhel8-4: 0
+pcmk__bundle_assign: httpd-bundle-clone allocation score on rhel8-5: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on httpd-bundle-0: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on httpd-bundle-1: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on httpd-bundle-2: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-1: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-3: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-4: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.131 allocation score on rhel8-5: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on httpd-bundle-1: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on httpd-bundle-2: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-1: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-3: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-4: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.132 allocation score on rhel8-5: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on httpd-bundle-2: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-1: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-3: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-4: 0
+pcmk__bundle_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-5: 0
+pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-0: 0
+pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-1: 0
+pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-2: 0
+pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on rhel8-1: 0
+pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on rhel8-3: 0
+pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on rhel8-4: 0
+pcmk__bundle_assign: httpd-bundle-podman-0 allocation score on rhel8-5: 0
+pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on httpd-bundle-1: 0
+pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on httpd-bundle-2: 0
+pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on rhel8-1: 0
+pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on rhel8-3: 0
+pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on rhel8-4: 0
+pcmk__bundle_assign: httpd-bundle-podman-1 allocation score on rhel8-5: 0
+pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on httpd-bundle-2: 0
+pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on rhel8-1: 0
+pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on rhel8-2: 0
+pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on rhel8-3: 0
+pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on rhel8-4: 0
+pcmk__bundle_assign: httpd-bundle-podman-2 allocation score on rhel8-5: 0
+pcmk__bundle_assign: httpd:0 allocation score on httpd-bundle-0: 501
+pcmk__bundle_assign: httpd:1 allocation score on httpd-bundle-1: 500
+pcmk__bundle_assign: httpd:2 allocation score on httpd-bundle-2: 500
pcmk__clone_assign: clone1-clone allocation score on httpd-bundle-0: -INFINITY
pcmk__clone_assign: clone1-clone allocation score on httpd-bundle-1: -INFINITY
pcmk__clone_assign: clone1-clone allocation score on httpd-bundle-2: -INFINITY
@@ -312,18 +312,26 @@ pcmk__primitive_assign: clone1:2 allocation score on rhel8-4: 1
pcmk__primitive_assign: clone1:2 allocation score on rhel8-5: 0
pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-0: -INFINITY
pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-0: -INFINITY
+pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-0: -INFINITY
+pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-1: -INFINITY
pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-1: -INFINITY
pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-1: -INFINITY
pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-2: -INFINITY
pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-2: -INFINITY
+pcmk__primitive_assign: clone1:3 allocation score on httpd-bundle-2: -INFINITY
+pcmk__primitive_assign: clone1:3 allocation score on rhel8-1: -INFINITY
pcmk__primitive_assign: clone1:3 allocation score on rhel8-1: -INFINITY
pcmk__primitive_assign: clone1:3 allocation score on rhel8-1: 0
pcmk__primitive_assign: clone1:3 allocation score on rhel8-2: -INFINITY
pcmk__primitive_assign: clone1:3 allocation score on rhel8-2: -INFINITY
+pcmk__primitive_assign: clone1:3 allocation score on rhel8-2: -INFINITY
+pcmk__primitive_assign: clone1:3 allocation score on rhel8-3: -INFINITY
pcmk__primitive_assign: clone1:3 allocation score on rhel8-3: -INFINITY
pcmk__primitive_assign: clone1:3 allocation score on rhel8-3: -INFINITY
pcmk__primitive_assign: clone1:3 allocation score on rhel8-4: -INFINITY
pcmk__primitive_assign: clone1:3 allocation score on rhel8-4: -INFINITY
+pcmk__primitive_assign: clone1:3 allocation score on rhel8-4: -INFINITY
+pcmk__primitive_assign: clone1:3 allocation score on rhel8-5: 1
pcmk__primitive_assign: clone1:3 allocation score on rhel8-5: 1
pcmk__primitive_assign: clone1:3 allocation score on rhel8-5: 1
pcmk__primitive_assign: clone1:4 allocation score on httpd-bundle-0: -INFINITY
@@ -384,18 +392,26 @@ pcmk__primitive_assign: clone2:2 allocation score on rhel8-4: 1
pcmk__primitive_assign: clone2:2 allocation score on rhel8-5: -INFINITY
pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-0: -INFINITY
pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-0: -INFINITY
+pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-0: -INFINITY
pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-1: -INFINITY
pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-1: -INFINITY
+pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-1: -INFINITY
+pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-2: -INFINITY
pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-2: -INFINITY
pcmk__primitive_assign: clone2:3 allocation score on httpd-bundle-2: -INFINITY
pcmk__primitive_assign: clone2:3 allocation score on rhel8-1: -INFINITY
+pcmk__primitive_assign: clone2:3 allocation score on rhel8-1: -INFINITY
pcmk__primitive_assign: clone2:3 allocation score on rhel8-1: 0
pcmk__primitive_assign: clone2:3 allocation score on rhel8-2: -INFINITY
pcmk__primitive_assign: clone2:3 allocation score on rhel8-2: -INFINITY
+pcmk__primitive_assign: clone2:3 allocation score on rhel8-2: -INFINITY
+pcmk__primitive_assign: clone2:3 allocation score on rhel8-3: -INFINITY
pcmk__primitive_assign: clone2:3 allocation score on rhel8-3: -INFINITY
pcmk__primitive_assign: clone2:3 allocation score on rhel8-3: -INFINITY
pcmk__primitive_assign: clone2:3 allocation score on rhel8-4: -INFINITY
pcmk__primitive_assign: clone2:3 allocation score on rhel8-4: -INFINITY
+pcmk__primitive_assign: clone2:3 allocation score on rhel8-4: -INFINITY
+pcmk__primitive_assign: clone2:3 allocation score on rhel8-5: -INFINITY
pcmk__primitive_assign: clone2:3 allocation score on rhel8-5: -INFINITY
pcmk__primitive_assign: clone2:3 allocation score on rhel8-5: -INFINITY
pcmk__primitive_assign: clone2:4 allocation score on httpd-bundle-0: -INFINITY
@@ -535,18 +551,26 @@ pcmk__primitive_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel
pcmk__primitive_assign: httpd-bundle-ip-192.168.122.133 allocation score on rhel8-5: -INFINITY
pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-0: -INFINITY
pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-0: -INFINITY
+pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-0: -INFINITY
+pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-1: -INFINITY
pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-1: -INFINITY
pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-1: -INFINITY
pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-2: -INFINITY
pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-2: -INFINITY
+pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on httpd-bundle-2: -INFINITY
+pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-1: -INFINITY
pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-1: -INFINITY
pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-1: -INFINITY
pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-2: -INFINITY
pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-2: -INFINITY
+pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-2: -INFINITY
pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-3: -INFINITY
pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-3: -INFINITY
+pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-3: -INFINITY
+pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-4: -INFINITY
pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-4: -INFINITY
pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-4: -INFINITY
+pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-5: -INFINITY
pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-5: 0
pcmk__primitive_assign: httpd-bundle-podman-0 allocation score on rhel8-5: 0
pcmk__primitive_assign: httpd-bundle-podman-1 allocation score on httpd-bundle-1: -INFINITY
diff --git a/cts/scheduler/scores/utilization-order2.scores b/cts/scheduler/scores/utilization-order2.scores
index c4b49d9..4476b60 100644
--- a/cts/scheduler/scores/utilization-order2.scores
+++ b/cts/scheduler/scores/utilization-order2.scores
@@ -9,6 +9,8 @@ pcmk__primitive_assign: rsc1 allocation score on node1: 0
pcmk__primitive_assign: rsc1 allocation score on node2: 0
pcmk__primitive_assign: rsc2:0 allocation score on node1: 1
pcmk__primitive_assign: rsc2:0 allocation score on node1: 1
+pcmk__primitive_assign: rsc2:0 allocation score on node1: 1
+pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY
pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY
pcmk__primitive_assign: rsc2:0 allocation score on node2: 0
pcmk__primitive_assign: rsc2:1 allocation score on node1: 0
diff --git a/cts/scheduler/scores/utilization-order4.scores b/cts/scheduler/scores/utilization-order4.scores
index 21eef87..fdc6163 100644
--- a/cts/scheduler/scores/utilization-order4.scores
+++ b/cts/scheduler/scores/utilization-order4.scores
@@ -47,15 +47,15 @@ pcmk__primitive_assign: degllx63-vm allocation score on deglxen001: -INFINITY
pcmk__primitive_assign: degllx63-vm allocation score on deglxen002: -INFINITY
pcmk__primitive_assign: degllx64-vm allocation score on deglxen001: -INFINITY
pcmk__primitive_assign: degllx64-vm allocation score on deglxen002: -INFINITY
-pcmk__primitive_assign: nfs-xen_config:0 allocation score on deglxen001: 300000
+pcmk__primitive_assign: nfs-xen_config:0 allocation score on deglxen001: 410000
pcmk__primitive_assign: nfs-xen_config:0 allocation score on deglxen002: -INFINITY
pcmk__primitive_assign: nfs-xen_config:1 allocation score on deglxen001: -INFINITY
pcmk__primitive_assign: nfs-xen_config:1 allocation score on deglxen002: -INFINITY
-pcmk__primitive_assign: nfs-xen_images:0 allocation score on deglxen001: 100000
+pcmk__primitive_assign: nfs-xen_images:0 allocation score on deglxen001: 210000
pcmk__primitive_assign: nfs-xen_images:0 allocation score on deglxen002: -INFINITY
pcmk__primitive_assign: nfs-xen_images:1 allocation score on deglxen001: -INFINITY
pcmk__primitive_assign: nfs-xen_images:1 allocation score on deglxen002: -INFINITY
-pcmk__primitive_assign: nfs-xen_swapfiles:0 allocation score on deglxen001: 200000
+pcmk__primitive_assign: nfs-xen_swapfiles:0 allocation score on deglxen001: 310000
pcmk__primitive_assign: nfs-xen_swapfiles:0 allocation score on deglxen002: -INFINITY
pcmk__primitive_assign: nfs-xen_swapfiles:1 allocation score on deglxen001: -INFINITY
pcmk__primitive_assign: nfs-xen_swapfiles:1 allocation score on deglxen002: -INFINITY
diff --git a/cts/scheduler/scores/utilization-shuffle.scores b/cts/scheduler/scores/utilization-shuffle.scores
index 5568dd3..e58269b 100644
--- a/cts/scheduler/scores/utilization-shuffle.scores
+++ b/cts/scheduler/scores/utilization-shuffle.scores
@@ -210,13 +210,23 @@ pcmk__primitive_assign: prmApPostgreSQLDB3 allocation score on act3: -INFINITY
pcmk__primitive_assign: prmApPostgreSQLDB3 allocation score on sby1: -INFINITY
pcmk__primitive_assign: prmApPostgreSQLDB3 allocation score on sby2: -INFINITY
pcmk__primitive_assign: prmDiskd1:0 allocation score on act1: -INFINITY
+pcmk__primitive_assign: prmDiskd1:0 allocation score on act1: -INFINITY
+pcmk__primitive_assign: prmDiskd1:0 allocation score on act1: -INFINITY
+pcmk__primitive_assign: prmDiskd1:0 allocation score on act2: -INFINITY
+pcmk__primitive_assign: prmDiskd1:0 allocation score on act2: -INFINITY
pcmk__primitive_assign: prmDiskd1:0 allocation score on act2: -INFINITY
+pcmk__primitive_assign: prmDiskd1:0 allocation score on act3: -INFINITY
+pcmk__primitive_assign: prmDiskd1:0 allocation score on act3: -INFINITY
pcmk__primitive_assign: prmDiskd1:0 allocation score on act3: INFINITY
+pcmk__primitive_assign: prmDiskd1:0 allocation score on sby1: -INFINITY
+pcmk__primitive_assign: prmDiskd1:0 allocation score on sby1: -INFINITY
pcmk__primitive_assign: prmDiskd1:0 allocation score on sby1: 0
+pcmk__primitive_assign: prmDiskd1:0 allocation score on sby2: -INFINITY
+pcmk__primitive_assign: prmDiskd1:0 allocation score on sby2: 0
pcmk__primitive_assign: prmDiskd1:0 allocation score on sby2: 0
pcmk__primitive_assign: prmDiskd1:1 allocation score on act1: INFINITY
-pcmk__primitive_assign: prmDiskd1:1 allocation score on act2: 0
-pcmk__primitive_assign: prmDiskd1:1 allocation score on act3: 0
+pcmk__primitive_assign: prmDiskd1:1 allocation score on act2: INFINITY
+pcmk__primitive_assign: prmDiskd1:1 allocation score on act3: -INFINITY
pcmk__primitive_assign: prmDiskd1:1 allocation score on sby1: 0
pcmk__primitive_assign: prmDiskd1:1 allocation score on sby2: 0
pcmk__primitive_assign: prmDiskd1:2 allocation score on act1: -INFINITY
@@ -231,17 +241,27 @@ pcmk__primitive_assign: prmDiskd1:3 allocation score on sby1: -INFINITY
pcmk__primitive_assign: prmDiskd1:3 allocation score on sby2: INFINITY
pcmk__primitive_assign: prmDiskd1:4 allocation score on act1: -INFINITY
pcmk__primitive_assign: prmDiskd1:4 allocation score on act2: INFINITY
-pcmk__primitive_assign: prmDiskd1:4 allocation score on act3: 0
+pcmk__primitive_assign: prmDiskd1:4 allocation score on act3: -INFINITY
pcmk__primitive_assign: prmDiskd1:4 allocation score on sby1: 0
pcmk__primitive_assign: prmDiskd1:4 allocation score on sby2: 0
pcmk__primitive_assign: prmDiskd2:0 allocation score on act1: -INFINITY
+pcmk__primitive_assign: prmDiskd2:0 allocation score on act1: -INFINITY
+pcmk__primitive_assign: prmDiskd2:0 allocation score on act1: -INFINITY
+pcmk__primitive_assign: prmDiskd2:0 allocation score on act2: -INFINITY
pcmk__primitive_assign: prmDiskd2:0 allocation score on act2: -INFINITY
+pcmk__primitive_assign: prmDiskd2:0 allocation score on act2: -INFINITY
+pcmk__primitive_assign: prmDiskd2:0 allocation score on act3: -INFINITY
+pcmk__primitive_assign: prmDiskd2:0 allocation score on act3: -INFINITY
pcmk__primitive_assign: prmDiskd2:0 allocation score on act3: INFINITY
+pcmk__primitive_assign: prmDiskd2:0 allocation score on sby1: -INFINITY
+pcmk__primitive_assign: prmDiskd2:0 allocation score on sby1: -INFINITY
pcmk__primitive_assign: prmDiskd2:0 allocation score on sby1: 0
+pcmk__primitive_assign: prmDiskd2:0 allocation score on sby2: -INFINITY
+pcmk__primitive_assign: prmDiskd2:0 allocation score on sby2: 0
pcmk__primitive_assign: prmDiskd2:0 allocation score on sby2: 0
pcmk__primitive_assign: prmDiskd2:1 allocation score on act1: INFINITY
-pcmk__primitive_assign: prmDiskd2:1 allocation score on act2: 0
-pcmk__primitive_assign: prmDiskd2:1 allocation score on act3: 0
+pcmk__primitive_assign: prmDiskd2:1 allocation score on act2: INFINITY
+pcmk__primitive_assign: prmDiskd2:1 allocation score on act3: -INFINITY
pcmk__primitive_assign: prmDiskd2:1 allocation score on sby1: 0
pcmk__primitive_assign: prmDiskd2:1 allocation score on sby2: 0
pcmk__primitive_assign: prmDiskd2:2 allocation score on act1: -INFINITY
@@ -256,7 +276,7 @@ pcmk__primitive_assign: prmDiskd2:3 allocation score on sby1: -INFINITY
pcmk__primitive_assign: prmDiskd2:3 allocation score on sby2: INFINITY
pcmk__primitive_assign: prmDiskd2:4 allocation score on act1: -INFINITY
pcmk__primitive_assign: prmDiskd2:4 allocation score on act2: INFINITY
-pcmk__primitive_assign: prmDiskd2:4 allocation score on act3: 0
+pcmk__primitive_assign: prmDiskd2:4 allocation score on act3: -INFINITY
pcmk__primitive_assign: prmDiskd2:4 allocation score on sby1: 0
pcmk__primitive_assign: prmDiskd2:4 allocation score on sby2: 0
pcmk__primitive_assign: prmExPostgreSQLDB1 allocation score on act1: 200
@@ -335,13 +355,23 @@ pcmk__primitive_assign: prmIpPostgreSQLDB3 allocation score on act3: -INFINITY
pcmk__primitive_assign: prmIpPostgreSQLDB3 allocation score on sby1: -INFINITY
pcmk__primitive_assign: prmIpPostgreSQLDB3 allocation score on sby2: -INFINITY
pcmk__primitive_assign: prmPingd:0 allocation score on act1: -INFINITY
+pcmk__primitive_assign: prmPingd:0 allocation score on act1: -INFINITY
+pcmk__primitive_assign: prmPingd:0 allocation score on act1: -INFINITY
pcmk__primitive_assign: prmPingd:0 allocation score on act2: -INFINITY
+pcmk__primitive_assign: prmPingd:0 allocation score on act2: -INFINITY
+pcmk__primitive_assign: prmPingd:0 allocation score on act2: -INFINITY
+pcmk__primitive_assign: prmPingd:0 allocation score on act3: -INFINITY
+pcmk__primitive_assign: prmPingd:0 allocation score on act3: -INFINITY
pcmk__primitive_assign: prmPingd:0 allocation score on act3: INFINITY
+pcmk__primitive_assign: prmPingd:0 allocation score on sby1: -INFINITY
+pcmk__primitive_assign: prmPingd:0 allocation score on sby1: -INFINITY
pcmk__primitive_assign: prmPingd:0 allocation score on sby1: 0
+pcmk__primitive_assign: prmPingd:0 allocation score on sby2: -INFINITY
+pcmk__primitive_assign: prmPingd:0 allocation score on sby2: 0
pcmk__primitive_assign: prmPingd:0 allocation score on sby2: 0
pcmk__primitive_assign: prmPingd:1 allocation score on act1: INFINITY
-pcmk__primitive_assign: prmPingd:1 allocation score on act2: 0
-pcmk__primitive_assign: prmPingd:1 allocation score on act3: 0
+pcmk__primitive_assign: prmPingd:1 allocation score on act2: INFINITY
+pcmk__primitive_assign: prmPingd:1 allocation score on act3: -INFINITY
pcmk__primitive_assign: prmPingd:1 allocation score on sby1: 0
pcmk__primitive_assign: prmPingd:1 allocation score on sby2: 0
pcmk__primitive_assign: prmPingd:2 allocation score on act1: -INFINITY
@@ -356,6 +386,6 @@ pcmk__primitive_assign: prmPingd:3 allocation score on sby1: -INFINITY
pcmk__primitive_assign: prmPingd:3 allocation score on sby2: INFINITY
pcmk__primitive_assign: prmPingd:4 allocation score on act1: -INFINITY
pcmk__primitive_assign: prmPingd:4 allocation score on act2: INFINITY
-pcmk__primitive_assign: prmPingd:4 allocation score on act3: 0
+pcmk__primitive_assign: prmPingd:4 allocation score on act3: -INFINITY
pcmk__primitive_assign: prmPingd:4 allocation score on sby1: 0
pcmk__primitive_assign: prmPingd:4 allocation score on sby2: 0
diff --git a/cts/scheduler/scores/year-2038.scores b/cts/scheduler/scores/year-2038.scores
index 1605ec0..a2fe598 100644
--- a/cts/scheduler/scores/year-2038.scores
+++ b/cts/scheduler/scores/year-2038.scores
@@ -2,194 +2,194 @@
galera:0 promotion score on galera-bundle-0: 100
galera:1 promotion score on galera-bundle-1: 100
galera:2 promotion score on galera-bundle-2: 100
-pcmk__bundle_allocate: galera-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-0 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-0: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-1: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on controller-2: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
-pcmk__bundle_allocate: galera-bundle-master allocation score on overcloud-novacompute-0: 0
-pcmk__bundle_allocate: galera-bundle-master allocation score on overcloud-novacompute-1: 0
-pcmk__bundle_allocate: galera:0 allocation score on galera-bundle-0: 501
-pcmk__bundle_allocate: galera:1 allocation score on galera-bundle-1: 501
-pcmk__bundle_allocate: galera:2 allocation score on galera-bundle-2: 501
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-0: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-1: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on controller-2: 0
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: openstack-cinder-volume-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-0 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-novacompute-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on overcloud-novacompute-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: rabbitmq-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: rabbitmq:0 allocation score on rabbitmq-bundle-0: 501
-pcmk__bundle_allocate: rabbitmq:1 allocation score on rabbitmq-bundle-1: 501
-pcmk__bundle_allocate: rabbitmq:2 allocation score on rabbitmq-bundle-2: 501
-pcmk__bundle_allocate: redis-bundle allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-0 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on controller-2: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on overcloud-novacompute-0: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on overcloud-novacompute-1: 0
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
-pcmk__bundle_allocate: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
-pcmk__bundle_allocate: redis:0 allocation score on redis-bundle-0: 501
-pcmk__bundle_allocate: redis:1 allocation score on redis-bundle-1: 501
-pcmk__bundle_allocate: redis:2 allocation score on redis-bundle-2: 501
+pcmk__bundle_assign: galera-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-0 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-0: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-1: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on controller-2: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-0: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-1: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on galera-bundle-2: -INFINITY
+pcmk__bundle_assign: galera-bundle-master allocation score on overcloud-novacompute-0: 0
+pcmk__bundle_assign: galera-bundle-master allocation score on overcloud-novacompute-1: 0
+pcmk__bundle_assign: galera:0 allocation score on galera-bundle-0: 501
+pcmk__bundle_assign: galera:1 allocation score on galera-bundle-1: 501
+pcmk__bundle_assign: galera:2 allocation score on galera-bundle-2: 501
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on controller-2: 0
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: haproxy-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-0: 0
+pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-1: 0
+pcmk__bundle_assign: openstack-cinder-volume allocation score on controller-2: 0
+pcmk__bundle_assign: openstack-cinder-volume allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-0: 0
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: openstack-cinder-volume-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-0 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-novacompute-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on overcloud-novacompute-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-clone allocation score on rabbitmq-bundle-2: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on controller-2: 0
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: rabbitmq-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: rabbitmq:0 allocation score on rabbitmq-bundle-0: 501
+pcmk__bundle_assign: rabbitmq:1 allocation score on rabbitmq-bundle-1: 501
+pcmk__bundle_assign: rabbitmq:2 allocation score on rabbitmq-bundle-2: 501
+pcmk__bundle_assign: redis-bundle allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-0 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-1 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-2 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-0 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-1 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on overcloud-novacompute-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-docker-2 allocation score on overcloud-novacompute-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on controller-2: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on overcloud-novacompute-0: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on overcloud-novacompute-1: 0
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-0: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-1: -INFINITY
+pcmk__bundle_assign: redis-bundle-master allocation score on redis-bundle-2: -INFINITY
+pcmk__bundle_assign: redis:0 allocation score on redis-bundle-0: 501
+pcmk__bundle_assign: redis:1 allocation score on redis-bundle-1: 501
+pcmk__bundle_assign: redis:2 allocation score on redis-bundle-2: 501
pcmk__clone_assign: compute-unfence-trigger-clone allocation score on controller-0: -INFINITY
pcmk__clone_assign: compute-unfence-trigger-clone allocation score on controller-1: -INFINITY
pcmk__clone_assign: compute-unfence-trigger-clone allocation score on controller-2: -INFINITY
diff --git a/cts/scheduler/summary/11-a-then-bm-b-move-a-clone-starting.summary b/cts/scheduler/summary/11-a-then-bm-b-move-a-clone-starting.summary
index 7bd3b49..7388644 100644
--- a/cts/scheduler/summary/11-a-then-bm-b-move-a-clone-starting.summary
+++ b/cts/scheduler/summary/11-a-then-bm-b-move-a-clone-starting.summary
@@ -11,7 +11,7 @@ Current cluster status:
Transition Summary:
* Move myclone:0 ( f20node1 -> f20node2 )
- * Move vm ( f20node1 -> f20node2 ) due to unrunnable myclone-clone stop
+ * Move vm ( f20node1 -> f20node2 ) due to unmigrateable myclone-clone stop
Executing Cluster Transition:
* Resource action: myclone monitor on f20node2
diff --git a/cts/scheduler/summary/5-am-then-bm-a-not-migratable.summary b/cts/scheduler/summary/5-am-then-bm-a-not-migratable.summary
index 2c88bc3..2a755e1 100644
--- a/cts/scheduler/summary/5-am-then-bm-a-not-migratable.summary
+++ b/cts/scheduler/summary/5-am-then-bm-a-not-migratable.summary
@@ -8,7 +8,7 @@ Current cluster status:
Transition Summary:
* Move A ( 18node1 -> 18node2 )
- * Move B ( 18node2 -> 18node1 ) due to unrunnable A stop
+ * Move B ( 18node2 -> 18node1 ) due to unmigrateable A stop
Executing Cluster Transition:
* Resource action: B stop on 18node2
diff --git a/cts/scheduler/summary/7-migrate-group-one-unmigratable.summary b/cts/scheduler/summary/7-migrate-group-one-unmigratable.summary
index 0d0c7ff..92eecaf 100644
--- a/cts/scheduler/summary/7-migrate-group-one-unmigratable.summary
+++ b/cts/scheduler/summary/7-migrate-group-one-unmigratable.summary
@@ -11,7 +11,7 @@ Current cluster status:
Transition Summary:
* Migrate A ( 18node1 -> 18node2 )
* Move B ( 18node1 -> 18node2 )
- * Move C ( 18node1 -> 18node2 ) due to unrunnable B stop
+ * Move C ( 18node1 -> 18node2 ) due to unmigrateable B stop
Executing Cluster Transition:
* Pseudo action: thegroup_stop_0
diff --git a/cts/scheduler/summary/bundle-interleave-start.summary b/cts/scheduler/summary/bundle-interleave-start.summary
index 1648e92..5a59847 100644
--- a/cts/scheduler/summary/bundle-interleave-start.summary
+++ b/cts/scheduler/summary/bundle-interleave-start.summary
@@ -14,24 +14,24 @@ Current cluster status:
* app-bundle-2 (ocf:pacemaker:Stateful): Stopped
Transition Summary:
- * Start base-bundle-podman-0 ( node2 )
- * Start base-bundle-0 ( node2 )
- * Start base:0 ( base-bundle-0 )
- * Start base-bundle-podman-1 ( node3 )
- * Start base-bundle-1 ( node3 )
- * Start base:1 ( base-bundle-1 )
- * Start base-bundle-podman-2 ( node4 )
- * Start base-bundle-2 ( node4 )
- * Start base:2 ( base-bundle-2 )
- * Start app-bundle-podman-0 ( node2 )
- * Start app-bundle-0 ( node2 )
- * Start app:0 ( app-bundle-0 )
- * Start app-bundle-podman-1 ( node3 )
- * Start app-bundle-1 ( node3 )
- * Start app:1 ( app-bundle-1 )
- * Start app-bundle-podman-2 ( node4 )
- * Start app-bundle-2 ( node4 )
- * Start app:2 ( app-bundle-2 )
+ * Start base-bundle-podman-0 ( node2 )
+ * Start base-bundle-0 ( node2 )
+ * Start base:0 ( base-bundle-0 )
+ * Start base-bundle-podman-1 ( node3 )
+ * Start base-bundle-1 ( node3 )
+ * Start base:1 ( base-bundle-1 )
+ * Start base-bundle-podman-2 ( node4 )
+ * Start base-bundle-2 ( node4 )
+ * Promote base:2 ( Stopped -> Promoted base-bundle-2 )
+ * Start app-bundle-podman-0 ( node2 )
+ * Start app-bundle-0 ( node2 )
+ * Start app:0 ( app-bundle-0 )
+ * Start app-bundle-podman-1 ( node3 )
+ * Start app-bundle-1 ( node3 )
+ * Start app:1 ( app-bundle-1 )
+ * Start app-bundle-podman-2 ( node4 )
+ * Start app-bundle-2 ( node4 )
+ * Promote app:2 ( Stopped -> Promoted app-bundle-2 )
Executing Cluster Transition:
* Resource action: base-bundle-podman-0 monitor on node5
@@ -91,17 +91,18 @@ Executing Cluster Transition:
* Resource action: base-bundle-podman-2 monitor=60000 on node4
* Resource action: base-bundle-2 start on node4
* Resource action: base:0 start on base-bundle-0
- * Resource action: base:1 start on base-bundle-1
- * Resource action: base:2 start on base-bundle-2
- * Pseudo action: base-bundle-clone_running_0
* Resource action: base-bundle-0 monitor=30000 on node2
* Resource action: base-bundle-1 monitor=30000 on node3
* Resource action: base-bundle-2 monitor=30000 on node4
- * Pseudo action: base-bundle_running_0
+ * Resource action: base:1 start on base-bundle-1
* Resource action: base:0 monitor=16000 on base-bundle-0
+ * Resource action: base:2 start on base-bundle-2
* Resource action: base:1 monitor=16000 on base-bundle-1
- * Resource action: base:2 monitor=16000 on base-bundle-2
+ * Pseudo action: base-bundle-clone_running_0
+ * Pseudo action: base-bundle_running_0
* Pseudo action: app-bundle_start_0
+ * Pseudo action: base-bundle_promote_0
+ * Pseudo action: base-bundle-clone_promote_0
* Pseudo action: app-bundle-clone_start_0
* Resource action: app-bundle-podman-0 start on node2
* Resource action: app-bundle-0 monitor on node5
@@ -121,23 +122,32 @@ Executing Cluster Transition:
* Resource action: app-bundle-2 monitor on node3
* Resource action: app-bundle-2 monitor on node2
* Resource action: app-bundle-2 monitor on node1
+ * Resource action: base:2 promote on base-bundle-2
+ * Pseudo action: base-bundle-clone_promoted_0
* Resource action: app-bundle-podman-0 monitor=60000 on node2
* Resource action: app-bundle-0 start on node2
* Resource action: app-bundle-podman-1 monitor=60000 on node3
* Resource action: app-bundle-1 start on node3
* Resource action: app-bundle-podman-2 monitor=60000 on node4
* Resource action: app-bundle-2 start on node4
+ * Pseudo action: base-bundle_promoted_0
+ * Resource action: base:2 monitor=15000 on base-bundle-2
* Resource action: app:0 start on app-bundle-0
- * Resource action: app:1 start on app-bundle-1
- * Resource action: app:2 start on app-bundle-2
- * Pseudo action: app-bundle-clone_running_0
* Resource action: app-bundle-0 monitor=30000 on node2
* Resource action: app-bundle-1 monitor=30000 on node3
* Resource action: app-bundle-2 monitor=30000 on node4
- * Pseudo action: app-bundle_running_0
+ * Resource action: app:1 start on app-bundle-1
* Resource action: app:0 monitor=16000 on app-bundle-0
+ * Resource action: app:2 start on app-bundle-2
* Resource action: app:1 monitor=16000 on app-bundle-1
- * Resource action: app:2 monitor=16000 on app-bundle-2
+ * Pseudo action: app-bundle-clone_running_0
+ * Pseudo action: app-bundle_running_0
+ * Pseudo action: app-bundle_promote_0
+ * Pseudo action: app-bundle-clone_promote_0
+ * Resource action: app:2 promote on app-bundle-2
+ * Pseudo action: app-bundle-clone_promoted_0
+ * Pseudo action: app-bundle_promoted_0
+ * Resource action: app:2 monitor=15000 on app-bundle-2
Revised Cluster Status:
* Node List:
@@ -149,8 +159,8 @@ Revised Cluster Status:
* Container bundle set: base-bundle [localhost/pcmktest:base]:
* base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node2
* base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node3
- * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node4
+ * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node4
* Container bundle set: app-bundle [localhost/pcmktest:app]:
* app-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node2
* app-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node3
- * app-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node4
+ * app-bundle-2 (ocf:pacemaker:Stateful): Promoted node4
diff --git a/cts/scheduler/summary/bundle-order-fencing.summary b/cts/scheduler/summary/bundle-order-fencing.summary
index e3a25c2..4088c15 100644
--- a/cts/scheduler/summary/bundle-order-fencing.summary
+++ b/cts/scheduler/summary/bundle-order-fencing.summary
@@ -145,6 +145,7 @@ Executing Cluster Transition:
* Pseudo action: galera-bundle_stopped_0
* Resource action: rabbitmq notify on rabbitmq-bundle-1
* Resource action: rabbitmq notify on rabbitmq-bundle-2
+ * Pseudo action: rabbitmq_notified_0
* Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_stopped_0
* Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0
* Pseudo action: galera-bundle-master_running_0
@@ -155,7 +156,6 @@ Executing Cluster Transition:
* Pseudo action: redis-bundle-docker-0_stop_0
* Pseudo action: galera-bundle_running_0
* Pseudo action: rabbitmq-bundle_stopped_0
- * Pseudo action: rabbitmq_notified_0
* Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0
* Pseudo action: rabbitmq-bundle-clone_start_0
* Pseudo action: redis_stop_0
@@ -165,11 +165,11 @@ Executing Cluster Transition:
* Pseudo action: rabbitmq-bundle-clone_post_notify_running_0
* Resource action: redis notify on redis-bundle-1
* Resource action: redis notify on redis-bundle-2
+ * Pseudo action: redis_notified_0
* Pseudo action: redis-bundle-master_confirmed-post_notify_stopped_0
* Pseudo action: redis-bundle-master_pre_notify_start_0
* Pseudo action: redis-bundle_stopped_0
* Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0
- * Pseudo action: redis_notified_0
* Pseudo action: redis-bundle-master_confirmed-pre_notify_start_0
* Pseudo action: redis-bundle-master_start_0
* Pseudo action: rabbitmq-bundle_running_0
diff --git a/cts/scheduler/summary/bundle-order-stop-on-remote.summary b/cts/scheduler/summary/bundle-order-stop-on-remote.summary
index 5e2e367..612e701 100644
--- a/cts/scheduler/summary/bundle-order-stop-on-remote.summary
+++ b/cts/scheduler/summary/bundle-order-stop-on-remote.summary
@@ -140,8 +140,8 @@ Executing Cluster Transition:
* Resource action: galera-bundle-docker-2 monitor=60000 on database-2
* Resource action: galera-bundle-2 start on controller-1
* Resource action: redis notify on redis-bundle-0
- * Resource action: redis notify on redis-bundle-1
* Resource action: redis notify on redis-bundle-2
+ * Resource action: redis notify on redis-bundle-1
* Pseudo action: redis-bundle-master_confirmed-post_notify_running_0
* Pseudo action: redis-bundle_running_0
* Resource action: galera start on galera-bundle-0
@@ -153,8 +153,8 @@ Executing Cluster Transition:
* Pseudo action: redis-bundle_promote_0
* Pseudo action: galera-bundle_running_0
* Resource action: redis notify on redis-bundle-0
- * Resource action: redis notify on redis-bundle-1
* Resource action: redis notify on redis-bundle-2
+ * Resource action: redis notify on redis-bundle-1
* Pseudo action: redis-bundle-master_confirmed-pre_notify_promote_0
* Pseudo action: redis-bundle-master_promote_0
* Pseudo action: galera-bundle_promote_0
@@ -169,8 +169,8 @@ Executing Cluster Transition:
* Resource action: galera monitor=10000 on galera-bundle-0
* Resource action: galera monitor=10000 on galera-bundle-2
* Resource action: redis notify on redis-bundle-0
- * Resource action: redis notify on redis-bundle-1
* Resource action: redis notify on redis-bundle-2
+ * Resource action: redis notify on redis-bundle-1
* Pseudo action: redis-bundle-master_confirmed-post_notify_promoted_0
* Pseudo action: redis-bundle_promoted_0
* Resource action: redis monitor=20000 on redis-bundle-0
diff --git a/cts/scheduler/summary/bundle-promoted-anticolocation-1.summary b/cts/scheduler/summary/bundle-promoted-anticolocation-1.summary
new file mode 100644
index 0000000..ec6cf2b
--- /dev/null
+++ b/cts/scheduler/summary/bundle-promoted-anticolocation-1.summary
@@ -0,0 +1,33 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3
+ * vip (ocf:heartbeat:IPaddr2): Started node3
+
+Transition Summary:
+ * Move vip ( node3 -> node1 )
+
+Executing Cluster Transition:
+ * Resource action: vip stop on node3
+ * Resource action: vip start on node1
+ * Resource action: vip monitor=10000 on node1
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3
+ * vip (ocf:heartbeat:IPaddr2): Started node1
diff --git a/cts/scheduler/summary/bundle-promoted-anticolocation-2.summary b/cts/scheduler/summary/bundle-promoted-anticolocation-2.summary
new file mode 100644
index 0000000..ec6cf2b
--- /dev/null
+++ b/cts/scheduler/summary/bundle-promoted-anticolocation-2.summary
@@ -0,0 +1,33 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3
+ * vip (ocf:heartbeat:IPaddr2): Started node3
+
+Transition Summary:
+ * Move vip ( node3 -> node1 )
+
+Executing Cluster Transition:
+ * Resource action: vip stop on node3
+ * Resource action: vip start on node1
+ * Resource action: vip monitor=10000 on node1
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3
+ * vip (ocf:heartbeat:IPaddr2): Started node1
diff --git a/cts/scheduler/summary/bundle-promoted-anticolocation-3.summary b/cts/scheduler/summary/bundle-promoted-anticolocation-3.summary
new file mode 100644
index 0000000..e9db462
--- /dev/null
+++ b/cts/scheduler/summary/bundle-promoted-anticolocation-3.summary
@@ -0,0 +1,45 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3
+ * vip (ocf:heartbeat:IPaddr2): Started node3
+
+Transition Summary:
+ * Promote base:1 ( Unpromoted -> Promoted base-bundle-1 )
+ * Demote base:2 ( Promoted -> Unpromoted base-bundle-2 )
+
+Executing Cluster Transition:
+ * Resource action: base cancel=16000 on base-bundle-1
+ * Resource action: base cancel=15000 on base-bundle-2
+ * Pseudo action: base-bundle_demote_0
+ * Pseudo action: base-bundle-clone_demote_0
+ * Resource action: base demote on base-bundle-2
+ * Pseudo action: base-bundle-clone_demoted_0
+ * Pseudo action: base-bundle_demoted_0
+ * Pseudo action: base-bundle_promote_0
+ * Resource action: base monitor=16000 on base-bundle-2
+ * Pseudo action: base-bundle-clone_promote_0
+ * Resource action: base promote on base-bundle-1
+ * Pseudo action: base-bundle-clone_promoted_0
+ * Pseudo action: base-bundle_promoted_0
+ * Resource action: base monitor=15000 on base-bundle-1
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * base-bundle-1 (ocf:pacemaker:Stateful): Promoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node3
+ * vip (ocf:heartbeat:IPaddr2): Started node3
diff --git a/cts/scheduler/summary/bundle-promoted-anticolocation-4.summary b/cts/scheduler/summary/bundle-promoted-anticolocation-4.summary
new file mode 100644
index 0000000..e9db462
--- /dev/null
+++ b/cts/scheduler/summary/bundle-promoted-anticolocation-4.summary
@@ -0,0 +1,45 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3
+ * vip (ocf:heartbeat:IPaddr2): Started node3
+
+Transition Summary:
+ * Promote base:1 ( Unpromoted -> Promoted base-bundle-1 )
+ * Demote base:2 ( Promoted -> Unpromoted base-bundle-2 )
+
+Executing Cluster Transition:
+ * Resource action: base cancel=16000 on base-bundle-1
+ * Resource action: base cancel=15000 on base-bundle-2
+ * Pseudo action: base-bundle_demote_0
+ * Pseudo action: base-bundle-clone_demote_0
+ * Resource action: base demote on base-bundle-2
+ * Pseudo action: base-bundle-clone_demoted_0
+ * Pseudo action: base-bundle_demoted_0
+ * Pseudo action: base-bundle_promote_0
+ * Resource action: base monitor=16000 on base-bundle-2
+ * Pseudo action: base-bundle-clone_promote_0
+ * Resource action: base promote on base-bundle-1
+ * Pseudo action: base-bundle-clone_promoted_0
+ * Pseudo action: base-bundle_promoted_0
+ * Resource action: base monitor=15000 on base-bundle-1
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * base-bundle-1 (ocf:pacemaker:Stateful): Promoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node3
+ * vip (ocf:heartbeat:IPaddr2): Started node3
diff --git a/cts/scheduler/summary/bundle-promoted-anticolocation-5.summary b/cts/scheduler/summary/bundle-promoted-anticolocation-5.summary
new file mode 100644
index 0000000..c35f2e0
--- /dev/null
+++ b/cts/scheduler/summary/bundle-promoted-anticolocation-5.summary
@@ -0,0 +1,51 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: bundle-a [localhost/pcmktest]:
+ * bundle-a-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * bundle-a-1 (ocf:pacemaker:Stateful): Promoted node3
+ * bundle-a-2 (ocf:pacemaker:Stateful): Unpromoted node2
+ * Container bundle set: bundle-b [localhost/pcmktest]:
+ * bundle-b-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * bundle-b-1 (ocf:pacemaker:Stateful): Promoted node3
+ * bundle-b-2 (ocf:pacemaker:Stateful): Unpromoted node2
+
+Transition Summary:
+ * Demote bundle-a-rsc:1 ( Promoted -> Unpromoted bundle-a-1 )
+ * Promote bundle-a-rsc:2 ( Unpromoted -> Promoted bundle-a-2 )
+
+Executing Cluster Transition:
+ * Resource action: bundle-a-rsc cancel=16000 on bundle-a-2
+ * Resource action: bundle-a-rsc cancel=15000 on bundle-a-1
+ * Pseudo action: bundle-a_demote_0
+ * Pseudo action: bundle-a-clone_demote_0
+ * Resource action: bundle-a-rsc demote on bundle-a-1
+ * Pseudo action: bundle-a-clone_demoted_0
+ * Pseudo action: bundle-a_demoted_0
+ * Pseudo action: bundle-a_promote_0
+ * Resource action: bundle-a-rsc monitor=16000 on bundle-a-1
+ * Pseudo action: bundle-a-clone_promote_0
+ * Resource action: bundle-a-rsc promote on bundle-a-2
+ * Pseudo action: bundle-a-clone_promoted_0
+ * Pseudo action: bundle-a_promoted_0
+ * Resource action: bundle-a-rsc monitor=15000 on bundle-a-2
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: bundle-a [localhost/pcmktest]:
+ * bundle-a-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * bundle-a-1 (ocf:pacemaker:Stateful): Unpromoted node3
+ * bundle-a-2 (ocf:pacemaker:Stateful): Promoted node2
+ * Container bundle set: bundle-b [localhost/pcmktest]:
+ * bundle-b-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * bundle-b-1 (ocf:pacemaker:Stateful): Promoted node3
+ * bundle-b-2 (ocf:pacemaker:Stateful): Unpromoted node2
diff --git a/cts/scheduler/summary/bundle-promoted-anticolocation-6.summary b/cts/scheduler/summary/bundle-promoted-anticolocation-6.summary
new file mode 100644
index 0000000..c35f2e0
--- /dev/null
+++ b/cts/scheduler/summary/bundle-promoted-anticolocation-6.summary
@@ -0,0 +1,51 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: bundle-a [localhost/pcmktest]:
+ * bundle-a-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * bundle-a-1 (ocf:pacemaker:Stateful): Promoted node3
+ * bundle-a-2 (ocf:pacemaker:Stateful): Unpromoted node2
+ * Container bundle set: bundle-b [localhost/pcmktest]:
+ * bundle-b-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * bundle-b-1 (ocf:pacemaker:Stateful): Promoted node3
+ * bundle-b-2 (ocf:pacemaker:Stateful): Unpromoted node2
+
+Transition Summary:
+ * Demote bundle-a-rsc:1 ( Promoted -> Unpromoted bundle-a-1 )
+ * Promote bundle-a-rsc:2 ( Unpromoted -> Promoted bundle-a-2 )
+
+Executing Cluster Transition:
+ * Resource action: bundle-a-rsc cancel=16000 on bundle-a-2
+ * Resource action: bundle-a-rsc cancel=15000 on bundle-a-1
+ * Pseudo action: bundle-a_demote_0
+ * Pseudo action: bundle-a-clone_demote_0
+ * Resource action: bundle-a-rsc demote on bundle-a-1
+ * Pseudo action: bundle-a-clone_demoted_0
+ * Pseudo action: bundle-a_demoted_0
+ * Pseudo action: bundle-a_promote_0
+ * Resource action: bundle-a-rsc monitor=16000 on bundle-a-1
+ * Pseudo action: bundle-a-clone_promote_0
+ * Resource action: bundle-a-rsc promote on bundle-a-2
+ * Pseudo action: bundle-a-clone_promoted_0
+ * Pseudo action: bundle-a_promoted_0
+ * Resource action: bundle-a-rsc monitor=15000 on bundle-a-2
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: bundle-a [localhost/pcmktest]:
+ * bundle-a-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * bundle-a-1 (ocf:pacemaker:Stateful): Unpromoted node3
+ * bundle-a-2 (ocf:pacemaker:Stateful): Promoted node2
+ * Container bundle set: bundle-b [localhost/pcmktest]:
+ * bundle-b-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * bundle-b-1 (ocf:pacemaker:Stateful): Promoted node3
+ * bundle-b-2 (ocf:pacemaker:Stateful): Unpromoted node2
diff --git a/cts/scheduler/summary/bundle-promoted-colocation-1.summary b/cts/scheduler/summary/bundle-promoted-colocation-1.summary
new file mode 100644
index 0000000..61cc974
--- /dev/null
+++ b/cts/scheduler/summary/bundle-promoted-colocation-1.summary
@@ -0,0 +1,33 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3
+ * vip (ocf:heartbeat:IPaddr2): Started node1
+
+Transition Summary:
+ * Move vip ( node1 -> node3 )
+
+Executing Cluster Transition:
+ * Resource action: vip stop on node1
+ * Resource action: vip start on node3
+ * Resource action: vip monitor=10000 on node3
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3
+ * vip (ocf:heartbeat:IPaddr2): Started node3
diff --git a/cts/scheduler/summary/bundle-promoted-colocation-2.summary b/cts/scheduler/summary/bundle-promoted-colocation-2.summary
new file mode 100644
index 0000000..61cc974
--- /dev/null
+++ b/cts/scheduler/summary/bundle-promoted-colocation-2.summary
@@ -0,0 +1,33 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3
+ * vip (ocf:heartbeat:IPaddr2): Started node1
+
+Transition Summary:
+ * Move vip ( node1 -> node3 )
+
+Executing Cluster Transition:
+ * Resource action: vip stop on node1
+ * Resource action: vip start on node3
+ * Resource action: vip monitor=10000 on node3
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3
+ * vip (ocf:heartbeat:IPaddr2): Started node3
diff --git a/cts/scheduler/summary/bundle-promoted-colocation-3.summary b/cts/scheduler/summary/bundle-promoted-colocation-3.summary
new file mode 100644
index 0000000..64b4157
--- /dev/null
+++ b/cts/scheduler/summary/bundle-promoted-colocation-3.summary
@@ -0,0 +1,45 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3
+ * vip (ocf:heartbeat:IPaddr2): Started node1
+
+Transition Summary:
+ * Promote base:0 ( Unpromoted -> Promoted base-bundle-0 )
+ * Demote base:2 ( Promoted -> Unpromoted base-bundle-2 )
+
+Executing Cluster Transition:
+ * Resource action: base cancel=16000 on base-bundle-0
+ * Resource action: base cancel=15000 on base-bundle-2
+ * Pseudo action: base-bundle_demote_0
+ * Pseudo action: base-bundle-clone_demote_0
+ * Resource action: base demote on base-bundle-2
+ * Pseudo action: base-bundle-clone_demoted_0
+ * Pseudo action: base-bundle_demoted_0
+ * Pseudo action: base-bundle_promote_0
+ * Resource action: base monitor=16000 on base-bundle-2
+ * Pseudo action: base-bundle-clone_promote_0
+ * Resource action: base promote on base-bundle-0
+ * Pseudo action: base-bundle-clone_promoted_0
+ * Pseudo action: base-bundle_promoted_0
+ * Resource action: base monitor=15000 on base-bundle-0
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node1
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node3
+ * vip (ocf:heartbeat:IPaddr2): Started node1
diff --git a/cts/scheduler/summary/bundle-promoted-colocation-4.summary b/cts/scheduler/summary/bundle-promoted-colocation-4.summary
new file mode 100644
index 0000000..64b4157
--- /dev/null
+++ b/cts/scheduler/summary/bundle-promoted-colocation-4.summary
@@ -0,0 +1,45 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node3
+ * vip (ocf:heartbeat:IPaddr2): Started node1
+
+Transition Summary:
+ * Promote base:0 ( Unpromoted -> Promoted base-bundle-0 )
+ * Demote base:2 ( Promoted -> Unpromoted base-bundle-2 )
+
+Executing Cluster Transition:
+ * Resource action: base cancel=16000 on base-bundle-0
+ * Resource action: base cancel=15000 on base-bundle-2
+ * Pseudo action: base-bundle_demote_0
+ * Pseudo action: base-bundle-clone_demote_0
+ * Resource action: base demote on base-bundle-2
+ * Pseudo action: base-bundle-clone_demoted_0
+ * Pseudo action: base-bundle_demoted_0
+ * Pseudo action: base-bundle_promote_0
+ * Resource action: base monitor=16000 on base-bundle-2
+ * Pseudo action: base-bundle-clone_promote_0
+ * Resource action: base promote on base-bundle-0
+ * Pseudo action: base-bundle-clone_promoted_0
+ * Pseudo action: base-bundle_promoted_0
+ * Resource action: base monitor=15000 on base-bundle-0
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node1
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node3
+ * vip (ocf:heartbeat:IPaddr2): Started node1
diff --git a/cts/scheduler/summary/bundle-promoted-colocation-5.summary b/cts/scheduler/summary/bundle-promoted-colocation-5.summary
new file mode 100644
index 0000000..dbcf940
--- /dev/null
+++ b/cts/scheduler/summary/bundle-promoted-colocation-5.summary
@@ -0,0 +1,51 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: bundle-a [localhost/pcmktest]:
+ * bundle-a-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * bundle-a-1 (ocf:pacemaker:Stateful): Promoted node3
+ * bundle-a-2 (ocf:pacemaker:Stateful): Unpromoted node2
+ * Container bundle set: bundle-b [localhost/pcmktest]:
+ * bundle-b-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * bundle-b-1 (ocf:pacemaker:Stateful): Unpromoted node3
+ * bundle-b-2 (ocf:pacemaker:Stateful): Promoted node2
+
+Transition Summary:
+ * Demote bundle-a-rsc:1 ( Promoted -> Unpromoted bundle-a-1 )
+ * Promote bundle-a-rsc:2 ( Unpromoted -> Promoted bundle-a-2 )
+
+Executing Cluster Transition:
+ * Resource action: bundle-a-rsc cancel=16000 on bundle-a-2
+ * Resource action: bundle-a-rsc cancel=15000 on bundle-a-1
+ * Pseudo action: bundle-a_demote_0
+ * Pseudo action: bundle-a-clone_demote_0
+ * Resource action: bundle-a-rsc demote on bundle-a-1
+ * Pseudo action: bundle-a-clone_demoted_0
+ * Pseudo action: bundle-a_demoted_0
+ * Pseudo action: bundle-a_promote_0
+ * Resource action: bundle-a-rsc monitor=16000 on bundle-a-1
+ * Pseudo action: bundle-a-clone_promote_0
+ * Resource action: bundle-a-rsc promote on bundle-a-2
+ * Pseudo action: bundle-a-clone_promoted_0
+ * Pseudo action: bundle-a_promoted_0
+ * Resource action: bundle-a-rsc monitor=15000 on bundle-a-2
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: bundle-a [localhost/pcmktest]:
+ * bundle-a-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * bundle-a-1 (ocf:pacemaker:Stateful): Unpromoted node3
+ * bundle-a-2 (ocf:pacemaker:Stateful): Promoted node2
+ * Container bundle set: bundle-b [localhost/pcmktest]:
+ * bundle-b-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * bundle-b-1 (ocf:pacemaker:Stateful): Unpromoted node3
+ * bundle-b-2 (ocf:pacemaker:Stateful): Promoted node2
diff --git a/cts/scheduler/summary/bundle-promoted-colocation-6.summary b/cts/scheduler/summary/bundle-promoted-colocation-6.summary
new file mode 100644
index 0000000..dbcf940
--- /dev/null
+++ b/cts/scheduler/summary/bundle-promoted-colocation-6.summary
@@ -0,0 +1,51 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: bundle-a [localhost/pcmktest]:
+ * bundle-a-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * bundle-a-1 (ocf:pacemaker:Stateful): Promoted node3
+ * bundle-a-2 (ocf:pacemaker:Stateful): Unpromoted node2
+ * Container bundle set: bundle-b [localhost/pcmktest]:
+ * bundle-b-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * bundle-b-1 (ocf:pacemaker:Stateful): Unpromoted node3
+ * bundle-b-2 (ocf:pacemaker:Stateful): Promoted node2
+
+Transition Summary:
+ * Demote bundle-a-rsc:1 ( Promoted -> Unpromoted bundle-a-1 )
+ * Promote bundle-a-rsc:2 ( Unpromoted -> Promoted bundle-a-2 )
+
+Executing Cluster Transition:
+ * Resource action: bundle-a-rsc cancel=16000 on bundle-a-2
+ * Resource action: bundle-a-rsc cancel=15000 on bundle-a-1
+ * Pseudo action: bundle-a_demote_0
+ * Pseudo action: bundle-a-clone_demote_0
+ * Resource action: bundle-a-rsc demote on bundle-a-1
+ * Pseudo action: bundle-a-clone_demoted_0
+ * Pseudo action: bundle-a_demoted_0
+ * Pseudo action: bundle-a_promote_0
+ * Resource action: bundle-a-rsc monitor=16000 on bundle-a-1
+ * Pseudo action: bundle-a-clone_promote_0
+ * Resource action: bundle-a-rsc promote on bundle-a-2
+ * Pseudo action: bundle-a-clone_promoted_0
+ * Pseudo action: bundle-a_promoted_0
+ * Resource action: bundle-a-rsc monitor=15000 on bundle-a-2
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ bundle-a-0 bundle-a-1 bundle-a-2 bundle-b-0 bundle-b-1 bundle-b-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: bundle-a [localhost/pcmktest]:
+ * bundle-a-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * bundle-a-1 (ocf:pacemaker:Stateful): Unpromoted node3
+ * bundle-a-2 (ocf:pacemaker:Stateful): Promoted node2
+ * Container bundle set: bundle-b [localhost/pcmktest]:
+ * bundle-b-0 (ocf:pacemaker:Stateful): Unpromoted node1
+ * bundle-b-1 (ocf:pacemaker:Stateful): Unpromoted node3
+ * bundle-b-2 (ocf:pacemaker:Stateful): Promoted node2
diff --git a/cts/scheduler/summary/bundle-promoted-location-1.summary b/cts/scheduler/summary/bundle-promoted-location-1.summary
new file mode 100644
index 0000000..4c0a0ab
--- /dev/null
+++ b/cts/scheduler/summary/bundle-promoted-location-1.summary
@@ -0,0 +1,27 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1
+
+Transition Summary:
+
+Executing Cluster Transition:
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1
diff --git a/cts/scheduler/summary/bundle-promoted-location-2.summary b/cts/scheduler/summary/bundle-promoted-location-2.summary
new file mode 100644
index 0000000..bd3b3a9
--- /dev/null
+++ b/cts/scheduler/summary/bundle-promoted-location-2.summary
@@ -0,0 +1,54 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1
+
+Transition Summary:
+ * Stop base-bundle-podman-0 ( node3 ) due to node availability
+ * Stop base-bundle-0 ( node3 ) due to unrunnable base-bundle-podman-0 start
+ * Stop base:0 ( Promoted base-bundle-0 ) due to unrunnable base-bundle-podman-0 start
+ * Promote base:1 ( Unpromoted -> Promoted base-bundle-1 )
+
+Executing Cluster Transition:
+ * Resource action: base cancel=16000 on base-bundle-1
+ * Resource action: base cancel=15000 on base-bundle-0
+ * Pseudo action: base-bundle_demote_0
+ * Pseudo action: base-bundle-clone_demote_0
+ * Resource action: base demote on base-bundle-0
+ * Pseudo action: base-bundle-clone_demoted_0
+ * Pseudo action: base-bundle_demoted_0
+ * Pseudo action: base-bundle_stop_0
+ * Pseudo action: base-bundle-clone_stop_0
+ * Resource action: base stop on base-bundle-0
+ * Pseudo action: base-bundle-clone_stopped_0
+ * Pseudo action: base-bundle-clone_start_0
+ * Resource action: base-bundle-0 stop on node3
+ * Pseudo action: base-bundle-clone_running_0
+ * Resource action: base-bundle-podman-0 stop on node3
+ * Pseudo action: base-bundle_stopped_0
+ * Pseudo action: base-bundle_running_0
+ * Pseudo action: base-bundle_promote_0
+ * Pseudo action: base-bundle-clone_promote_0
+ * Resource action: base promote on base-bundle-1
+ * Pseudo action: base-bundle-clone_promoted_0
+ * Pseudo action: base-bundle_promoted_0
+ * Resource action: base monitor=15000 on base-bundle-1
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Stopped
+ * base-bundle-1 (ocf:pacemaker:Stateful): Promoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1
diff --git a/cts/scheduler/summary/bundle-promoted-location-3.summary b/cts/scheduler/summary/bundle-promoted-location-3.summary
new file mode 100644
index 0000000..4c0a0ab
--- /dev/null
+++ b/cts/scheduler/summary/bundle-promoted-location-3.summary
@@ -0,0 +1,27 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1
+
+Transition Summary:
+
+Executing Cluster Transition:
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1
diff --git a/cts/scheduler/summary/bundle-promoted-location-4.summary b/cts/scheduler/summary/bundle-promoted-location-4.summary
new file mode 100644
index 0000000..4c0a0ab
--- /dev/null
+++ b/cts/scheduler/summary/bundle-promoted-location-4.summary
@@ -0,0 +1,27 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1
+
+Transition Summary:
+
+Executing Cluster Transition:
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1
diff --git a/cts/scheduler/summary/bundle-promoted-location-5.summary b/cts/scheduler/summary/bundle-promoted-location-5.summary
new file mode 100644
index 0000000..4c0a0ab
--- /dev/null
+++ b/cts/scheduler/summary/bundle-promoted-location-5.summary
@@ -0,0 +1,27 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1
+
+Transition Summary:
+
+Executing Cluster Transition:
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1
diff --git a/cts/scheduler/summary/bundle-promoted-location-6.summary b/cts/scheduler/summary/bundle-promoted-location-6.summary
new file mode 100644
index 0000000..5e1cce2
--- /dev/null
+++ b/cts/scheduler/summary/bundle-promoted-location-6.summary
@@ -0,0 +1,40 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1
+
+Transition Summary:
+ * Stop base-bundle-podman-1 ( node2 ) due to node availability
+ * Stop base-bundle-1 ( node2 ) due to unrunnable base-bundle-podman-1 start
+ * Stop base:1 ( Unpromoted base-bundle-1 ) due to unrunnable base-bundle-podman-1 start
+
+Executing Cluster Transition:
+ * Pseudo action: base-bundle_stop_0
+ * Pseudo action: base-bundle-clone_stop_0
+ * Resource action: base stop on base-bundle-1
+ * Pseudo action: base-bundle-clone_stopped_0
+ * Pseudo action: base-bundle-clone_start_0
+ * Resource action: base-bundle-1 stop on node2
+ * Pseudo action: base-bundle-clone_running_0
+ * Resource action: base-bundle-podman-1 stop on node2
+ * Pseudo action: base-bundle_stopped_0
+ * Pseudo action: base-bundle_running_0
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Promoted node3
+ * base-bundle-1 (ocf:pacemaker:Stateful): Stopped
+ * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1
diff --git a/cts/scheduler/summary/cancel-behind-moving-remote.summary b/cts/scheduler/summary/cancel-behind-moving-remote.summary
index 7726876..945f3c8 100644
--- a/cts/scheduler/summary/cancel-behind-moving-remote.summary
+++ b/cts/scheduler/summary/cancel-behind-moving-remote.summary
@@ -58,22 +58,18 @@ Current cluster status:
Transition Summary:
* Start rabbitmq-bundle-1 ( controller-0 ) due to unrunnable rabbitmq-bundle-podman-1 start (blocked)
* Start rabbitmq:1 ( rabbitmq-bundle-1 ) due to unrunnable rabbitmq-bundle-podman-1 start (blocked)
- * Start ovn-dbs-bundle-podman-0 ( controller-2 )
- * Start ovn-dbs-bundle-0 ( controller-2 )
+ * Start ovn-dbs-bundle-podman-0 ( controller-0 )
+ * Start ovn-dbs-bundle-0 ( controller-0 )
* Start ovndb_servers:0 ( ovn-dbs-bundle-0 )
- * Move ovn-dbs-bundle-podman-1 ( controller-2 -> controller-0 )
- * Move ovn-dbs-bundle-1 ( controller-2 -> controller-0 )
- * Restart ovndb_servers:1 ( Unpromoted -> Promoted ovn-dbs-bundle-1 ) due to required ovn-dbs-bundle-podman-1 start
- * Start ip-172.17.1.87 ( controller-0 )
+ * Promote ovndb_servers:2 ( Unpromoted -> Promoted ovn-dbs-bundle-2 )
+ * Start ip-172.17.1.87 ( controller-1 )
* Move stonith-fence_ipmilan-52540040bb56 ( messaging-2 -> database-0 )
* Move stonith-fence_ipmilan-525400e1534e ( database-1 -> messaging-2 )
Executing Cluster Transition:
* Pseudo action: rabbitmq-bundle-clone_pre_notify_start_0
- * Resource action: ovndb_servers cancel=30000 on ovn-dbs-bundle-1
- * Pseudo action: ovn-dbs-bundle-master_pre_notify_stop_0
- * Cluster action: clear_failcount for ovn-dbs-bundle-0 on controller-0
- * Cluster action: clear_failcount for ovn-dbs-bundle-1 on controller-2
+ * Resource action: ovndb_servers cancel=30000 on ovn-dbs-bundle-2
+ * Pseudo action: ovn-dbs-bundle-master_pre_notify_start_0
* Cluster action: clear_failcount for stonith-fence_compute-fence-nova on messaging-0
* Cluster action: clear_failcount for nova-evacuate on messaging-0
* Cluster action: clear_failcount for stonith-fence_ipmilan-525400aa1373 on database-0
@@ -87,71 +83,53 @@ Executing Cluster Transition:
* Cluster action: clear_failcount for stonith-fence_ipmilan-52540060dbba on messaging-0
* Cluster action: clear_failcount for stonith-fence_ipmilan-525400e018b6 on database-0
* Cluster action: clear_failcount for stonith-fence_ipmilan-525400c87cdb on database-2
- * Pseudo action: ovn-dbs-bundle_stop_0
+ * Pseudo action: ovn-dbs-bundle_start_0
* Pseudo action: rabbitmq-bundle_start_0
* Pseudo action: rabbitmq-bundle-clone_confirmed-pre_notify_start_0
* Pseudo action: rabbitmq-bundle-clone_start_0
- * Resource action: ovndb_servers notify on ovn-dbs-bundle-1
* Resource action: ovndb_servers notify on ovn-dbs-bundle-2
- * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_stop_0
- * Pseudo action: ovn-dbs-bundle-master_stop_0
+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-1
+ * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_start_0
+ * Pseudo action: ovn-dbs-bundle-master_start_0
+ * Resource action: ovn-dbs-bundle-podman-0 start on controller-0
+ * Resource action: ovn-dbs-bundle-0 start on controller-0
* Resource action: stonith-fence_ipmilan-52540040bb56 start on database-0
* Resource action: stonith-fence_ipmilan-525400e1534e start on messaging-2
* Pseudo action: rabbitmq-bundle-clone_running_0
- * Resource action: ovndb_servers stop on ovn-dbs-bundle-1
- * Pseudo action: ovn-dbs-bundle-master_stopped_0
- * Resource action: ovn-dbs-bundle-1 stop on controller-2
+ * Resource action: ovndb_servers start on ovn-dbs-bundle-0
+ * Pseudo action: ovn-dbs-bundle-master_running_0
+ * Resource action: ovn-dbs-bundle-podman-0 monitor=60000 on controller-0
+ * Resource action: ovn-dbs-bundle-0 monitor=30000 on controller-0
* Resource action: stonith-fence_ipmilan-52540040bb56 monitor=60000 on database-0
* Resource action: stonith-fence_ipmilan-525400e1534e monitor=60000 on messaging-2
* Pseudo action: rabbitmq-bundle-clone_post_notify_running_0
- * Pseudo action: ovn-dbs-bundle-master_post_notify_stopped_0
- * Resource action: ovn-dbs-bundle-podman-1 stop on controller-2
+ * Pseudo action: ovn-dbs-bundle-master_post_notify_running_0
* Pseudo action: rabbitmq-bundle-clone_confirmed-post_notify_running_0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-2
- * Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_stopped_0
- * Pseudo action: ovn-dbs-bundle-master_pre_notify_start_0
- * Pseudo action: ovn-dbs-bundle_stopped_0
- * Pseudo action: ovn-dbs-bundle_start_0
- * Pseudo action: rabbitmq-bundle_running_0
- * Resource action: ovndb_servers notify on ovn-dbs-bundle-2
- * Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_start_0
- * Pseudo action: ovn-dbs-bundle-master_start_0
- * Resource action: ovn-dbs-bundle-podman-0 start on controller-2
- * Resource action: ovn-dbs-bundle-0 start on controller-2
- * Resource action: ovn-dbs-bundle-podman-1 start on controller-0
- * Resource action: ovn-dbs-bundle-1 start on controller-0
- * Resource action: ovndb_servers start on ovn-dbs-bundle-0
- * Resource action: ovndb_servers start on ovn-dbs-bundle-1
- * Pseudo action: ovn-dbs-bundle-master_running_0
- * Resource action: ovn-dbs-bundle-podman-0 monitor=60000 on controller-2
- * Resource action: ovn-dbs-bundle-0 monitor=30000 on controller-2
- * Resource action: ovn-dbs-bundle-podman-1 monitor=60000 on controller-0
- * Resource action: ovn-dbs-bundle-1 monitor=30000 on controller-0
- * Pseudo action: ovn-dbs-bundle-master_post_notify_running_0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-1
- * Resource action: ovndb_servers notify on ovn-dbs-bundle-2
* Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_running_0
* Pseudo action: ovn-dbs-bundle_running_0
+ * Pseudo action: rabbitmq-bundle_running_0
* Pseudo action: ovn-dbs-bundle-master_pre_notify_promote_0
* Pseudo action: ovn-dbs-bundle_promote_0
+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-2
* Resource action: ovndb_servers notify on ovn-dbs-bundle-0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-1
- * Resource action: ovndb_servers notify on ovn-dbs-bundle-2
* Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_promote_0
* Pseudo action: ovn-dbs-bundle-master_promote_0
- * Resource action: ip-172.17.1.87 start on controller-0
- * Resource action: ovndb_servers promote on ovn-dbs-bundle-1
+ * Resource action: ip-172.17.1.87 start on controller-1
+ * Resource action: ovndb_servers promote on ovn-dbs-bundle-2
* Pseudo action: ovn-dbs-bundle-master_promoted_0
- * Resource action: ip-172.17.1.87 monitor=10000 on controller-0
+ * Resource action: ip-172.17.1.87 monitor=10000 on controller-1
* Pseudo action: ovn-dbs-bundle-master_post_notify_promoted_0
+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-2
* Resource action: ovndb_servers notify on ovn-dbs-bundle-0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-1
- * Resource action: ovndb_servers notify on ovn-dbs-bundle-2
* Pseudo action: ovn-dbs-bundle-master_confirmed-post_notify_promoted_0
* Pseudo action: ovn-dbs-bundle_promoted_0
+ * Resource action: ovndb_servers monitor=10000 on ovn-dbs-bundle-2
* Resource action: ovndb_servers monitor=30000 on ovn-dbs-bundle-0
- * Resource action: ovndb_servers monitor=10000 on ovn-dbs-bundle-1
Using the original execution date of: 2021-02-15 01:40:51Z
Revised Cluster Status:
@@ -187,10 +165,10 @@ Revised Cluster Status:
* haproxy-bundle-podman-1 (ocf:heartbeat:podman): Started controller-0
* haproxy-bundle-podman-2 (ocf:heartbeat:podman): Started controller-1
* Container bundle set: ovn-dbs-bundle [cluster.common.tag/rhosp16-openstack-ovn-northd:pcmklatest]:
- * ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Unpromoted controller-2
- * ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Promoted controller-0
- * ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Unpromoted controller-1
- * ip-172.17.1.87 (ocf:heartbeat:IPaddr2): Started controller-0
+ * ovn-dbs-bundle-0 (ocf:ovn:ovndb-servers): Unpromoted controller-0
+ * ovn-dbs-bundle-1 (ocf:ovn:ovndb-servers): Unpromoted controller-2
+ * ovn-dbs-bundle-2 (ocf:ovn:ovndb-servers): Promoted controller-1
+ * ip-172.17.1.87 (ocf:heartbeat:IPaddr2): Started controller-1
* stonith-fence_compute-fence-nova (stonith:fence_compute): Started database-1
* Clone Set: compute-unfence-trigger-clone [compute-unfence-trigger]:
* Started: [ compute-0 compute-1 ]
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-1.summary b/cts/scheduler/summary/clone-recover-no-shuffle-1.summary
new file mode 100644
index 0000000..0b6866e
--- /dev/null
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-1.summary
@@ -0,0 +1,29 @@
+Using the original execution date of: 2023-06-21 00:59:59Z
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Clone Set: dummy-clone [dummy]:
+ * Started: [ node2 node3 ]
+ * Stopped: [ node1 ]
+
+Transition Summary:
+ * Start dummy:2 ( node1 )
+
+Executing Cluster Transition:
+ * Pseudo action: dummy-clone_start_0
+ * Resource action: dummy start on node1
+ * Pseudo action: dummy-clone_running_0
+ * Resource action: dummy monitor=10000 on node1
+Using the original execution date of: 2023-06-21 00:59:59Z
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Clone Set: dummy-clone [dummy]:
+ * Started: [ node1 node2 node3 ]
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-10.summary b/cts/scheduler/summary/clone-recover-no-shuffle-10.summary
new file mode 100644
index 0000000..5b0f9b6
--- /dev/null
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-10.summary
@@ -0,0 +1,29 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Clone Set: dummy-clone [dummy] (promotable):
+ * Promoted: [ node2 ]
+ * Unpromoted: [ node3 ]
+ * Stopped: [ node1 ]
+
+Transition Summary:
+ * Start dummy:2 ( node1 )
+
+Executing Cluster Transition:
+ * Pseudo action: dummy-clone_start_0
+ * Resource action: dummy start on node1
+ * Pseudo action: dummy-clone_running_0
+ * Resource action: dummy monitor=11000 on node1
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Clone Set: dummy-clone [dummy] (promotable):
+ * Promoted: [ node2 ]
+ * Unpromoted: [ node1 node3 ]
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-11.summary b/cts/scheduler/summary/clone-recover-no-shuffle-11.summary
new file mode 100644
index 0000000..e0bdb61
--- /dev/null
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-11.summary
@@ -0,0 +1,34 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Clone Set: grp-clone [grp] (promotable):
+ * Promoted: [ node2 ]
+ * Unpromoted: [ node3 ]
+ * Stopped: [ node1 ]
+
+Transition Summary:
+ * Start rsc1:2 ( node1 )
+ * Start rsc2:2 ( node1 )
+
+Executing Cluster Transition:
+ * Pseudo action: grp-clone_start_0
+ * Pseudo action: grp:2_start_0
+ * Resource action: rsc1 start on node1
+ * Resource action: rsc2 start on node1
+ * Pseudo action: grp:2_running_0
+ * Resource action: rsc1 monitor=11000 on node1
+ * Resource action: rsc2 monitor=11000 on node1
+ * Pseudo action: grp-clone_running_0
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Clone Set: grp-clone [grp] (promotable):
+ * Promoted: [ node2 ]
+ * Unpromoted: [ node1 node3 ]
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-12.summary b/cts/scheduler/summary/clone-recover-no-shuffle-12.summary
new file mode 100644
index 0000000..6e55a0b
--- /dev/null
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-12.summary
@@ -0,0 +1,43 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node3
+ * base-bundle-1 (ocf:pacemaker:Stateful): Promoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Stopped
+
+Transition Summary:
+ * Start base-bundle-podman-2 ( node1 )
+ * Start base-bundle-2 ( node1 )
+ * Start base:2 ( base-bundle-2 )
+
+Executing Cluster Transition:
+ * Pseudo action: base-bundle_start_0
+ * Pseudo action: base-bundle-clone_start_0
+ * Resource action: base-bundle-podman-2 start on node1
+ * Resource action: base-bundle-2 monitor on node3
+ * Resource action: base-bundle-2 monitor on node2
+ * Resource action: base-bundle-2 monitor on node1
+ * Resource action: base-bundle-podman-2 monitor=60000 on node1
+ * Resource action: base-bundle-2 start on node1
+ * Resource action: base start on base-bundle-2
+ * Pseudo action: base-bundle-clone_running_0
+ * Resource action: base-bundle-2 monitor=30000 on node1
+ * Pseudo action: base-bundle_running_0
+ * Resource action: base monitor=16000 on base-bundle-2
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node3
+ * base-bundle-1 (ocf:pacemaker:Stateful): Promoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Unpromoted node1
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-2.summary b/cts/scheduler/summary/clone-recover-no-shuffle-2.summary
new file mode 100644
index 0000000..8b18120
--- /dev/null
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-2.summary
@@ -0,0 +1,32 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Clone Set: grp-clone [grp]:
+ * Started: [ node2 node3 ]
+ * Stopped: [ node1 ]
+
+Transition Summary:
+ * Start rsc1:2 ( node1 )
+ * Start rsc2:2 ( node1 )
+
+Executing Cluster Transition:
+ * Pseudo action: grp-clone_start_0
+ * Pseudo action: grp:2_start_0
+ * Resource action: rsc1 start on node1
+ * Resource action: rsc2 start on node1
+ * Pseudo action: grp:2_running_0
+ * Resource action: rsc1 monitor=10000 on node1
+ * Resource action: rsc2 monitor=10000 on node1
+ * Pseudo action: grp-clone_running_0
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Clone Set: grp-clone [grp]:
+ * Started: [ node1 node2 node3 ]
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-3.summary b/cts/scheduler/summary/clone-recover-no-shuffle-3.summary
new file mode 100644
index 0000000..5702177
--- /dev/null
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-3.summary
@@ -0,0 +1,42 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Started node3
+ * base-bundle-1 (ocf:pacemaker:Stateful): Started node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Stopped
+
+Transition Summary:
+ * Start base-bundle-podman-2 ( node1 )
+ * Start base-bundle-2 ( node1 )
+ * Start base:2 ( base-bundle-2 )
+
+Executing Cluster Transition:
+ * Pseudo action: base-bundle_start_0
+ * Pseudo action: base-bundle-clone_start_0
+ * Resource action: base-bundle-podman-2 start on node1
+ * Resource action: base-bundle-2 monitor on node3
+ * Resource action: base-bundle-2 monitor on node2
+ * Resource action: base-bundle-2 monitor on node1
+ * Resource action: base-bundle-podman-2 monitor=60000 on node1
+ * Resource action: base-bundle-2 start on node1
+ * Resource action: base start on base-bundle-2
+ * Pseudo action: base-bundle-clone_running_0
+ * Resource action: base-bundle-2 monitor=30000 on node1
+ * Pseudo action: base-bundle_running_0
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Started node3
+ * base-bundle-1 (ocf:pacemaker:Stateful): Started node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Started node1
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-4.summary b/cts/scheduler/summary/clone-recover-no-shuffle-4.summary
new file mode 100644
index 0000000..0b6866e
--- /dev/null
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-4.summary
@@ -0,0 +1,29 @@
+Using the original execution date of: 2023-06-21 00:59:59Z
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Clone Set: dummy-clone [dummy]:
+ * Started: [ node2 node3 ]
+ * Stopped: [ node1 ]
+
+Transition Summary:
+ * Start dummy:2 ( node1 )
+
+Executing Cluster Transition:
+ * Pseudo action: dummy-clone_start_0
+ * Resource action: dummy start on node1
+ * Pseudo action: dummy-clone_running_0
+ * Resource action: dummy monitor=10000 on node1
+Using the original execution date of: 2023-06-21 00:59:59Z
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Clone Set: dummy-clone [dummy]:
+ * Started: [ node1 node2 node3 ]
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-5.summary b/cts/scheduler/summary/clone-recover-no-shuffle-5.summary
new file mode 100644
index 0000000..8b18120
--- /dev/null
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-5.summary
@@ -0,0 +1,32 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Clone Set: grp-clone [grp]:
+ * Started: [ node2 node3 ]
+ * Stopped: [ node1 ]
+
+Transition Summary:
+ * Start rsc1:2 ( node1 )
+ * Start rsc2:2 ( node1 )
+
+Executing Cluster Transition:
+ * Pseudo action: grp-clone_start_0
+ * Pseudo action: grp:2_start_0
+ * Resource action: rsc1 start on node1
+ * Resource action: rsc2 start on node1
+ * Pseudo action: grp:2_running_0
+ * Resource action: rsc1 monitor=10000 on node1
+ * Resource action: rsc2 monitor=10000 on node1
+ * Pseudo action: grp-clone_running_0
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Clone Set: grp-clone [grp]:
+ * Started: [ node1 node2 node3 ]
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-6.summary b/cts/scheduler/summary/clone-recover-no-shuffle-6.summary
new file mode 100644
index 0000000..5702177
--- /dev/null
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-6.summary
@@ -0,0 +1,42 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Started node3
+ * base-bundle-1 (ocf:pacemaker:Stateful): Started node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Stopped
+
+Transition Summary:
+ * Start base-bundle-podman-2 ( node1 )
+ * Start base-bundle-2 ( node1 )
+ * Start base:2 ( base-bundle-2 )
+
+Executing Cluster Transition:
+ * Pseudo action: base-bundle_start_0
+ * Pseudo action: base-bundle-clone_start_0
+ * Resource action: base-bundle-podman-2 start on node1
+ * Resource action: base-bundle-2 monitor on node3
+ * Resource action: base-bundle-2 monitor on node2
+ * Resource action: base-bundle-2 monitor on node1
+ * Resource action: base-bundle-podman-2 monitor=60000 on node1
+ * Resource action: base-bundle-2 start on node1
+ * Resource action: base start on base-bundle-2
+ * Pseudo action: base-bundle-clone_running_0
+ * Resource action: base-bundle-2 monitor=30000 on node1
+ * Pseudo action: base-bundle_running_0
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Started node3
+ * base-bundle-1 (ocf:pacemaker:Stateful): Started node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Started node1
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-7.summary b/cts/scheduler/summary/clone-recover-no-shuffle-7.summary
new file mode 100644
index 0000000..7744570
--- /dev/null
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-7.summary
@@ -0,0 +1,38 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Clone Set: dummy-clone [dummy] (promotable):
+ * Promoted: [ node2 ]
+ * Unpromoted: [ node3 ]
+ * Stopped: [ node1 ]
+
+Transition Summary:
+ * Demote dummy:1 ( Promoted -> Unpromoted node2 )
+ * Promote dummy:2 ( Stopped -> Promoted node1 )
+
+Executing Cluster Transition:
+ * Resource action: dummy cancel=10000 on node2
+ * Pseudo action: dummy-clone_demote_0
+ * Resource action: dummy demote on node2
+ * Pseudo action: dummy-clone_demoted_0
+ * Pseudo action: dummy-clone_start_0
+ * Resource action: dummy monitor=11000 on node2
+ * Resource action: dummy start on node1
+ * Pseudo action: dummy-clone_running_0
+ * Pseudo action: dummy-clone_promote_0
+ * Resource action: dummy promote on node1
+ * Pseudo action: dummy-clone_promoted_0
+ * Resource action: dummy monitor=10000 on node1
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Clone Set: dummy-clone [dummy] (promotable):
+ * Promoted: [ node1 ]
+ * Unpromoted: [ node2 node3 ]
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-8.summary b/cts/scheduler/summary/clone-recover-no-shuffle-8.summary
new file mode 100644
index 0000000..878f248
--- /dev/null
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-8.summary
@@ -0,0 +1,52 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Clone Set: grp-clone [grp] (promotable):
+ * Promoted: [ node2 ]
+ * Unpromoted: [ node3 ]
+ * Stopped: [ node1 ]
+
+Transition Summary:
+ * Demote rsc1:1 ( Promoted -> Unpromoted node2 )
+ * Demote rsc2:1 ( Promoted -> Unpromoted node2 )
+ * Promote rsc1:2 ( Stopped -> Promoted node1 )
+ * Promote rsc2:2 ( Stopped -> Promoted node1 )
+
+Executing Cluster Transition:
+ * Resource action: rsc1 cancel=10000 on node2
+ * Resource action: rsc2 cancel=10000 on node2
+ * Pseudo action: grp-clone_demote_0
+ * Pseudo action: grp:1_demote_0
+ * Resource action: rsc2 demote on node2
+ * Resource action: rsc1 demote on node2
+ * Resource action: rsc2 monitor=11000 on node2
+ * Pseudo action: grp:1_demoted_0
+ * Resource action: rsc1 monitor=11000 on node2
+ * Pseudo action: grp-clone_demoted_0
+ * Pseudo action: grp-clone_start_0
+ * Pseudo action: grp:2_start_0
+ * Resource action: rsc1 start on node1
+ * Resource action: rsc2 start on node1
+ * Pseudo action: grp:2_running_0
+ * Pseudo action: grp-clone_running_0
+ * Pseudo action: grp-clone_promote_0
+ * Pseudo action: grp:2_promote_0
+ * Resource action: rsc1 promote on node1
+ * Resource action: rsc2 promote on node1
+ * Pseudo action: grp:2_promoted_0
+ * Resource action: rsc1 monitor=10000 on node1
+ * Resource action: rsc2 monitor=10000 on node1
+ * Pseudo action: grp-clone_promoted_0
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Clone Set: grp-clone [grp] (promotable):
+ * Promoted: [ node1 ]
+ * Unpromoted: [ node2 node3 ]
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-9.summary b/cts/scheduler/summary/clone-recover-no-shuffle-9.summary
new file mode 100644
index 0000000..7ede39a
--- /dev/null
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-9.summary
@@ -0,0 +1,56 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node3
+ * base-bundle-1 (ocf:pacemaker:Stateful): Promoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Stopped
+
+Transition Summary:
+ * Demote base:1 ( Promoted -> Unpromoted base-bundle-1 )
+ * Start base-bundle-podman-2 ( node1 )
+ * Start base-bundle-2 ( node1 )
+ * Promote base:2 ( Stopped -> Promoted base-bundle-2 )
+
+Executing Cluster Transition:
+ * Resource action: base cancel=15000 on base-bundle-1
+ * Pseudo action: base-bundle_demote_0
+ * Pseudo action: base-bundle-clone_demote_0
+ * Resource action: base demote on base-bundle-1
+ * Pseudo action: base-bundle-clone_demoted_0
+ * Pseudo action: base-bundle_demoted_0
+ * Pseudo action: base-bundle_start_0
+ * Resource action: base monitor=16000 on base-bundle-1
+ * Pseudo action: base-bundle-clone_start_0
+ * Resource action: base-bundle-podman-2 start on node1
+ * Resource action: base-bundle-2 monitor on node3
+ * Resource action: base-bundle-2 monitor on node2
+ * Resource action: base-bundle-2 monitor on node1
+ * Resource action: base-bundle-podman-2 monitor=60000 on node1
+ * Resource action: base-bundle-2 start on node1
+ * Resource action: base start on base-bundle-2
+ * Pseudo action: base-bundle-clone_running_0
+ * Resource action: base-bundle-2 monitor=30000 on node1
+ * Pseudo action: base-bundle_running_0
+ * Pseudo action: base-bundle_promote_0
+ * Pseudo action: base-bundle-clone_promote_0
+ * Resource action: base promote on base-bundle-2
+ * Pseudo action: base-bundle-clone_promoted_0
+ * Pseudo action: base-bundle_promoted_0
+ * Resource action: base monitor=15000 on base-bundle-2
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+ * GuestOnline: [ base-bundle-0 base-bundle-1 base-bundle-2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node2
+ * Container bundle set: base-bundle [localhost/pcmktest]:
+ * base-bundle-0 (ocf:pacemaker:Stateful): Unpromoted node3
+ * base-bundle-1 (ocf:pacemaker:Stateful): Unpromoted node2
+ * base-bundle-2 (ocf:pacemaker:Stateful): Promoted node1
diff --git a/cts/scheduler/summary/coloc-with-inner-group-member.summary b/cts/scheduler/summary/coloc-with-inner-group-member.summary
new file mode 100644
index 0000000..6659721
--- /dev/null
+++ b/cts/scheduler/summary/coloc-with-inner-group-member.summary
@@ -0,0 +1,45 @@
+Using the original execution date of: 2023-06-20 20:45:06Z
+Current cluster status:
+ * Node List:
+ * Online: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 rhel8-5 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started rhel8-1
+ * vip-dep (ocf:pacemaker:Dummy): Started rhel8-3
+ * Resource Group: grp:
+ * foo (ocf:pacemaker:Dummy): Started rhel8-4
+ * bar (ocf:pacemaker:Dummy): Started rhel8-4
+ * vip (ocf:pacemaker:Dummy): Started rhel8-3
+
+Transition Summary:
+ * Move foo ( rhel8-4 -> rhel8-3 )
+ * Move bar ( rhel8-4 -> rhel8-3 )
+ * Restart vip ( rhel8-3 ) due to required bar start
+
+Executing Cluster Transition:
+ * Pseudo action: grp_stop_0
+ * Resource action: vip stop on rhel8-3
+ * Resource action: bar stop on rhel8-4
+ * Resource action: foo stop on rhel8-4
+ * Pseudo action: grp_stopped_0
+ * Pseudo action: grp_start_0
+ * Resource action: foo start on rhel8-3
+ * Resource action: bar start on rhel8-3
+ * Resource action: vip start on rhel8-3
+ * Resource action: vip monitor=10000 on rhel8-3
+ * Pseudo action: grp_running_0
+ * Resource action: foo monitor=10000 on rhel8-3
+ * Resource action: bar monitor=10000 on rhel8-3
+Using the original execution date of: 2023-06-20 20:45:06Z
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ rhel8-1 rhel8-2 rhel8-3 rhel8-4 rhel8-5 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started rhel8-1
+ * vip-dep (ocf:pacemaker:Dummy): Started rhel8-3
+ * Resource Group: grp:
+ * foo (ocf:pacemaker:Dummy): Started rhel8-3
+ * bar (ocf:pacemaker:Dummy): Started rhel8-3
+ * vip (ocf:pacemaker:Dummy): Started rhel8-3
diff --git a/cts/scheduler/summary/group-anticolocation-2.summary b/cts/scheduler/summary/group-anticolocation-2.summary
new file mode 100644
index 0000000..3ecb056
--- /dev/null
+++ b/cts/scheduler/summary/group-anticolocation-2.summary
@@ -0,0 +1,41 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node1
+ * Resource Group: group1:
+ * member1a (ocf:pacemaker:Dummy): Started node2
+ * member1b (ocf:pacemaker:Dummy): Started node2
+ * Resource Group: group2:
+ * member2a (ocf:pacemaker:Dummy): Started node1
+ * member2b (ocf:pacemaker:Dummy): FAILED node1
+
+Transition Summary:
+ * Move member2a ( node1 -> node2 )
+ * Recover member2b ( node1 -> node2 )
+
+Executing Cluster Transition:
+ * Pseudo action: group2_stop_0
+ * Resource action: member2b stop on node1
+ * Resource action: member2a stop on node1
+ * Pseudo action: group2_stopped_0
+ * Pseudo action: group2_start_0
+ * Resource action: member2a start on node2
+ * Resource action: member2b start on node2
+ * Pseudo action: group2_running_0
+ * Resource action: member2a monitor=10000 on node2
+ * Resource action: member2b monitor=10000 on node2
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node1
+ * Resource Group: group1:
+ * member1a (ocf:pacemaker:Dummy): Started node2
+ * member1b (ocf:pacemaker:Dummy): Started node2
+ * Resource Group: group2:
+ * member2a (ocf:pacemaker:Dummy): Started node2
+ * member2b (ocf:pacemaker:Dummy): Started node2
diff --git a/cts/scheduler/summary/group-anticolocation-3.summary b/cts/scheduler/summary/group-anticolocation-3.summary
new file mode 100644
index 0000000..c9d4321
--- /dev/null
+++ b/cts/scheduler/summary/group-anticolocation-3.summary
@@ -0,0 +1,33 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node1
+ * Resource Group: group1:
+ * member1a (ocf:pacemaker:Dummy): Started node2
+ * member1b (ocf:pacemaker:Dummy): Started node2
+ * Resource Group: group2:
+ * member2a (ocf:pacemaker:Dummy): Started node1
+ * member2b (ocf:pacemaker:Dummy): FAILED node1
+
+Transition Summary:
+ * Stop member2b ( node1 ) due to node availability
+
+Executing Cluster Transition:
+ * Pseudo action: group2_stop_0
+ * Resource action: member2b stop on node1
+ * Pseudo action: group2_stopped_0
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node1
+ * Resource Group: group1:
+ * member1a (ocf:pacemaker:Dummy): Started node2
+ * member1b (ocf:pacemaker:Dummy): Started node2
+ * Resource Group: group2:
+ * member2a (ocf:pacemaker:Dummy): Started node1
+ * member2b (ocf:pacemaker:Dummy): Stopped
diff --git a/cts/scheduler/summary/group-anticolocation-4.summary b/cts/scheduler/summary/group-anticolocation-4.summary
new file mode 100644
index 0000000..3ecb056
--- /dev/null
+++ b/cts/scheduler/summary/group-anticolocation-4.summary
@@ -0,0 +1,41 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node1
+ * Resource Group: group1:
+ * member1a (ocf:pacemaker:Dummy): Started node2
+ * member1b (ocf:pacemaker:Dummy): Started node2
+ * Resource Group: group2:
+ * member2a (ocf:pacemaker:Dummy): Started node1
+ * member2b (ocf:pacemaker:Dummy): FAILED node1
+
+Transition Summary:
+ * Move member2a ( node1 -> node2 )
+ * Recover member2b ( node1 -> node2 )
+
+Executing Cluster Transition:
+ * Pseudo action: group2_stop_0
+ * Resource action: member2b stop on node1
+ * Resource action: member2a stop on node1
+ * Pseudo action: group2_stopped_0
+ * Pseudo action: group2_start_0
+ * Resource action: member2a start on node2
+ * Resource action: member2b start on node2
+ * Pseudo action: group2_running_0
+ * Resource action: member2a monitor=10000 on node2
+ * Resource action: member2b monitor=10000 on node2
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node1
+ * Resource Group: group1:
+ * member1a (ocf:pacemaker:Dummy): Started node2
+ * member1b (ocf:pacemaker:Dummy): Started node2
+ * Resource Group: group2:
+ * member2a (ocf:pacemaker:Dummy): Started node2
+ * member2b (ocf:pacemaker:Dummy): Started node2
diff --git a/cts/scheduler/summary/group-anticolocation-5.summary b/cts/scheduler/summary/group-anticolocation-5.summary
new file mode 100644
index 0000000..6f83538
--- /dev/null
+++ b/cts/scheduler/summary/group-anticolocation-5.summary
@@ -0,0 +1,41 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node1
+ * Resource Group: group1:
+ * member1a (ocf:pacemaker:Dummy): Started node2
+ * member1b (ocf:pacemaker:Dummy): Started node2
+ * Resource Group: group2:
+ * member2a (ocf:pacemaker:Dummy): Started node1
+ * member2b (ocf:pacemaker:Dummy): FAILED node1
+
+Transition Summary:
+ * Move member2a ( node1 -> node3 )
+ * Recover member2b ( node1 -> node3 )
+
+Executing Cluster Transition:
+ * Pseudo action: group2_stop_0
+ * Resource action: member2b stop on node1
+ * Resource action: member2a stop on node1
+ * Pseudo action: group2_stopped_0
+ * Pseudo action: group2_start_0
+ * Resource action: member2a start on node3
+ * Resource action: member2b start on node3
+ * Pseudo action: group2_running_0
+ * Resource action: member2a monitor=10000 on node3
+ * Resource action: member2b monitor=10000 on node3
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node1
+ * Resource Group: group1:
+ * member1a (ocf:pacemaker:Dummy): Started node2
+ * member1b (ocf:pacemaker:Dummy): Started node2
+ * Resource Group: group2:
+ * member2a (ocf:pacemaker:Dummy): Started node3
+ * member2b (ocf:pacemaker:Dummy): Started node3
diff --git a/cts/scheduler/summary/group-anticolocation.summary b/cts/scheduler/summary/group-anticolocation.summary
index 3ecb056..93d2e73 100644
--- a/cts/scheduler/summary/group-anticolocation.summary
+++ b/cts/scheduler/summary/group-anticolocation.summary
@@ -12,17 +12,29 @@ Current cluster status:
* member2b (ocf:pacemaker:Dummy): FAILED node1
Transition Summary:
+ * Move member1a ( node2 -> node1 )
+ * Move member1b ( node2 -> node1 )
* Move member2a ( node1 -> node2 )
* Recover member2b ( node1 -> node2 )
Executing Cluster Transition:
+ * Pseudo action: group1_stop_0
+ * Resource action: member1b stop on node2
* Pseudo action: group2_stop_0
* Resource action: member2b stop on node1
+ * Resource action: member1a stop on node2
* Resource action: member2a stop on node1
+ * Pseudo action: group1_stopped_0
+ * Pseudo action: group1_start_0
+ * Resource action: member1a start on node1
+ * Resource action: member1b start on node1
* Pseudo action: group2_stopped_0
* Pseudo action: group2_start_0
* Resource action: member2a start on node2
* Resource action: member2b start on node2
+ * Pseudo action: group1_running_0
+ * Resource action: member1a monitor=10000 on node1
+ * Resource action: member1b monitor=10000 on node1
* Pseudo action: group2_running_0
* Resource action: member2a monitor=10000 on node2
* Resource action: member2b monitor=10000 on node2
@@ -34,8 +46,8 @@ Revised Cluster Status:
* Full List of Resources:
* Fencing (stonith:fence_xvm): Started node1
* Resource Group: group1:
- * member1a (ocf:pacemaker:Dummy): Started node2
- * member1b (ocf:pacemaker:Dummy): Started node2
+ * member1a (ocf:pacemaker:Dummy): Started node1
+ * member1b (ocf:pacemaker:Dummy): Started node1
* Resource Group: group2:
* member2a (ocf:pacemaker:Dummy): Started node2
* member2b (ocf:pacemaker:Dummy): Started node2
diff --git a/cts/scheduler/summary/migrate-fencing.summary b/cts/scheduler/summary/migrate-fencing.summary
index ebc65bd..500c78a 100644
--- a/cts/scheduler/summary/migrate-fencing.summary
+++ b/cts/scheduler/summary/migrate-fencing.summary
@@ -23,7 +23,7 @@ Current cluster status:
* Unpromoted: [ pcmk-1 pcmk-2 pcmk-3 ]
Transition Summary:
- * Fence (reboot) pcmk-4 'termination was requested'
+ * Fence (reboot) pcmk-4 'fencing was requested'
* Stop FencingChild:0 ( pcmk-4 ) due to node availability
* Move r192.168.101.181 ( pcmk-4 -> pcmk-1 )
* Move r192.168.101.182 ( pcmk-4 -> pcmk-1 )
diff --git a/cts/scheduler/summary/no-promote-on-unrunnable-guest.summary b/cts/scheduler/summary/no-promote-on-unrunnable-guest.summary
index c06f8f0..ab8f8ff 100644
--- a/cts/scheduler/summary/no-promote-on-unrunnable-guest.summary
+++ b/cts/scheduler/summary/no-promote-on-unrunnable-guest.summary
@@ -37,9 +37,9 @@ Executing Cluster Transition:
* Resource action: ovndb_servers cancel=30000 on ovn-dbs-bundle-1
* Pseudo action: ovn-dbs-bundle-master_pre_notify_stop_0
* Pseudo action: ovn-dbs-bundle_stop_0
- * Resource action: ovndb_servers notify on ovn-dbs-bundle-0
* Resource action: ovndb_servers notify on ovn-dbs-bundle-1
* Resource action: ovndb_servers notify on ovn-dbs-bundle-2
+ * Resource action: ovndb_servers notify on ovn-dbs-bundle-0
* Pseudo action: ovn-dbs-bundle-master_confirmed-pre_notify_stop_0
* Pseudo action: ovn-dbs-bundle-master_stop_0
* Resource action: ovndb_servers stop on ovn-dbs-bundle-0
diff --git a/cts/scheduler/summary/node-pending-timeout.summary b/cts/scheduler/summary/node-pending-timeout.summary
new file mode 100644
index 0000000..0fef982
--- /dev/null
+++ b/cts/scheduler/summary/node-pending-timeout.summary
@@ -0,0 +1,26 @@
+Using the original execution date of: 2023-02-21 12:19:57Z
+Current cluster status:
+ * Node List:
+ * Node node-2: UNCLEAN (online)
+ * Online: [ node-1 ]
+
+ * Full List of Resources:
+ * st-sbd (stonith:external/sbd): Stopped
+
+Transition Summary:
+ * Fence (reboot) node-2 'peer pending timed out on joining the process group'
+ * Start st-sbd ( node-1 )
+
+Executing Cluster Transition:
+ * Resource action: st-sbd monitor on node-1
+ * Fencing node-2 (reboot)
+ * Resource action: st-sbd start on node-1
+Using the original execution date of: 2023-02-21 12:19:57Z
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node-1 ]
+ * OFFLINE: [ node-2 ]
+
+ * Full List of Resources:
+ * st-sbd (stonith:external/sbd): Started node-1
diff --git a/cts/scheduler/summary/pending-node-no-uname.summary b/cts/scheduler/summary/pending-node-no-uname.summary
new file mode 100644
index 0000000..5f04fc6
--- /dev/null
+++ b/cts/scheduler/summary/pending-node-no-uname.summary
@@ -0,0 +1,23 @@
+Using the original execution date of: 2023-02-21 12:19:57Z
+Current cluster status:
+ * Node List:
+ * Node node-2: pending
+ * Online: [ node-1 ]
+
+ * Full List of Resources:
+ * st-sbd (stonith:external/sbd): Stopped
+
+Transition Summary:
+ * Start st-sbd ( node-1 ) blocked
+
+Executing Cluster Transition:
+ * Resource action: st-sbd monitor on node-1
+Using the original execution date of: 2023-02-21 12:19:57Z
+
+Revised Cluster Status:
+ * Node List:
+ * Node node-2: pending
+ * Online: [ node-1 ]
+
+ * Full List of Resources:
+ * st-sbd (stonith:external/sbd): Stopped
diff --git a/cts/scheduler/summary/promoted-ordering.summary b/cts/scheduler/summary/promoted-ordering.summary
index 3222e18..0ef1bd8 100644
--- a/cts/scheduler/summary/promoted-ordering.summary
+++ b/cts/scheduler/summary/promoted-ordering.summary
@@ -9,8 +9,8 @@ Current cluster status:
* extip_2 (ocf:heartbeat:IPaddr2): Stopped
* Resource Group: group_main:
* intip_0_main (ocf:heartbeat:IPaddr2): Stopped
- * intip_1_master (ocf:heartbeat:IPaddr2): Stopped
- * intip_2_slave (ocf:heartbeat:IPaddr2): Stopped
+ * intip_1_active (ocf:heartbeat:IPaddr2): Stopped
+ * intip_2_passive (ocf:heartbeat:IPaddr2): Stopped
* Clone Set: ms_drbd_www [drbd_www] (promotable):
* Stopped: [ webcluster01 webcluster02 ]
* Clone Set: clone_ocfs2_www [ocfs2_www] (unique):
@@ -25,8 +25,8 @@ Current cluster status:
Transition Summary:
* Start extip_1 ( webcluster01 )
* Start extip_2 ( webcluster01 )
- * Start intip_1_master ( webcluster01 )
- * Start intip_2_slave ( webcluster01 )
+ * Start intip_1_active ( webcluster01 )
+ * Start intip_2_passive ( webcluster01 )
* Start drbd_www:0 ( webcluster01 )
* Start drbd_mysql:0 ( webcluster01 )
@@ -35,8 +35,8 @@ Executing Cluster Transition:
* Resource action: extip_1 monitor on webcluster01
* Resource action: extip_2 monitor on webcluster01
* Resource action: intip_0_main monitor on webcluster01
- * Resource action: intip_1_master monitor on webcluster01
- * Resource action: intip_2_slave monitor on webcluster01
+ * Resource action: intip_1_active monitor on webcluster01
+ * Resource action: intip_2_passive monitor on webcluster01
* Resource action: drbd_www:0 monitor on webcluster01
* Pseudo action: ms_drbd_www_pre_notify_start_0
* Resource action: ocfs2_www:0 monitor on webcluster01
@@ -48,16 +48,16 @@ Executing Cluster Transition:
* Resource action: fs_mysql monitor on webcluster01
* Resource action: extip_1 start on webcluster01
* Resource action: extip_2 start on webcluster01
- * Resource action: intip_1_master start on webcluster01
- * Resource action: intip_2_slave start on webcluster01
+ * Resource action: intip_1_active start on webcluster01
+ * Resource action: intip_2_passive start on webcluster01
* Pseudo action: ms_drbd_www_confirmed-pre_notify_start_0
* Pseudo action: ms_drbd_www_start_0
* Pseudo action: ms_drbd_mysql_confirmed-pre_notify_start_0
* Pseudo action: ms_drbd_mysql_start_0
* Resource action: extip_1 monitor=30000 on webcluster01
* Resource action: extip_2 monitor=30000 on webcluster01
- * Resource action: intip_1_master monitor=30000 on webcluster01
- * Resource action: intip_2_slave monitor=30000 on webcluster01
+ * Resource action: intip_1_active monitor=30000 on webcluster01
+ * Resource action: intip_2_passive monitor=30000 on webcluster01
* Resource action: drbd_www:0 start on webcluster01
* Pseudo action: ms_drbd_www_running_0
* Resource action: drbd_mysql:0 start on webcluster01
@@ -80,8 +80,8 @@ Revised Cluster Status:
* extip_2 (ocf:heartbeat:IPaddr2): Started webcluster01
* Resource Group: group_main:
* intip_0_main (ocf:heartbeat:IPaddr2): Stopped
- * intip_1_master (ocf:heartbeat:IPaddr2): Started webcluster01
- * intip_2_slave (ocf:heartbeat:IPaddr2): Started webcluster01
+ * intip_1_active (ocf:heartbeat:IPaddr2): Started webcluster01
+ * intip_2_passive (ocf:heartbeat:IPaddr2): Started webcluster01
* Clone Set: ms_drbd_www [drbd_www] (promotable):
* Unpromoted: [ webcluster01 ]
* Stopped: [ webcluster02 ]
diff --git a/cts/scheduler/summary/promoted-probed-score.summary b/cts/scheduler/summary/promoted-probed-score.summary
index 3c9326c..52487d4 100644
--- a/cts/scheduler/summary/promoted-probed-score.summary
+++ b/cts/scheduler/summary/promoted-probed-score.summary
@@ -39,8 +39,8 @@ Current cluster status:
* Proxy (ocf:heartbeat:VirtualDomain): Stopped
Transition Summary:
- * Promote AdminDrbd:0 ( Stopped -> Promoted hypatia-corosync.nevis.columbia.edu )
- * Promote AdminDrbd:1 ( Stopped -> Promoted orestes-corosync.nevis.columbia.edu )
+ * Promote AdminDrbd:0 ( Stopped -> Promoted orestes-corosync.nevis.columbia.edu )
+ * Promote AdminDrbd:1 ( Stopped -> Promoted hypatia-corosync.nevis.columbia.edu )
* Start CronAmbientTemperature ( hypatia-corosync.nevis.columbia.edu )
* Start StonithHypatia ( orestes-corosync.nevis.columbia.edu )
* Start StonithOrestes ( hypatia-corosync.nevis.columbia.edu )
@@ -83,18 +83,18 @@ Transition Summary:
* Start ExportUsrNevis:1 ( orestes-corosync.nevis.columbia.edu )
* Start ExportUsrNevisOffsite:1 ( orestes-corosync.nevis.columbia.edu )
* Start ExportWWW:1 ( orestes-corosync.nevis.columbia.edu )
- * Start AdminLvm:0 ( hypatia-corosync.nevis.columbia.edu )
- * Start FSUsrNevis:0 ( hypatia-corosync.nevis.columbia.edu )
- * Start FSVarNevis:0 ( hypatia-corosync.nevis.columbia.edu )
- * Start FSVirtualMachines:0 ( hypatia-corosync.nevis.columbia.edu )
- * Start FSMail:0 ( hypatia-corosync.nevis.columbia.edu )
- * Start FSWork:0 ( hypatia-corosync.nevis.columbia.edu )
- * Start AdminLvm:1 ( orestes-corosync.nevis.columbia.edu )
- * Start FSUsrNevis:1 ( orestes-corosync.nevis.columbia.edu )
- * Start FSVarNevis:1 ( orestes-corosync.nevis.columbia.edu )
- * Start FSVirtualMachines:1 ( orestes-corosync.nevis.columbia.edu )
- * Start FSMail:1 ( orestes-corosync.nevis.columbia.edu )
- * Start FSWork:1 ( orestes-corosync.nevis.columbia.edu )
+ * Start AdminLvm:0 ( orestes-corosync.nevis.columbia.edu )
+ * Start FSUsrNevis:0 ( orestes-corosync.nevis.columbia.edu )
+ * Start FSVarNevis:0 ( orestes-corosync.nevis.columbia.edu )
+ * Start FSVirtualMachines:0 ( orestes-corosync.nevis.columbia.edu )
+ * Start FSMail:0 ( orestes-corosync.nevis.columbia.edu )
+ * Start FSWork:0 ( orestes-corosync.nevis.columbia.edu )
+ * Start AdminLvm:1 ( hypatia-corosync.nevis.columbia.edu )
+ * Start FSUsrNevis:1 ( hypatia-corosync.nevis.columbia.edu )
+ * Start FSVarNevis:1 ( hypatia-corosync.nevis.columbia.edu )
+ * Start FSVirtualMachines:1 ( hypatia-corosync.nevis.columbia.edu )
+ * Start FSMail:1 ( hypatia-corosync.nevis.columbia.edu )
+ * Start FSWork:1 ( hypatia-corosync.nevis.columbia.edu )
* Start KVM-guest ( hypatia-corosync.nevis.columbia.edu )
* Start Proxy ( orestes-corosync.nevis.columbia.edu )
@@ -125,74 +125,74 @@ Executing Cluster Transition:
* Resource action: ExportUsrNevis:1 monitor on orestes-corosync.nevis.columbia.edu
* Resource action: ExportUsrNevisOffsite:1 monitor on orestes-corosync.nevis.columbia.edu
* Resource action: ExportWWW:1 monitor on orestes-corosync.nevis.columbia.edu
- * Resource action: AdminLvm:0 monitor on hypatia-corosync.nevis.columbia.edu
- * Resource action: FSUsrNevis:0 monitor on hypatia-corosync.nevis.columbia.edu
- * Resource action: FSVarNevis:0 monitor on hypatia-corosync.nevis.columbia.edu
- * Resource action: FSVirtualMachines:0 monitor on hypatia-corosync.nevis.columbia.edu
- * Resource action: FSMail:0 monitor on hypatia-corosync.nevis.columbia.edu
- * Resource action: FSWork:0 monitor on hypatia-corosync.nevis.columbia.edu
- * Resource action: AdminLvm:1 monitor on orestes-corosync.nevis.columbia.edu
- * Resource action: FSUsrNevis:1 monitor on orestes-corosync.nevis.columbia.edu
- * Resource action: FSVarNevis:1 monitor on orestes-corosync.nevis.columbia.edu
- * Resource action: FSVirtualMachines:1 monitor on orestes-corosync.nevis.columbia.edu
- * Resource action: FSMail:1 monitor on orestes-corosync.nevis.columbia.edu
- * Resource action: FSWork:1 monitor on orestes-corosync.nevis.columbia.edu
+ * Resource action: AdminLvm:0 monitor on orestes-corosync.nevis.columbia.edu
+ * Resource action: FSUsrNevis:0 monitor on orestes-corosync.nevis.columbia.edu
+ * Resource action: FSVarNevis:0 monitor on orestes-corosync.nevis.columbia.edu
+ * Resource action: FSVirtualMachines:0 monitor on orestes-corosync.nevis.columbia.edu
+ * Resource action: FSMail:0 monitor on orestes-corosync.nevis.columbia.edu
+ * Resource action: FSWork:0 monitor on orestes-corosync.nevis.columbia.edu
+ * Resource action: AdminLvm:1 monitor on hypatia-corosync.nevis.columbia.edu
+ * Resource action: FSUsrNevis:1 monitor on hypatia-corosync.nevis.columbia.edu
+ * Resource action: FSVarNevis:1 monitor on hypatia-corosync.nevis.columbia.edu
+ * Resource action: FSVirtualMachines:1 monitor on hypatia-corosync.nevis.columbia.edu
+ * Resource action: FSMail:1 monitor on hypatia-corosync.nevis.columbia.edu
+ * Resource action: FSWork:1 monitor on hypatia-corosync.nevis.columbia.edu
* Resource action: KVM-guest monitor on orestes-corosync.nevis.columbia.edu
* Resource action: KVM-guest monitor on hypatia-corosync.nevis.columbia.edu
* Resource action: Proxy monitor on orestes-corosync.nevis.columbia.edu
* Resource action: Proxy monitor on hypatia-corosync.nevis.columbia.edu
* Pseudo action: AdminClone_confirmed-pre_notify_start_0
* Pseudo action: AdminClone_start_0
- * Resource action: AdminDrbd:0 start on hypatia-corosync.nevis.columbia.edu
- * Resource action: AdminDrbd:1 start on orestes-corosync.nevis.columbia.edu
+ * Resource action: AdminDrbd:0 start on orestes-corosync.nevis.columbia.edu
+ * Resource action: AdminDrbd:1 start on hypatia-corosync.nevis.columbia.edu
* Pseudo action: AdminClone_running_0
* Pseudo action: AdminClone_post_notify_running_0
- * Resource action: AdminDrbd:0 notify on hypatia-corosync.nevis.columbia.edu
- * Resource action: AdminDrbd:1 notify on orestes-corosync.nevis.columbia.edu
+ * Resource action: AdminDrbd:0 notify on orestes-corosync.nevis.columbia.edu
+ * Resource action: AdminDrbd:1 notify on hypatia-corosync.nevis.columbia.edu
* Pseudo action: AdminClone_confirmed-post_notify_running_0
* Pseudo action: AdminClone_pre_notify_promote_0
- * Resource action: AdminDrbd:0 notify on hypatia-corosync.nevis.columbia.edu
- * Resource action: AdminDrbd:1 notify on orestes-corosync.nevis.columbia.edu
+ * Resource action: AdminDrbd:0 notify on orestes-corosync.nevis.columbia.edu
+ * Resource action: AdminDrbd:1 notify on hypatia-corosync.nevis.columbia.edu
* Pseudo action: AdminClone_confirmed-pre_notify_promote_0
* Pseudo action: AdminClone_promote_0
- * Resource action: AdminDrbd:0 promote on hypatia-corosync.nevis.columbia.edu
- * Resource action: AdminDrbd:1 promote on orestes-corosync.nevis.columbia.edu
+ * Resource action: AdminDrbd:0 promote on orestes-corosync.nevis.columbia.edu
+ * Resource action: AdminDrbd:1 promote on hypatia-corosync.nevis.columbia.edu
* Pseudo action: AdminClone_promoted_0
* Pseudo action: AdminClone_post_notify_promoted_0
- * Resource action: AdminDrbd:0 notify on hypatia-corosync.nevis.columbia.edu
- * Resource action: AdminDrbd:1 notify on orestes-corosync.nevis.columbia.edu
+ * Resource action: AdminDrbd:0 notify on orestes-corosync.nevis.columbia.edu
+ * Resource action: AdminDrbd:1 notify on hypatia-corosync.nevis.columbia.edu
* Pseudo action: AdminClone_confirmed-post_notify_promoted_0
* Pseudo action: FilesystemClone_start_0
- * Resource action: AdminDrbd:0 monitor=59000 on hypatia-corosync.nevis.columbia.edu
- * Resource action: AdminDrbd:1 monitor=59000 on orestes-corosync.nevis.columbia.edu
+ * Resource action: AdminDrbd:0 monitor=59000 on orestes-corosync.nevis.columbia.edu
+ * Resource action: AdminDrbd:1 monitor=59000 on hypatia-corosync.nevis.columbia.edu
* Pseudo action: FilesystemGroup:0_start_0
- * Resource action: AdminLvm:0 start on hypatia-corosync.nevis.columbia.edu
- * Resource action: FSUsrNevis:0 start on hypatia-corosync.nevis.columbia.edu
- * Resource action: FSVarNevis:0 start on hypatia-corosync.nevis.columbia.edu
- * Resource action: FSVirtualMachines:0 start on hypatia-corosync.nevis.columbia.edu
- * Resource action: FSMail:0 start on hypatia-corosync.nevis.columbia.edu
- * Resource action: FSWork:0 start on hypatia-corosync.nevis.columbia.edu
+ * Resource action: AdminLvm:0 start on orestes-corosync.nevis.columbia.edu
+ * Resource action: FSUsrNevis:0 start on orestes-corosync.nevis.columbia.edu
+ * Resource action: FSVarNevis:0 start on orestes-corosync.nevis.columbia.edu
+ * Resource action: FSVirtualMachines:0 start on orestes-corosync.nevis.columbia.edu
+ * Resource action: FSMail:0 start on orestes-corosync.nevis.columbia.edu
+ * Resource action: FSWork:0 start on orestes-corosync.nevis.columbia.edu
* Pseudo action: FilesystemGroup:1_start_0
- * Resource action: AdminLvm:1 start on orestes-corosync.nevis.columbia.edu
- * Resource action: FSUsrNevis:1 start on orestes-corosync.nevis.columbia.edu
- * Resource action: FSVarNevis:1 start on orestes-corosync.nevis.columbia.edu
- * Resource action: FSVirtualMachines:1 start on orestes-corosync.nevis.columbia.edu
- * Resource action: FSMail:1 start on orestes-corosync.nevis.columbia.edu
- * Resource action: FSWork:1 start on orestes-corosync.nevis.columbia.edu
+ * Resource action: AdminLvm:1 start on hypatia-corosync.nevis.columbia.edu
+ * Resource action: FSUsrNevis:1 start on hypatia-corosync.nevis.columbia.edu
+ * Resource action: FSVarNevis:1 start on hypatia-corosync.nevis.columbia.edu
+ * Resource action: FSVirtualMachines:1 start on hypatia-corosync.nevis.columbia.edu
+ * Resource action: FSMail:1 start on hypatia-corosync.nevis.columbia.edu
+ * Resource action: FSWork:1 start on hypatia-corosync.nevis.columbia.edu
* Pseudo action: FilesystemGroup:0_running_0
- * Resource action: AdminLvm:0 monitor=30000 on hypatia-corosync.nevis.columbia.edu
- * Resource action: FSUsrNevis:0 monitor=20000 on hypatia-corosync.nevis.columbia.edu
- * Resource action: FSVarNevis:0 monitor=20000 on hypatia-corosync.nevis.columbia.edu
- * Resource action: FSVirtualMachines:0 monitor=20000 on hypatia-corosync.nevis.columbia.edu
- * Resource action: FSMail:0 monitor=20000 on hypatia-corosync.nevis.columbia.edu
- * Resource action: FSWork:0 monitor=20000 on hypatia-corosync.nevis.columbia.edu
+ * Resource action: AdminLvm:0 monitor=30000 on orestes-corosync.nevis.columbia.edu
+ * Resource action: FSUsrNevis:0 monitor=20000 on orestes-corosync.nevis.columbia.edu
+ * Resource action: FSVarNevis:0 monitor=20000 on orestes-corosync.nevis.columbia.edu
+ * Resource action: FSVirtualMachines:0 monitor=20000 on orestes-corosync.nevis.columbia.edu
+ * Resource action: FSMail:0 monitor=20000 on orestes-corosync.nevis.columbia.edu
+ * Resource action: FSWork:0 monitor=20000 on orestes-corosync.nevis.columbia.edu
* Pseudo action: FilesystemGroup:1_running_0
- * Resource action: AdminLvm:1 monitor=30000 on orestes-corosync.nevis.columbia.edu
- * Resource action: FSUsrNevis:1 monitor=20000 on orestes-corosync.nevis.columbia.edu
- * Resource action: FSVarNevis:1 monitor=20000 on orestes-corosync.nevis.columbia.edu
- * Resource action: FSVirtualMachines:1 monitor=20000 on orestes-corosync.nevis.columbia.edu
- * Resource action: FSMail:1 monitor=20000 on orestes-corosync.nevis.columbia.edu
- * Resource action: FSWork:1 monitor=20000 on orestes-corosync.nevis.columbia.edu
+ * Resource action: AdminLvm:1 monitor=30000 on hypatia-corosync.nevis.columbia.edu
+ * Resource action: FSUsrNevis:1 monitor=20000 on hypatia-corosync.nevis.columbia.edu
+ * Resource action: FSVarNevis:1 monitor=20000 on hypatia-corosync.nevis.columbia.edu
+ * Resource action: FSVirtualMachines:1 monitor=20000 on hypatia-corosync.nevis.columbia.edu
+ * Resource action: FSMail:1 monitor=20000 on hypatia-corosync.nevis.columbia.edu
+ * Resource action: FSWork:1 monitor=20000 on hypatia-corosync.nevis.columbia.edu
* Pseudo action: FilesystemClone_running_0
* Resource action: CronAmbientTemperature start on hypatia-corosync.nevis.columbia.edu
* Pseudo action: DhcpGroup_start_0
diff --git a/cts/scheduler/summary/timeout-by-node.summary b/cts/scheduler/summary/timeout-by-node.summary
new file mode 100644
index 0000000..78f4fcd
--- /dev/null
+++ b/cts/scheduler/summary/timeout-by-node.summary
@@ -0,0 +1,43 @@
+Current cluster status:
+ * Node List:
+ * Online: [ node1 node2 node3 node4 node5 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node1
+ * Clone Set: rsc1-clone [rsc1]:
+ * Stopped: [ node1 node2 node3 node4 node5 ]
+
+Transition Summary:
+ * Start rsc1:0 ( node2 )
+ * Start rsc1:1 ( node3 )
+ * Start rsc1:2 ( node4 )
+ * Start rsc1:3 ( node5 )
+ * Start rsc1:4 ( node1 )
+
+Executing Cluster Transition:
+ * Resource action: rsc1:0 monitor on node2
+ * Resource action: rsc1:1 monitor on node3
+ * Resource action: rsc1:2 monitor on node4
+ * Resource action: rsc1:3 monitor on node5
+ * Resource action: rsc1:4 monitor on node1
+ * Pseudo action: rsc1-clone_start_0
+ * Resource action: rsc1:0 start on node2
+ * Resource action: rsc1:1 start on node3
+ * Resource action: rsc1:2 start on node4
+ * Resource action: rsc1:3 start on node5
+ * Resource action: rsc1:4 start on node1
+ * Pseudo action: rsc1-clone_running_0
+ * Resource action: rsc1:0 monitor=10000 on node2
+ * Resource action: rsc1:1 monitor=10000 on node3
+ * Resource action: rsc1:2 monitor=10000 on node4
+ * Resource action: rsc1:3 monitor=10000 on node5
+ * Resource action: rsc1:4 monitor=10000 on node1
+
+Revised Cluster Status:
+ * Node List:
+ * Online: [ node1 node2 node3 node4 node5 ]
+
+ * Full List of Resources:
+ * Fencing (stonith:fence_xvm): Started node1
+ * Clone Set: rsc1-clone [rsc1]:
+ * Started: [ node1 node2 node3 node4 node5 ]
diff --git a/cts/scheduler/summary/unfence-definition.summary b/cts/scheduler/summary/unfence-definition.summary
index bb22680..2d94f71 100644
--- a/cts/scheduler/summary/unfence-definition.summary
+++ b/cts/scheduler/summary/unfence-definition.summary
@@ -32,8 +32,8 @@ Executing Cluster Transition:
* Resource action: fencing monitor on virt-3
* Resource action: fencing delete on virt-1
* Resource action: dlm monitor on virt-3
- * Resource action: clvmd stop on virt-1
* Resource action: clvmd monitor on virt-3
+ * Resource action: clvmd stop on virt-1
* Pseudo action: clvmd-clone_stopped_0
* Pseudo action: dlm-clone_stop_0
* Resource action: dlm stop on virt-1
diff --git a/cts/scheduler/summary/unfence-parameters.summary b/cts/scheduler/summary/unfence-parameters.summary
index b872a41..93a65e6 100644
--- a/cts/scheduler/summary/unfence-parameters.summary
+++ b/cts/scheduler/summary/unfence-parameters.summary
@@ -31,8 +31,8 @@ Executing Cluster Transition:
* Fencing virt-3 (on)
* Resource action: fencing monitor on virt-3
* Resource action: dlm monitor on virt-3
- * Resource action: clvmd stop on virt-1
* Resource action: clvmd monitor on virt-3
+ * Resource action: clvmd stop on virt-1
* Pseudo action: clvmd-clone_stopped_0
* Pseudo action: dlm-clone_stop_0
* Resource action: dlm stop on virt-1
diff --git a/cts/scheduler/xml/anon-instance-pending.xml b/cts/scheduler/xml/anon-instance-pending.xml
index 86a6728..297c0bb 100644
--- a/cts/scheduler/xml/anon-instance-pending.xml
+++ b/cts/scheduler/xml/anon-instance-pending.xml
@@ -16,7 +16,7 @@
</nodes>
<resources>
<primitive class="stonith" id="Fencing" type="fence_imaginary"/>
- <!-- clone1 tests a pending start on node4 with node1-2 slave, node3 master, and node5-11 stopped -->
+ <!-- clone1 tests a pending start on node4 with node1-2 unpromoted, node3 promoted, and node5-11 stopped -->
<clone id="clone1">
<primitive id="clone1rsc" class="ocf" provider="pacemaker" type="Stateful">
<operations>
diff --git a/cts/scheduler/xml/bundle-interleave-start.xml b/cts/scheduler/xml/bundle-interleave-start.xml
index e8630cd..facb181 100644
--- a/cts/scheduler/xml/bundle-interleave-start.xml
+++ b/cts/scheduler/xml/bundle-interleave-start.xml
@@ -6,7 +6,8 @@
and its promoted role is colocated with base's. App's starts and
promotes are ordered after base's.
- In this test, all are stopped and must be started.
+ In this test, all are stopped and must be started. One replica of each
+ bundle must be promoted.
-->
<configuration>
<crm_config>
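[Note on the bundle-interleave-start test above: the colocation of the app bundle's promoted role with base's, and the ordering of app's starts/promotes after base's, are presumably defined in the constraints section of that test file, which this hunk does not show. Purely as an illustrative sketch, constraints of that shape would look roughly like the following in CIB XML; the bundle names are taken from the comment's wording and the constraint IDs are invented here, so the actual definitions in the file may differ.]

    <rsc_colocation id="app-with-base" rsc="app-bundle" rsc-role="Promoted"
                    with-rsc="base-bundle" with-rsc-role="Promoted" score="INFINITY"/>
    <rsc_order id="app-start-after-base" first="base-bundle" first-action="start"
               then="app-bundle" then-action="start"/>
    <rsc_order id="app-promote-after-base" first="base-bundle" first-action="promote"
               then="app-bundle" then-action="promote"/>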
diff --git a/cts/scheduler/xml/bundle-promoted-anticolocation-1.xml b/cts/scheduler/xml/bundle-promoted-anticolocation-1.xml
new file mode 100644
index 0000000..71f472e
--- /dev/null
+++ b/cts/scheduler/xml/bundle-promoted-anticolocation-1.xml
@@ -0,0 +1,238 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="47" num_updates="0" admin_epoch="0" cib-last-written="Mon Jun 19 19:33:16 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * A primitive (vip) is mandatorily anti-colocated with the promoted
+ instance of a bundle (base-bundle)
+ * The primitive is running on the same node as the bundle's promoted
+ instance
+ * There is no stickiness
+ * There are no location constraints
+ * There are three nodes available for both resources
+
+ In this situation, the primitive should move away from the promoted
+ bundle instance's node.
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687217818"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <bundle id="base-bundle">
+ <meta_attributes id="base-bundle-meta_attributes">
+ <nvpair id="base-bundle-meta_attributes-promotable" name="promotable" value="true"/>
+ <nvpair id="base-bundle-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3121"/>
+ <primitive id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="base-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="base-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ <primitive class="ocf" id="vip" provider="heartbeat" type="IPaddr2">
+ <instance_attributes id="vip-instance_attributes">
+ <nvpair id="vip-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
+ <nvpair id="vip-instance_attributes-ip" name="ip" value="192.168.22.81"/>
+ </instance_attributes>
+ <operations>
+ <op id="vip-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="vip-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="vip-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_colocation id="colocation-vip-base-bundle--INFINITY" rsc="vip" score="-INFINITY" with-rsc="base-bundle" with-rsc-role="Promoted"/>
+ </constraints>
+ <fencing-topology/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-master-base" name="master-base" value="11"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="55" rc-code="0" op-status="0" interval="0" last-rc-change="1687222219" exec-time="0" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="2:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;2:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="38" rc-code="0" op-status="0" interval="120000" last-rc-change="1687217955" exec-time="43" queue-time="0" op-digest="8ca455a603cbe2dc8d68703e63c272f1"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="50:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;50:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="57" rc-code="0" op-status="0" interval="0" last-rc-change="1687228394" exec-time="47" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ <lrm_rsc_op id="vip_monitor_10000" operation_key="vip_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="52:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;52:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="53" rc-code="0" op-status="0" interval="10000" last-rc-change="1687222197" exec-time="38" queue-time="0" op-digest="20f7173b4af9ab62392ae5d9e5243580"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;8:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="49" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="103" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="6:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;6:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="45" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="82" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="9:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;9:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687219636" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="7:28:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;7:28:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="40" rc-code="0" op-status="0" interval="0" last-rc-change="1687218074" exec-time="795" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ <lrm_rsc_op id="base-bundle-podman-0_monitor_60000" operation_key="base-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;8:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="41" rc-code="0" op-status="0" interval="60000" last-rc-change="1687218074" exec-time="166" queue-time="0" op-digest="902512fcf3e4556d9585c44184665d8c"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="7:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;7:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="4" rc-code="7" op-status="0" interval="0" last-rc-change="1687219403" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="9:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;9:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1687218074" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-0_monitor_30000" operation_key="base-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;11:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="3" rc-code="0" op-status="0" interval="30000" last-rc-change="1687218075" exec-time="0" queue-time="0" op-digest="6f3c7e233bacb8420fef5f9581190d00"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="controld_update_resource_history" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-master-base" name="master-base" value="12"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="58" rc-code="0" op-status="0" interval="0" last-rc-change="1687226745" exec-time="33" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="59" rc-code="0" op-status="0" interval="120000" last-rc-change="1687226745" exec-time="27" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="50:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;50:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="53" rc-code="0" op-status="0" interval="0" last-rc-change="1687222197" exec-time="47" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ <lrm_rsc_op id="vip_monitor_10000" operation_key="vip_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="26:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;26:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="37" rc-code="0" op-status="0" interval="10000" last-rc-change="1687217955" exec-time="42" queue-time="0" op-digest="20f7173b4af9ab62392ae5d9e5243580"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;12:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="47" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="69" queue-time="1" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="20:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;20:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="48" rc-code="0" op-status="0" interval="0" last-rc-change="1687219402" exec-time="665" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ <lrm_rsc_op id="base-bundle-podman-1_monitor_60000" operation_key="base-bundle-podman-1_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="21:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;21:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="49" rc-code="0" op-status="0" interval="60000" last-rc-change="1687219403" exec-time="158" queue-time="0" op-digest="f0ef4729d120aa3f5d938cabca4d06c7"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="10:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;10:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687219636" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="2:27:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;2:27:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="39" rc-code="0" op-status="0" interval="0" last-rc-change="1687217956" exec-time="161" queue-time="0" op-digest="bbac36b73a7a6604aefdd2cb3b5f42e6"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="22:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;22:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1687219403" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-1_monitor_30000" operation_key="base-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="20:34:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;20:34:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687219404" exec-time="0" queue-time="0" op-digest="3929eec440004bca31f813a8e6097506"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="3:7:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;3:7:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687217701" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-0" uname="base-bundle-0" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="base-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="16:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;16:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687218075" exec-time="307" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687218076" exec-time="15" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-0">
+ <instance_attributes id="status-base-bundle-0">
+ <nvpair id="status-base-bundle-0-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="3" uname="node3" crmd="online" crm-debug-origin="controld_update_resource_history" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-master-base" name="master-base" value="13"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="5" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="29:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;29:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="24" rc-code="0" op-status="0" interval="0" last-rc-change="1687219634" exec-time="936" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ <lrm_rsc_op id="base-bundle-podman-2_monitor_60000" operation_key="base-bundle-podman-2_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="30:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;30:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="25" rc-code="0" op-status="0" interval="60000" last-rc-change="1687219635" exec-time="173" queue-time="0" op-digest="8eeca5a30b14f3d9ef7d2ddbd16c2e05"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="51:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;51:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="27" rc-code="0" op-status="0" interval="0" last-rc-change="1687228393" exec-time="58" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ <lrm_rsc_op id="vip_monitor_10000" operation_key="vip_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="52:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;52:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="28" rc-code="0" op-status="0" interval="10000" last-rc-change="1687228393" exec-time="35" queue-time="0" op-digest="20f7173b4af9ab62392ae5d9e5243580"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="14:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;14:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="14" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="107" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="31:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;31:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="4" rc-code="0" op-status="0" interval="0" last-rc-change="1687219635" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-2_monitor_30000" operation_key="base-bundle-2_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="25:37:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;25:37:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="5" rc-code="0" op-status="0" interval="30000" last-rc-change="1687219637" exec-time="0" queue-time="0" op-digest="354b9acaa7ea1113d708dc11a1d6bbfa"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;12:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="10" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="104" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="15:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;15:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="2" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="13:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;13:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-1" uname="base-bundle-1" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_demote_0" operation="demote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:55:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;35:55:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="86" rc-code="0" op-status="0" interval="0" last-rc-change="1687220328" exec-time="221" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="92" rc-code="0" op-status="0" interval="16000" last-rc-change="1687220329" exec-time="43" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-1">
+ <instance_attributes id="status-base-bundle-1">
+ <nvpair id="status-base-bundle-1-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-2" uname="base-bundle-2" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-2">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="32:43:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;32:43:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="30" rc-code="0" op-status="0" interval="0" last-rc-change="1687220058" exec-time="222" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_15000" operation_key="base_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:8;33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="36" rc-code="8" op-status="0" interval="15000" last-rc-change="1687220059" exec-time="13" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-2">
+ <instance_attributes id="status-base-bundle-2">
+ <nvpair id="status-base-bundle-2-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
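[Note: the companion test added in the next file, bundle-promoted-anticolocation-2.xml, exercises the same scenario with an optional (-5000) rather than mandatory (-INFINITY) anti-colocation. For quick comparison, the constraint in each file is:]

    <rsc_colocation id="colocation-vip-base-bundle--INFINITY" rsc="vip" score="-INFINITY" with-rsc="base-bundle" with-rsc-role="Promoted"/>
    <rsc_colocation id="colocation-vip-base-bundle--5000" rsc="vip" score="-5000" with-rsc="base-bundle" with-rsc-role="Promoted"/>

[In both cases, with no stickiness and no location constraints, the expected outcome described in the test comments is the same: vip moves away from the node hosting the bundle's promoted instance.]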
diff --git a/cts/scheduler/xml/bundle-promoted-anticolocation-2.xml b/cts/scheduler/xml/bundle-promoted-anticolocation-2.xml
new file mode 100644
index 0000000..32bc5ea
--- /dev/null
+++ b/cts/scheduler/xml/bundle-promoted-anticolocation-2.xml
@@ -0,0 +1,238 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="47" num_updates="0" admin_epoch="0" cib-last-written="Mon Jun 19 19:33:16 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * A primitive (vip) is optionally anti-colocated (score=-5000) with
+ the promoted instance of a bundle (base-bundle)
+ * The primitive is running on the same node as the bundle's promoted
+ instance
+ * There is no stickiness
+ * There are no location constraints
+ * There are three nodes available for both resources
+
+ In this situation, the primitive should move away from the promoted
+ bundle instance's node.
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687217818"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <bundle id="base-bundle">
+ <meta_attributes id="base-bundle-meta_attributes">
+ <nvpair id="base-bundle-meta_attributes-promotable" name="promotable" value="true"/>
+ <nvpair id="base-bundle-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3121"/>
+ <primitive id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="base-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="base-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ <primitive class="ocf" id="vip" provider="heartbeat" type="IPaddr2">
+ <instance_attributes id="vip-instance_attributes">
+ <nvpair id="vip-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
+ <nvpair id="vip-instance_attributes-ip" name="ip" value="192.168.22.81"/>
+ </instance_attributes>
+ <operations>
+ <op id="vip-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="vip-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="vip-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_colocation id="colocation-vip-base-bundle--5000" rsc="vip" score="-5000" with-rsc="base-bundle" with-rsc-role="Promoted"/>
+ </constraints>
+ <fencing-topology/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-master-base" name="master-base" value="11"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="55" rc-code="0" op-status="0" interval="0" last-rc-change="1687222219" exec-time="0" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="2:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;2:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="38" rc-code="0" op-status="0" interval="120000" last-rc-change="1687217955" exec-time="43" queue-time="0" op-digest="8ca455a603cbe2dc8d68703e63c272f1"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="50:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;50:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="57" rc-code="0" op-status="0" interval="0" last-rc-change="1687228394" exec-time="47" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ <lrm_rsc_op id="vip_monitor_10000" operation_key="vip_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="52:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;52:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="53" rc-code="0" op-status="0" interval="10000" last-rc-change="1687222197" exec-time="38" queue-time="0" op-digest="20f7173b4af9ab62392ae5d9e5243580"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;8:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="49" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="103" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="6:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;6:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="45" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="82" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="9:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;9:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687219636" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="7:28:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;7:28:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="40" rc-code="0" op-status="0" interval="0" last-rc-change="1687218074" exec-time="795" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ <lrm_rsc_op id="base-bundle-podman-0_monitor_60000" operation_key="base-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;8:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="41" rc-code="0" op-status="0" interval="60000" last-rc-change="1687218074" exec-time="166" queue-time="0" op-digest="902512fcf3e4556d9585c44184665d8c"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="7:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;7:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="4" rc-code="7" op-status="0" interval="0" last-rc-change="1687219403" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="9:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;9:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1687218074" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-0_monitor_30000" operation_key="base-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;11:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="3" rc-code="0" op-status="0" interval="30000" last-rc-change="1687218075" exec-time="0" queue-time="0" op-digest="6f3c7e233bacb8420fef5f9581190d00"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="controld_update_resource_history" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-master-base" name="master-base" value="12"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="58" rc-code="0" op-status="0" interval="0" last-rc-change="1687226745" exec-time="33" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="59" rc-code="0" op-status="0" interval="120000" last-rc-change="1687226745" exec-time="27" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="50:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;50:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="53" rc-code="0" op-status="0" interval="0" last-rc-change="1687222197" exec-time="47" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ <lrm_rsc_op id="vip_monitor_10000" operation_key="vip_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="26:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;26:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="37" rc-code="0" op-status="0" interval="10000" last-rc-change="1687217955" exec-time="42" queue-time="0" op-digest="20f7173b4af9ab62392ae5d9e5243580"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;12:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="47" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="69" queue-time="1" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="20:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;20:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="48" rc-code="0" op-status="0" interval="0" last-rc-change="1687219402" exec-time="665" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ <lrm_rsc_op id="base-bundle-podman-1_monitor_60000" operation_key="base-bundle-podman-1_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="21:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;21:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="49" rc-code="0" op-status="0" interval="60000" last-rc-change="1687219403" exec-time="158" queue-time="0" op-digest="f0ef4729d120aa3f5d938cabca4d06c7"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="10:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;10:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687219636" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="2:27:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;2:27:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="39" rc-code="0" op-status="0" interval="0" last-rc-change="1687217956" exec-time="161" queue-time="0" op-digest="bbac36b73a7a6604aefdd2cb3b5f42e6"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="22:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;22:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1687219403" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-1_monitor_30000" operation_key="base-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="20:34:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;20:34:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687219404" exec-time="0" queue-time="0" op-digest="3929eec440004bca31f813a8e6097506"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="3:7:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;3:7:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687217701" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-0" uname="base-bundle-0" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="base-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="16:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;16:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687218075" exec-time="307" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687218076" exec-time="15" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-0">
+ <instance_attributes id="status-base-bundle-0">
+ <nvpair id="status-base-bundle-0-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="3" uname="node3" crmd="online" crm-debug-origin="controld_update_resource_history" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-master-base" name="master-base" value="13"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="5" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="29:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;29:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="24" rc-code="0" op-status="0" interval="0" last-rc-change="1687219634" exec-time="936" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ <lrm_rsc_op id="base-bundle-podman-2_monitor_60000" operation_key="base-bundle-podman-2_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="30:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;30:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="25" rc-code="0" op-status="0" interval="60000" last-rc-change="1687219635" exec-time="173" queue-time="0" op-digest="8eeca5a30b14f3d9ef7d2ddbd16c2e05"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="51:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;51:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="27" rc-code="0" op-status="0" interval="0" last-rc-change="1687228393" exec-time="58" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ <lrm_rsc_op id="vip_monitor_10000" operation_key="vip_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="52:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;52:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="28" rc-code="0" op-status="0" interval="10000" last-rc-change="1687228393" exec-time="35" queue-time="0" op-digest="20f7173b4af9ab62392ae5d9e5243580"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="14:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;14:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="14" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="107" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="31:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;31:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="4" rc-code="0" op-status="0" interval="0" last-rc-change="1687219635" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-2_monitor_30000" operation_key="base-bundle-2_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="25:37:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;25:37:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="5" rc-code="0" op-status="0" interval="30000" last-rc-change="1687219637" exec-time="0" queue-time="0" op-digest="354b9acaa7ea1113d708dc11a1d6bbfa"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;12:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="10" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="104" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="15:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;15:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="2" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="13:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;13:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-1" uname="base-bundle-1" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_demote_0" operation="demote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:55:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;35:55:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="86" rc-code="0" op-status="0" interval="0" last-rc-change="1687220328" exec-time="221" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="92" rc-code="0" op-status="0" interval="16000" last-rc-change="1687220329" exec-time="43" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-1">
+ <instance_attributes id="status-base-bundle-1">
+ <nvpair id="status-base-bundle-1-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-2" uname="base-bundle-2" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-2">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="32:43:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;32:43:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="30" rc-code="0" op-status="0" interval="0" last-rc-change="1687220058" exec-time="222" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_15000" operation_key="base_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:8;33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="36" rc-code="8" op-status="0" interval="15000" last-rc-change="1687220059" exec-time="13" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-2">
+ <instance_attributes id="status-base-bundle-2">
+ <nvpair id="status-base-bundle-2-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/bundle-promoted-anticolocation-3.xml b/cts/scheduler/xml/bundle-promoted-anticolocation-3.xml
new file mode 100644
index 0000000..f954ae1
--- /dev/null
+++ b/cts/scheduler/xml/bundle-promoted-anticolocation-3.xml
@@ -0,0 +1,238 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="47" num_updates="0" admin_epoch="0" cib-last-written="Mon Jun 19 19:33:16 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * The promoted instance of a bundle (base-bundle) is mandatorily
+ anti-colocated with a primitive (vip)
+ * The bundle's promoted instance is running on the same node as the
+ primitive
+ * There is no stickiness
+ * There are no location constraints
+ * There are three nodes available for both resources
+
+ In this situation, the bundle should demote its currently promoted
+ instance and promote an instance elsewhere.
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687217818"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <bundle id="base-bundle">
+ <meta_attributes id="base-bundle-meta_attributes">
+ <nvpair id="base-bundle-meta_attributes-promotable" name="promotable" value="true"/>
+ <nvpair id="base-bundle-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3121"/>
+ <primitive id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="base-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="base-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ <primitive class="ocf" id="vip" provider="heartbeat" type="IPaddr2">
+ <instance_attributes id="vip-instance_attributes">
+ <nvpair id="vip-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
+ <nvpair id="vip-instance_attributes-ip" name="ip" value="192.168.22.81"/>
+ </instance_attributes>
+ <operations>
+ <op id="vip-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="vip-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="vip-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_colocation id="colocation-base-bundle-vip--INFINITY" rsc="base-bundle" rsc-role="Promoted" score="-INFINITY" with-rsc="vip"/>
+ </constraints>
+ <fencing-topology/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-master-base" name="master-base" value="11"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="55" rc-code="0" op-status="0" interval="0" last-rc-change="1687222219" exec-time="0" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="2:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;2:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="38" rc-code="0" op-status="0" interval="120000" last-rc-change="1687217955" exec-time="43" queue-time="0" op-digest="8ca455a603cbe2dc8d68703e63c272f1"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="50:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;50:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="57" rc-code="0" op-status="0" interval="0" last-rc-change="1687228394" exec-time="47" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ <lrm_rsc_op id="vip_monitor_10000" operation_key="vip_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="52:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;52:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="53" rc-code="0" op-status="0" interval="10000" last-rc-change="1687222197" exec-time="38" queue-time="0" op-digest="20f7173b4af9ab62392ae5d9e5243580"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;8:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="49" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="103" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="6:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;6:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="45" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="82" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="9:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;9:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687219636" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="7:28:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;7:28:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="40" rc-code="0" op-status="0" interval="0" last-rc-change="1687218074" exec-time="795" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ <lrm_rsc_op id="base-bundle-podman-0_monitor_60000" operation_key="base-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;8:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="41" rc-code="0" op-status="0" interval="60000" last-rc-change="1687218074" exec-time="166" queue-time="0" op-digest="902512fcf3e4556d9585c44184665d8c"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="7:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;7:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="4" rc-code="7" op-status="0" interval="0" last-rc-change="1687219403" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="9:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;9:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1687218074" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-0_monitor_30000" operation_key="base-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;11:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="3" rc-code="0" op-status="0" interval="30000" last-rc-change="1687218075" exec-time="0" queue-time="0" op-digest="6f3c7e233bacb8420fef5f9581190d00"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="controld_update_resource_history" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-master-base" name="master-base" value="12"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="58" rc-code="0" op-status="0" interval="0" last-rc-change="1687226745" exec-time="33" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="59" rc-code="0" op-status="0" interval="120000" last-rc-change="1687226745" exec-time="27" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="50:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;50:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="53" rc-code="0" op-status="0" interval="0" last-rc-change="1687222197" exec-time="47" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ <lrm_rsc_op id="vip_monitor_10000" operation_key="vip_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="26:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;26:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="37" rc-code="0" op-status="0" interval="10000" last-rc-change="1687217955" exec-time="42" queue-time="0" op-digest="20f7173b4af9ab62392ae5d9e5243580"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;12:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="47" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="69" queue-time="1" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="20:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;20:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="48" rc-code="0" op-status="0" interval="0" last-rc-change="1687219402" exec-time="665" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ <lrm_rsc_op id="base-bundle-podman-1_monitor_60000" operation_key="base-bundle-podman-1_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="21:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;21:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="49" rc-code="0" op-status="0" interval="60000" last-rc-change="1687219403" exec-time="158" queue-time="0" op-digest="f0ef4729d120aa3f5d938cabca4d06c7"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="10:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;10:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687219636" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="2:27:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;2:27:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="39" rc-code="0" op-status="0" interval="0" last-rc-change="1687217956" exec-time="161" queue-time="0" op-digest="bbac36b73a7a6604aefdd2cb3b5f42e6"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="22:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;22:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1687219403" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-1_monitor_30000" operation_key="base-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="20:34:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;20:34:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687219404" exec-time="0" queue-time="0" op-digest="3929eec440004bca31f813a8e6097506"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="3:7:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;3:7:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687217701" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-0" uname="base-bundle-0" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="base-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="16:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;16:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687218075" exec-time="307" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687218076" exec-time="15" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-0">
+ <instance_attributes id="status-base-bundle-0">
+ <nvpair id="status-base-bundle-0-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="3" uname="node3" crmd="online" crm-debug-origin="controld_update_resource_history" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-master-base" name="master-base" value="13"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="5" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="29:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;29:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="24" rc-code="0" op-status="0" interval="0" last-rc-change="1687219634" exec-time="936" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ <lrm_rsc_op id="base-bundle-podman-2_monitor_60000" operation_key="base-bundle-podman-2_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="30:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;30:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="25" rc-code="0" op-status="0" interval="60000" last-rc-change="1687219635" exec-time="173" queue-time="0" op-digest="8eeca5a30b14f3d9ef7d2ddbd16c2e05"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="51:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;51:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="27" rc-code="0" op-status="0" interval="0" last-rc-change="1687228393" exec-time="58" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ <lrm_rsc_op id="vip_monitor_10000" operation_key="vip_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="52:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;52:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="28" rc-code="0" op-status="0" interval="10000" last-rc-change="1687228393" exec-time="35" queue-time="0" op-digest="20f7173b4af9ab62392ae5d9e5243580"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="14:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;14:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="14" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="107" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="31:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;31:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="4" rc-code="0" op-status="0" interval="0" last-rc-change="1687219635" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-2_monitor_30000" operation_key="base-bundle-2_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="25:37:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;25:37:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="5" rc-code="0" op-status="0" interval="30000" last-rc-change="1687219637" exec-time="0" queue-time="0" op-digest="354b9acaa7ea1113d708dc11a1d6bbfa"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;12:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="10" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="104" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="15:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;15:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="2" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="13:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;13:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-1" uname="base-bundle-1" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_demote_0" operation="demote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:55:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;35:55:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="86" rc-code="0" op-status="0" interval="0" last-rc-change="1687220328" exec-time="221" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="92" rc-code="0" op-status="0" interval="16000" last-rc-change="1687220329" exec-time="43" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-1">
+ <instance_attributes id="status-base-bundle-1">
+ <nvpair id="status-base-bundle-1-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-2" uname="base-bundle-2" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-2">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="32:43:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;32:43:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="30" rc-code="0" op-status="0" interval="0" last-rc-change="1687220058" exec-time="222" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_15000" operation_key="base_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:8;33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="36" rc-code="8" op-status="0" interval="15000" last-rc-change="1687220059" exec-time="13" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-2">
+ <instance_attributes id="status-base-bundle-2">
+ <nvpair id="status-base-bundle-2-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/bundle-promoted-anticolocation-4.xml b/cts/scheduler/xml/bundle-promoted-anticolocation-4.xml
new file mode 100644
index 0000000..8902190
--- /dev/null
+++ b/cts/scheduler/xml/bundle-promoted-anticolocation-4.xml
@@ -0,0 +1,238 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="47" num_updates="0" admin_epoch="0" cib-last-written="Mon Jun 19 19:33:16 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * The promoted instance of a bundle (base-bundle) is optionally
+ anti-colocated (score=-5000) with a primitive (vip)
+ * The bundle's promoted instance is running on the same node as the
+ primitive
+ * There is no stickiness
+ * There are no location constraints
+ * There are three nodes available for both resources
+
+ In this situation, the bundle should demote its currently promoted
+ instance and promote an instance elsewhere.
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687217818"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <bundle id="base-bundle">
+ <meta_attributes id="base-bundle-meta_attributes">
+ <nvpair id="base-bundle-meta_attributes-promotable" name="promotable" value="true"/>
+ <nvpair id="base-bundle-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3121"/>
+ <primitive id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="base-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="base-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ <primitive class="ocf" id="vip" provider="heartbeat" type="IPaddr2">
+ <instance_attributes id="vip-instance_attributes">
+ <nvpair id="vip-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
+ <nvpair id="vip-instance_attributes-ip" name="ip" value="192.168.22.81"/>
+ </instance_attributes>
+ <operations>
+ <op id="vip-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="vip-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="vip-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_colocation id="colocation-base-bundle-vip--5000" rsc="base-bundle" rsc-role="Promoted" score="-5000" with-rsc="vip"/>
+ </constraints>
+ <fencing-topology/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-master-base" name="master-base" value="11"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="55" rc-code="0" op-status="0" interval="0" last-rc-change="1687222219" exec-time="0" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="2:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;2:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="38" rc-code="0" op-status="0" interval="120000" last-rc-change="1687217955" exec-time="43" queue-time="0" op-digest="8ca455a603cbe2dc8d68703e63c272f1"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="50:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;50:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="57" rc-code="0" op-status="0" interval="0" last-rc-change="1687228394" exec-time="47" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ <lrm_rsc_op id="vip_monitor_10000" operation_key="vip_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="52:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;52:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="53" rc-code="0" op-status="0" interval="10000" last-rc-change="1687222197" exec-time="38" queue-time="0" op-digest="20f7173b4af9ab62392ae5d9e5243580"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;8:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="49" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="103" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="6:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;6:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="45" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="82" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="9:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;9:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687219636" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="7:28:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;7:28:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="40" rc-code="0" op-status="0" interval="0" last-rc-change="1687218074" exec-time="795" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ <lrm_rsc_op id="base-bundle-podman-0_monitor_60000" operation_key="base-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;8:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="41" rc-code="0" op-status="0" interval="60000" last-rc-change="1687218074" exec-time="166" queue-time="0" op-digest="902512fcf3e4556d9585c44184665d8c"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="7:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;7:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="4" rc-code="7" op-status="0" interval="0" last-rc-change="1687219403" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="9:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;9:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1687218074" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-0_monitor_30000" operation_key="base-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;11:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="3" rc-code="0" op-status="0" interval="30000" last-rc-change="1687218075" exec-time="0" queue-time="0" op-digest="6f3c7e233bacb8420fef5f9581190d00"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="controld_update_resource_history" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-master-base" name="master-base" value="12"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="58" rc-code="0" op-status="0" interval="0" last-rc-change="1687226745" exec-time="33" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="59" rc-code="0" op-status="0" interval="120000" last-rc-change="1687226745" exec-time="27" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="50:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;50:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="53" rc-code="0" op-status="0" interval="0" last-rc-change="1687222197" exec-time="47" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ <lrm_rsc_op id="vip_monitor_10000" operation_key="vip_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="26:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;26:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="37" rc-code="0" op-status="0" interval="10000" last-rc-change="1687217955" exec-time="42" queue-time="0" op-digest="20f7173b4af9ab62392ae5d9e5243580"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;12:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="47" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="69" queue-time="1" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="20:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;20:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="48" rc-code="0" op-status="0" interval="0" last-rc-change="1687219402" exec-time="665" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ <lrm_rsc_op id="base-bundle-podman-1_monitor_60000" operation_key="base-bundle-podman-1_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="21:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;21:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="49" rc-code="0" op-status="0" interval="60000" last-rc-change="1687219403" exec-time="158" queue-time="0" op-digest="f0ef4729d120aa3f5d938cabca4d06c7"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="10:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;10:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687219636" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="2:27:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;2:27:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="39" rc-code="0" op-status="0" interval="0" last-rc-change="1687217956" exec-time="161" queue-time="0" op-digest="bbac36b73a7a6604aefdd2cb3b5f42e6"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="22:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;22:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1687219403" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-1_monitor_30000" operation_key="base-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="20:34:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;20:34:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687219404" exec-time="0" queue-time="0" op-digest="3929eec440004bca31f813a8e6097506"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="3:7:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;3:7:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687217701" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-0" uname="base-bundle-0" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="base-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="16:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;16:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687218075" exec-time="307" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687218076" exec-time="15" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-0">
+ <instance_attributes id="status-base-bundle-0">
+ <nvpair id="status-base-bundle-0-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="3" uname="node3" crmd="online" crm-debug-origin="controld_update_resource_history" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-master-base" name="master-base" value="13"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="5" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="29:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;29:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="24" rc-code="0" op-status="0" interval="0" last-rc-change="1687219634" exec-time="936" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ <lrm_rsc_op id="base-bundle-podman-2_monitor_60000" operation_key="base-bundle-podman-2_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="30:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;30:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="25" rc-code="0" op-status="0" interval="60000" last-rc-change="1687219635" exec-time="173" queue-time="0" op-digest="8eeca5a30b14f3d9ef7d2ddbd16c2e05"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="51:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;51:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="27" rc-code="0" op-status="0" interval="0" last-rc-change="1687228393" exec-time="58" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ <lrm_rsc_op id="vip_monitor_10000" operation_key="vip_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="52:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;52:71:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="28" rc-code="0" op-status="0" interval="10000" last-rc-change="1687228393" exec-time="35" queue-time="0" op-digest="20f7173b4af9ab62392ae5d9e5243580"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="14:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;14:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="14" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="107" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="31:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;31:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="4" rc-code="0" op-status="0" interval="0" last-rc-change="1687219635" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-2_monitor_30000" operation_key="base-bundle-2_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="25:37:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;25:37:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="5" rc-code="0" op-status="0" interval="30000" last-rc-change="1687219637" exec-time="0" queue-time="0" op-digest="354b9acaa7ea1113d708dc11a1d6bbfa"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;12:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="10" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="104" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="15:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;15:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="2" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="13:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;13:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-1" uname="base-bundle-1" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_demote_0" operation="demote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:55:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;35:55:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="86" rc-code="0" op-status="0" interval="0" last-rc-change="1687220328" exec-time="221" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="92" rc-code="0" op-status="0" interval="16000" last-rc-change="1687220329" exec-time="43" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-1">
+ <instance_attributes id="status-base-bundle-1">
+ <nvpair id="status-base-bundle-1-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-2" uname="base-bundle-2" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-2">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="32:43:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;32:43:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="30" rc-code="0" op-status="0" interval="0" last-rc-change="1687220058" exec-time="222" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_15000" operation_key="base_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:8;33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="36" rc-code="8" op-status="0" interval="15000" last-rc-change="1687220059" exec-time="13" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-2">
+ <instance_attributes id="status-base-bundle-2">
+ <nvpair id="status-base-bundle-2-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/bundle-promoted-anticolocation-5.xml b/cts/scheduler/xml/bundle-promoted-anticolocation-5.xml
new file mode 100644
index 0000000..b960ac5
--- /dev/null
+++ b/cts/scheduler/xml/bundle-promoted-anticolocation-5.xml
@@ -0,0 +1,368 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="61" num_updates="0" admin_epoch="0" cib-last-written="Tue Jun 20 13:17:35 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * The promoted instance of a bundle (bundle-a) is mandatorily
+ anti-colocated with the promoted instance of another bundle
+ (bundle-b)
+ * bundle-a's promoted instance is running on the same node as
+ bundle-b's promoted instance
+ * There is no stickiness
+ * There are no location constraints
+ * There are three nodes available for both resources
+
+ In this situation, bundle-a should demote its currently promoted
+ instance and promote an instance elsewhere.
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687288330"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <bundle id="bundle-a">
+ <meta_attributes id="bundle-a-meta_attributes">
+ <nvpair id="bundle-a-meta_attributes-promotable" name="promotable" value="true"/>
+ <nvpair id="bundle-a-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3121"/>
+ <primitive id="bundle-a-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="bundle-a-rsc-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="bundle-a-rsc-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ <bundle id="bundle-b">
+ <meta_attributes id="bundle-b-meta_attributes">
+ <nvpair id="bundle-b-meta_attributes-interleave" name="interleave" value="true"/>
+ <nvpair id="bundle-b-meta_attributes-promotable" name="promotable" value="true"/>
+ <nvpair id="bundle-b-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3122"/>
+ <primitive id="bundle-b-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="bundle-b-rsc-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="bundle-b-rsc-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ </resources>
+ <constraints>
+ <rsc_colocation id="colocation-bundle-a-bundle-b--INFINITY" rsc="bundle-a" rsc-role="Promoted" score="-INFINITY" with-rsc="bundle-b" with-rsc-role="Promoted"/>
+ </constraints>
+ <fencing-topology/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_state_transition" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-promotion-bundle-a-rsc" name="master-bundle-a-rsc" value="11"/>
+ <nvpair id="status-1-promotion-bundle-b-rsc" name="master-bundle-b-rsc" value="12"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-2_last_0" operation_key="bundle-b-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="4:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;4:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="132" rc-code="7" op-status="0" interval="0" last-rc-change="1687288263" exec-time="237" queue-time="0" op-digest="4e4b17530bea4f6fd27df1cf68bd9f22"/>
+ </lrm_resource>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="55" rc-code="0" op-status="0" interval="0" last-rc-change="1687222219" exec-time="0" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-0" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-0">
+ <lrm_rsc_op id="bundle-a-0_last_0" operation_key="bundle-a-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="13:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;13:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="19" rc-code="0" op-status="0" interval="0" last-rc-change="1687288332" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="bundle-a-0_monitor_30000" operation_key="bundle-a-0_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="22:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;22:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="20" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288333" exec-time="0" queue-time="0" op-digest="6f3c7e233bacb8420fef5f9581190d00"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-1_last_0" operation_key="bundle-a-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="3:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;3:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="117" rc-code="7" op-status="0" interval="0" last-rc-change="1687288204" exec-time="106" queue-time="0" op-digest="90f1390dea2a7bca12099b080987eae7"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-0_last_0" operation_key="bundle-b-podman-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="45:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;45:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="144" rc-code="0" op-status="0" interval="0" last-rc-change="1687288330" exec-time="1158" queue-time="0" op-digest="94098b91d583b0b8498741508c609a37"/>
+ <lrm_rsc_op id="bundle-b-podman-0_monitor_60000" operation_key="bundle-b-podman-0_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="46:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;46:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="146" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288332" exec-time="182" queue-time="0" op-digest="b7a9f966374c886087fb6f03d6e4c286"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-0" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-0">
+ <lrm_rsc_op id="bundle-b-0_last_0" operation_key="bundle-b-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="47:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;47:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1687288332" exec-time="0" queue-time="0" op-digest="06b6a770601e99a2d691d4cf853acceb" op-force-restart=" port server " op-restart-digest="5b00a5a6fae3dc7e25288679ad0c92ce"/>
+ <lrm_rsc_op id="bundle-b-0_monitor_30000" operation_key="bundle-b-0_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="57:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;57:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="21" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288333" exec-time="0" queue-time="0" op-digest="b4ca332a542717c28b117ec80e27b838"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-1" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-1">
+ <lrm_rsc_op id="bundle-a-1_last_0" operation_key="bundle-a-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;8:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="10" rc-code="7" op-status="0" interval="0" last-rc-change="1687248552" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-2_last_0" operation_key="bundle-a-podman-2_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="17:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;17:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="107" rc-code="0" op-status="0" interval="0" last-rc-change="1687248553" exec-time="10540" queue-time="0" op-digest="a5415a2b8dfba2a5741d70bab5591c1b"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-1" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-1">
+ <lrm_rsc_op id="bundle-b-1_last_0" operation_key="bundle-b-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="28:99:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;28:99:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687248540" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-1_last_0" operation_key="bundle-b-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="3:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;3:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="128" rc-code="7" op-status="0" interval="0" last-rc-change="1687288263" exec-time="240" queue-time="0" op-digest="e1aa4eab4c9e9334ee91c66cce449b13"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-0_last_0" operation_key="bundle-a-podman-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;11:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="145" rc-code="0" op-status="0" interval="0" last-rc-change="1687288331" exec-time="1143" queue-time="0" op-digest="bc99a652606652014a6cb5f743b5d75d"/>
+ <lrm_rsc_op id="bundle-a-podman-0_monitor_60000" operation_key="bundle-a-podman-0_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="147" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288332" exec-time="161" queue-time="0" op-digest="d39778305ffa599cc7f7a94b47d18783"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-2" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-2">
+ <lrm_rsc_op id="bundle-b-2_last_0" operation_key="bundle-b-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="37:97:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;37:97:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="6" rc-code="7" op-status="0" interval="0" last-rc-change="1687248528" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-2" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-2">
+ <lrm_rsc_op id="bundle-a-2_last_0" operation_key="bundle-a-2_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="18:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;18:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1687248553" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-promotion-bundle-a-rsc" name="master-bundle-a-rsc" value="12"/>
+ <nvpair id="status-2-promotion-bundle-b-rsc" name="master-bundle-b-rsc" value="13"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-2_last_0" operation_key="bundle-b-podman-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="53:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;53:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="141" rc-code="0" op-status="0" interval="0" last-rc-change="1687288330" exec-time="752" queue-time="0" op-digest="35320d55914b88e0c9d4d13b574cef8a"/>
+ <lrm_rsc_op id="bundle-b-podman-2_monitor_60000" operation_key="bundle-b-podman-2_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="54:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;54:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="142" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288331" exec-time="120" queue-time="0" op-digest="62d15df41b3f596ca94b0dbe7edda857"/>
+ </lrm_resource>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="58" rc-code="0" op-status="0" interval="0" last-rc-change="1687226745" exec-time="33" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="59" rc-code="0" op-status="0" interval="120000" last-rc-change="1687226745" exec-time="27" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-0" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-0">
+ <lrm_rsc_op id="bundle-a-0_last_0" operation_key="bundle-a-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:101:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;11:101:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687248551" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-1_last_0" operation_key="bundle-a-podman-1_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="13:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;13:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="109" rc-code="0" op-status="0" interval="0" last-rc-change="1687248553" exec-time="10646" queue-time="0" op-digest="90f1390dea2a7bca12099b080987eae7"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-0_last_0" operation_key="bundle-b-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="5:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;5:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="126" rc-code="7" op-status="0" interval="0" last-rc-change="1687288263" exec-time="124" queue-time="0" op-digest="cad283aeb094013845bac465ab6a198d"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-0" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-0">
+ <lrm_rsc_op id="bundle-b-0_last_0" operation_key="bundle-b-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="13:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;13:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687248552" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-1" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-1">
+ <lrm_rsc_op id="bundle-a-1_last_0" operation_key="bundle-a-1_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="14:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;14:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1687248553" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-2_last_0" operation_key="bundle-a-podman-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="21:154:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;21:154:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="135" rc-code="0" op-status="0" interval="0" last-rc-change="1687288263" exec-time="1059" queue-time="0" op-digest="a5415a2b8dfba2a5741d70bab5591c1b"/>
+ <lrm_rsc_op id="bundle-a-podman-2_monitor_60000" operation_key="bundle-a-podman-2_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="19:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;19:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="137" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288265" exec-time="284" queue-time="0" op-digest="77326e7c2187a5c4fe386812dfcd4118"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-1" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-1">
+ <lrm_rsc_op id="bundle-b-1_last_0" operation_key="bundle-b-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="33:99:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;33:99:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687248540" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-1_last_0" operation_key="bundle-b-podman-1_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="4:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;4:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="140" rc-code="0" op-status="0" interval="0" last-rc-change="1687288267" exec-time="216" queue-time="0" op-digest="e1aa4eab4c9e9334ee91c66cce449b13"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-0_last_0" operation_key="bundle-a-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="4:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;4:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="115" rc-code="7" op-status="0" interval="0" last-rc-change="1687288204" exec-time="101" queue-time="0" op-digest="bc99a652606652014a6cb5f743b5d75d"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-2" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-2">
+ <lrm_rsc_op id="bundle-b-2_last_0" operation_key="bundle-b-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="55:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;55:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="17" rc-code="0" op-status="0" interval="0" last-rc-change="1687288331" exec-time="0" queue-time="0" op-digest="077939dc4a5dbbd799045f9120d057b4" op-force-restart=" port server " op-restart-digest="5b00a5a6fae3dc7e25288679ad0c92ce"/>
+ <lrm_rsc_op id="bundle-b-2_monitor_30000" operation_key="bundle-b-2_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="67:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;67:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="18" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288333" exec-time="0" queue-time="0" op-digest="f9f13135e91dabbc6c77a3fd4b23ab80"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-2" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-2">
+ <lrm_rsc_op id="bundle-a-2_last_0" operation_key="bundle-a-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="20:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;20:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="15" rc-code="0" op-status="0" interval="0" last-rc-change="1687288265" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="bundle-a-2_monitor_30000" operation_key="bundle-a-2_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="23:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;23:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="16" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288267" exec-time="0" queue-time="0" op-digest="3929eec440004bca31f813a8e6097506"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-0" uname="base-bundle-0" in_ccm="false" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="base-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="124:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;124:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="1035" rc-code="0" op-status="0" interval="0" last-rc-change="1687248529" exec-time="180" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687218076" exec-time="15" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="3" uname="node3" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-promotion-bundle-a-rsc" name="master-bundle-a-rsc" value="13"/>
+ <nvpair id="status-3-promotion-bundle-b-rsc" name="master-bundle-b-rsc" value="14"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-2_last_0" operation_key="bundle-b-podman-2_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="6:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;6:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="111" rc-code="0" op-status="0" interval="0" last-rc-change="1687288267" exec-time="243" queue-time="0" op-digest="4e4b17530bea4f6fd27df1cf68bd9f22"/>
+ </lrm_resource>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="5" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-0" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-0">
+ <lrm_rsc_op id="bundle-a-0_last_0" operation_key="bundle-a-0_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="10:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;10:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1687248552" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-1_last_0" operation_key="bundle-a-podman-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="17:154:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;17:154:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="106" rc-code="0" op-status="0" interval="0" last-rc-change="1687288263" exec-time="1002" queue-time="0" op-digest="90f1390dea2a7bca12099b080987eae7"/>
+ <lrm_rsc_op id="bundle-a-podman-1_monitor_60000" operation_key="bundle-a-podman-1_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="14:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;14:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="108" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288264" exec-time="156" queue-time="0" op-digest="34c506d61f8ef4e0d77193cce6e838ce"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-0_last_0" operation_key="bundle-b-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;8:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="97" rc-code="7" op-status="0" interval="0" last-rc-change="1687288263" exec-time="119" queue-time="0" op-digest="cad283aeb094013845bac465ab6a198d"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-0" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-0">
+ <lrm_rsc_op id="bundle-b-0_last_0" operation_key="bundle-b-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="16:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;16:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687248551" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-2_last_0" operation_key="bundle-a-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="7:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;7:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="90" rc-code="7" op-status="0" interval="0" last-rc-change="1687288203" exec-time="78" queue-time="0" op-digest="a5415a2b8dfba2a5741d70bab5591c1b"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-1" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-1">
+ <lrm_rsc_op id="bundle-b-1_last_0" operation_key="bundle-b-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="51:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;51:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="17" rc-code="0" op-status="0" interval="0" last-rc-change="1687288331" exec-time="0" queue-time="0" op-digest="aeee9bdab74d90d67c45ff8a2e53b020" op-force-restart=" port server " op-restart-digest="5b00a5a6fae3dc7e25288679ad0c92ce"/>
+ <lrm_rsc_op id="bundle-b-1_monitor_30000" operation_key="bundle-b-1_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="62:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;62:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="18" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288333" exec-time="0" queue-time="0" op-digest="6a12d3c71ed94e6f961711073ca69f24"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-1_last_0" operation_key="bundle-b-podman-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="49:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;49:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="112" rc-code="0" op-status="0" interval="0" last-rc-change="1687288330" exec-time="821" queue-time="0" op-digest="9ba708e8e80ce11a8a1ca4908e76e75e"/>
+ <lrm_rsc_op id="bundle-b-podman-1_monitor_60000" operation_key="bundle-b-podman-1_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="50:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;50:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="113" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288331" exec-time="171" queue-time="0" op-digest="12e3bf09305d4a76f940376759128ae1"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-1" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-1">
+ <lrm_rsc_op id="bundle-a-1_last_0" operation_key="bundle-a-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="15:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;15:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="15" rc-code="0" op-status="0" interval="0" last-rc-change="1687288264" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="bundle-a-1_monitor_30000" operation_key="bundle-a-1_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="18:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;18:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="16" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288267" exec-time="0" queue-time="0" op-digest="354b9acaa7ea1113d708dc11a1d6bbfa"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-0_last_0" operation_key="bundle-a-podman-0_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="9:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;9:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="80" rc-code="0" op-status="0" interval="0" last-rc-change="1687248552" exec-time="10639" queue-time="0" op-digest="bc99a652606652014a6cb5f743b5d75d"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-2" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-2">
+ <lrm_rsc_op id="bundle-b-2_last_0" operation_key="bundle-b-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="49:97:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;49:97:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="6" rc-code="7" op-status="0" interval="0" last-rc-change="1687248528" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-2" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-2">
+ <lrm_rsc_op id="bundle-a-2_last_0" operation_key="bundle-a-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="15:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;15:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="11" rc-code="7" op-status="0" interval="0" last-rc-change="1687248551" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-1" uname="base-bundle-1" in_ccm="false" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="base-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="125:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;125:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="1039" rc-code="0" op-status="0" interval="0" last-rc-change="1687248529" exec-time="177" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="92" rc-code="0" op-status="0" interval="16000" last-rc-change="1687220329" exec-time="43" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-2" uname="base-bundle-2" in_ccm="false" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="base-bundle-2">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="126:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;126:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="991" rc-code="0" op-status="0" interval="0" last-rc-change="1687248528" exec-time="201" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_15000" operation_key="base_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:8;33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="36" rc-code="8" op-status="0" interval="15000" last-rc-change="1687220059" exec-time="13" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="bundle-a-1" uname="bundle-a-1" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="bundle-a-1">
+ <lrm_resources>
+ <lrm_resource id="bundle-a-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-a-rsc_last_0" operation_key="bundle-a-rsc_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="40:168:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;40:168:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="25" rc-code="0" op-status="0" interval="0" last-rc-change="1687288549" exec-time="216" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-a-rsc_monitor_15000" operation_key="bundle-a-rsc_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="40:169:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:8;40:169:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="31" rc-code="8" op-status="0" interval="15000" last-rc-change="1687288549" exec-time="9" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-a-1">
+ <instance_attributes id="status-bundle-a-1">
+ <nvpair id="status-bundle-a-1-master-bundle-a-rsc" name="master-bundle-a-rsc" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="bundle-b-2" uname="bundle-b-2" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="bundle-b-2">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-b-rsc_last_0" operation_key="bundle-b-rsc_demote_0" operation="demote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="79:177:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;79:177:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="153" rc-code="0" op-status="0" interval="0" last-rc-change="1687292227" exec-time="217" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-b-rsc_monitor_16000" operation_key="bundle-b-rsc_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="78:178:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;78:178:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="159" rc-code="0" op-status="0" interval="16000" last-rc-change="1687292227" exec-time="10" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-b-2">
+ <instance_attributes id="status-bundle-b-2">
+ <nvpair id="status-bundle-b-2-master-bundle-b-rsc" name="master-bundle-b-rsc" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="bundle-b-0" uname="bundle-b-0" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="bundle-b-0">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-b-rsc_last_0" operation_key="bundle-b-rsc_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="69:163:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;69:163:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687288333" exec-time="290" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-b-rsc_monitor_16000" operation_key="bundle-b-rsc_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="71:164:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;71:164:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687288334" exec-time="12" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-b-0">
+ <instance_attributes id="status-bundle-b-0">
+ <nvpair id="status-bundle-b-0-master-bundle-b-rsc" name="master-bundle-b-rsc" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="bundle-b-1" uname="bundle-b-1" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="bundle-b-1">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-b-rsc_last_0" operation_key="bundle-b-rsc_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="74:178:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;74:178:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="145" rc-code="0" op-status="0" interval="0" last-rc-change="1687292227" exec-time="200" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-b-rsc_monitor_15000" operation_key="bundle-b-rsc_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="77:179:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:8;77:179:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="151" rc-code="8" op-status="0" interval="15000" last-rc-change="1687292227" exec-time="32" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-b-1">
+ <instance_attributes id="status-bundle-b-1">
+ <nvpair id="status-bundle-b-1-master-bundle-b-rsc" name="master-bundle-b-rsc" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="bundle-a-0" uname="bundle-a-0" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="bundle-a-0">
+ <lrm_resources>
+ <lrm_resource id="bundle-a-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-a-rsc_last_0" operation_key="bundle-a-rsc_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="39:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;39:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687288333" exec-time="250" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-a-rsc_monitor_16000" operation_key="bundle-a-rsc_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="36:163:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;36:163:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687288333" exec-time="10" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-a-0">
+ <instance_attributes id="status-bundle-a-0">
+ <nvpair id="status-bundle-a-0-master-bundle-a-rsc" name="master-bundle-a-rsc" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="bundle-a-2" uname="bundle-a-2" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="bundle-a-2">
+ <lrm_resources>
+ <lrm_resource id="bundle-a-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-a-rsc_last_0" operation_key="bundle-a-rsc_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="29:157:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;29:157:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687288268" exec-time="236" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-a-rsc_monitor_16000" operation_key="bundle-a-rsc_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="30:158:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;30:158:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687288268" exec-time="10" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-a-2">
+ <instance_attributes id="status-bundle-a-2">
+ <nvpair id="status-bundle-a-2-master-bundle-a-rsc" name="master-bundle-a-rsc" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/bundle-promoted-anticolocation-6.xml b/cts/scheduler/xml/bundle-promoted-anticolocation-6.xml
new file mode 100644
index 0000000..6cc80e4
--- /dev/null
+++ b/cts/scheduler/xml/bundle-promoted-anticolocation-6.xml
@@ -0,0 +1,368 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="61" num_updates="0" admin_epoch="0" cib-last-written="Tue Jun 20 13:17:35 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * The promoted instance of a bundle (bundle-a) is optionally
+ anti-colocated (score=-5000) with the promoted instance of another
+ bundle (bundle-b)
+ * bundle-a's promoted instance is running on the same node as
+ bundle-b's promoted instance
+ * There is no stickiness
+ * There are no location constraints
+ * There are three nodes available for both resources
+
+ In this situation, bundle-a should demote its currently promoted
+ instance and promote an instance elsewhere.
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687288330"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <bundle id="bundle-a">
+ <meta_attributes id="bundle-a-meta_attributes">
+ <nvpair id="bundle-a-meta_attributes-promotable" name="promotable" value="true"/>
+ <nvpair id="bundle-a-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3121"/>
+ <primitive id="bundle-a-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="bundle-a-rsc-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="bundle-a-rsc-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ <bundle id="bundle-b">
+ <meta_attributes id="bundle-b-meta_attributes">
+ <nvpair id="bundle-b-meta_attributes-interleave" name="interleave" value="true"/>
+ <nvpair id="bundle-b-meta_attributes-promotable" name="promotable" value="true"/>
+ <nvpair id="bundle-b-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3122"/>
+ <primitive id="bundle-b-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="bundle-b-rsc-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="bundle-b-rsc-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ </resources>
+ <constraints>
+ <rsc_colocation id="colocation-bundle-a-bundle-b--5000" rsc="bundle-a" rsc-role="Promoted" score="-5000" with-rsc="bundle-b" with-rsc-role="Promoted"/>
+ </constraints>
+ <fencing-topology/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_state_transition" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-promotion-bundle-a-rsc" name="master-bundle-a-rsc" value="11"/>
+ <nvpair id="status-1-promotion-bundle-b-rsc" name="master-bundle-b-rsc" value="12"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-2_last_0" operation_key="bundle-b-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="4:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;4:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="132" rc-code="7" op-status="0" interval="0" last-rc-change="1687288263" exec-time="237" queue-time="0" op-digest="4e4b17530bea4f6fd27df1cf68bd9f22"/>
+ </lrm_resource>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="55" rc-code="0" op-status="0" interval="0" last-rc-change="1687222219" exec-time="0" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-0" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-0">
+ <lrm_rsc_op id="bundle-a-0_last_0" operation_key="bundle-a-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="13:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;13:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="19" rc-code="0" op-status="0" interval="0" last-rc-change="1687288332" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="bundle-a-0_monitor_30000" operation_key="bundle-a-0_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="22:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;22:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="20" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288333" exec-time="0" queue-time="0" op-digest="6f3c7e233bacb8420fef5f9581190d00"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-1_last_0" operation_key="bundle-a-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="3:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;3:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="117" rc-code="7" op-status="0" interval="0" last-rc-change="1687288204" exec-time="106" queue-time="0" op-digest="90f1390dea2a7bca12099b080987eae7"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-0_last_0" operation_key="bundle-b-podman-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="45:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;45:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="144" rc-code="0" op-status="0" interval="0" last-rc-change="1687288330" exec-time="1158" queue-time="0" op-digest="94098b91d583b0b8498741508c609a37"/>
+ <lrm_rsc_op id="bundle-b-podman-0_monitor_60000" operation_key="bundle-b-podman-0_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="46:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;46:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="146" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288332" exec-time="182" queue-time="0" op-digest="b7a9f966374c886087fb6f03d6e4c286"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-0" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-0">
+ <lrm_rsc_op id="bundle-b-0_last_0" operation_key="bundle-b-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="47:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;47:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1687288332" exec-time="0" queue-time="0" op-digest="06b6a770601e99a2d691d4cf853acceb" op-force-restart=" port server " op-restart-digest="5b00a5a6fae3dc7e25288679ad0c92ce"/>
+ <lrm_rsc_op id="bundle-b-0_monitor_30000" operation_key="bundle-b-0_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="57:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;57:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="21" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288333" exec-time="0" queue-time="0" op-digest="b4ca332a542717c28b117ec80e27b838"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-1" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-1">
+ <lrm_rsc_op id="bundle-a-1_last_0" operation_key="bundle-a-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;8:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="10" rc-code="7" op-status="0" interval="0" last-rc-change="1687248552" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-2_last_0" operation_key="bundle-a-podman-2_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="17:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;17:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="107" rc-code="0" op-status="0" interval="0" last-rc-change="1687248553" exec-time="10540" queue-time="0" op-digest="a5415a2b8dfba2a5741d70bab5591c1b"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-1" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-1">
+ <lrm_rsc_op id="bundle-b-1_last_0" operation_key="bundle-b-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="28:99:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;28:99:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687248540" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-1_last_0" operation_key="bundle-b-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="3:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;3:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="128" rc-code="7" op-status="0" interval="0" last-rc-change="1687288263" exec-time="240" queue-time="0" op-digest="e1aa4eab4c9e9334ee91c66cce449b13"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-0_last_0" operation_key="bundle-a-podman-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;11:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="145" rc-code="0" op-status="0" interval="0" last-rc-change="1687288331" exec-time="1143" queue-time="0" op-digest="bc99a652606652014a6cb5f743b5d75d"/>
+ <lrm_rsc_op id="bundle-a-podman-0_monitor_60000" operation_key="bundle-a-podman-0_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="147" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288332" exec-time="161" queue-time="0" op-digest="d39778305ffa599cc7f7a94b47d18783"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-2" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-2">
+ <lrm_rsc_op id="bundle-b-2_last_0" operation_key="bundle-b-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="37:97:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;37:97:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="6" rc-code="7" op-status="0" interval="0" last-rc-change="1687248528" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-2" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-2">
+ <lrm_rsc_op id="bundle-a-2_last_0" operation_key="bundle-a-2_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="18:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;18:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1687248553" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-promotion-bundle-a-rsc" name="master-bundle-a-rsc" value="12"/>
+ <nvpair id="status-2-promotion-bundle-b-rsc" name="master-bundle-b-rsc" value="13"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-2_last_0" operation_key="bundle-b-podman-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="53:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;53:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="141" rc-code="0" op-status="0" interval="0" last-rc-change="1687288330" exec-time="752" queue-time="0" op-digest="35320d55914b88e0c9d4d13b574cef8a"/>
+ <lrm_rsc_op id="bundle-b-podman-2_monitor_60000" operation_key="bundle-b-podman-2_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="54:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;54:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="142" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288331" exec-time="120" queue-time="0" op-digest="62d15df41b3f596ca94b0dbe7edda857"/>
+ </lrm_resource>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="58" rc-code="0" op-status="0" interval="0" last-rc-change="1687226745" exec-time="33" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="59" rc-code="0" op-status="0" interval="120000" last-rc-change="1687226745" exec-time="27" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-0" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-0">
+ <lrm_rsc_op id="bundle-a-0_last_0" operation_key="bundle-a-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:101:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;11:101:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687248551" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-1_last_0" operation_key="bundle-a-podman-1_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="13:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;13:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="109" rc-code="0" op-status="0" interval="0" last-rc-change="1687248553" exec-time="10646" queue-time="0" op-digest="90f1390dea2a7bca12099b080987eae7"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-0_last_0" operation_key="bundle-b-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="5:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;5:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="126" rc-code="7" op-status="0" interval="0" last-rc-change="1687288263" exec-time="124" queue-time="0" op-digest="cad283aeb094013845bac465ab6a198d"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-0" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-0">
+ <lrm_rsc_op id="bundle-b-0_last_0" operation_key="bundle-b-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="13:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;13:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687248552" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-1" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-1">
+ <lrm_rsc_op id="bundle-a-1_last_0" operation_key="bundle-a-1_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="14:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;14:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1687248553" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-2_last_0" operation_key="bundle-a-podman-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="21:154:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;21:154:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="135" rc-code="0" op-status="0" interval="0" last-rc-change="1687288263" exec-time="1059" queue-time="0" op-digest="a5415a2b8dfba2a5741d70bab5591c1b"/>
+ <lrm_rsc_op id="bundle-a-podman-2_monitor_60000" operation_key="bundle-a-podman-2_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="19:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;19:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="137" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288265" exec-time="284" queue-time="0" op-digest="77326e7c2187a5c4fe386812dfcd4118"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-1" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-1">
+ <lrm_rsc_op id="bundle-b-1_last_0" operation_key="bundle-b-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="33:99:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;33:99:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687248540" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-1_last_0" operation_key="bundle-b-podman-1_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="4:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;4:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="140" rc-code="0" op-status="0" interval="0" last-rc-change="1687288267" exec-time="216" queue-time="0" op-digest="e1aa4eab4c9e9334ee91c66cce449b13"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-0_last_0" operation_key="bundle-a-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="4:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;4:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="115" rc-code="7" op-status="0" interval="0" last-rc-change="1687288204" exec-time="101" queue-time="0" op-digest="bc99a652606652014a6cb5f743b5d75d"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-2" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-2">
+ <lrm_rsc_op id="bundle-b-2_last_0" operation_key="bundle-b-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="55:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;55:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="17" rc-code="0" op-status="0" interval="0" last-rc-change="1687288331" exec-time="0" queue-time="0" op-digest="077939dc4a5dbbd799045f9120d057b4" op-force-restart=" port server " op-restart-digest="5b00a5a6fae3dc7e25288679ad0c92ce"/>
+ <lrm_rsc_op id="bundle-b-2_monitor_30000" operation_key="bundle-b-2_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="67:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;67:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="18" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288333" exec-time="0" queue-time="0" op-digest="f9f13135e91dabbc6c77a3fd4b23ab80"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-2" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-2">
+ <lrm_rsc_op id="bundle-a-2_last_0" operation_key="bundle-a-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="20:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;20:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="15" rc-code="0" op-status="0" interval="0" last-rc-change="1687288265" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="bundle-a-2_monitor_30000" operation_key="bundle-a-2_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="23:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;23:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="16" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288267" exec-time="0" queue-time="0" op-digest="3929eec440004bca31f813a8e6097506"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-0" uname="base-bundle-0" in_ccm="false" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="base-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="124:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;124:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="1035" rc-code="0" op-status="0" interval="0" last-rc-change="1687248529" exec-time="180" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687218076" exec-time="15" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="3" uname="node3" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-promotion-bundle-a-rsc" name="master-bundle-a-rsc" value="13"/>
+ <nvpair id="status-3-promotion-bundle-b-rsc" name="master-bundle-b-rsc" value="14"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-2_last_0" operation_key="bundle-b-podman-2_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="6:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;6:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="111" rc-code="0" op-status="0" interval="0" last-rc-change="1687288267" exec-time="243" queue-time="0" op-digest="4e4b17530bea4f6fd27df1cf68bd9f22"/>
+ </lrm_resource>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="5" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-0" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-0">
+ <lrm_rsc_op id="bundle-a-0_last_0" operation_key="bundle-a-0_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="10:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;10:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1687248552" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-1_last_0" operation_key="bundle-a-podman-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="17:154:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;17:154:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="106" rc-code="0" op-status="0" interval="0" last-rc-change="1687288263" exec-time="1002" queue-time="0" op-digest="90f1390dea2a7bca12099b080987eae7"/>
+ <lrm_rsc_op id="bundle-a-podman-1_monitor_60000" operation_key="bundle-a-podman-1_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="14:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;14:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="108" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288264" exec-time="156" queue-time="0" op-digest="34c506d61f8ef4e0d77193cce6e838ce"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-0_last_0" operation_key="bundle-b-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;8:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="97" rc-code="7" op-status="0" interval="0" last-rc-change="1687288263" exec-time="119" queue-time="0" op-digest="cad283aeb094013845bac465ab6a198d"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-0" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-0">
+ <lrm_rsc_op id="bundle-b-0_last_0" operation_key="bundle-b-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="16:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;16:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687248551" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-2_last_0" operation_key="bundle-a-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="7:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;7:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="90" rc-code="7" op-status="0" interval="0" last-rc-change="1687288203" exec-time="78" queue-time="0" op-digest="a5415a2b8dfba2a5741d70bab5591c1b"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-1" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-1">
+ <lrm_rsc_op id="bundle-b-1_last_0" operation_key="bundle-b-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="51:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;51:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="17" rc-code="0" op-status="0" interval="0" last-rc-change="1687288331" exec-time="0" queue-time="0" op-digest="aeee9bdab74d90d67c45ff8a2e53b020" op-force-restart=" port server " op-restart-digest="5b00a5a6fae3dc7e25288679ad0c92ce"/>
+ <lrm_rsc_op id="bundle-b-1_monitor_30000" operation_key="bundle-b-1_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="62:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;62:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="18" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288333" exec-time="0" queue-time="0" op-digest="6a12d3c71ed94e6f961711073ca69f24"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-1_last_0" operation_key="bundle-b-podman-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="49:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;49:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="112" rc-code="0" op-status="0" interval="0" last-rc-change="1687288330" exec-time="821" queue-time="0" op-digest="9ba708e8e80ce11a8a1ca4908e76e75e"/>
+ <lrm_rsc_op id="bundle-b-podman-1_monitor_60000" operation_key="bundle-b-podman-1_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="50:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;50:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="113" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288331" exec-time="171" queue-time="0" op-digest="12e3bf09305d4a76f940376759128ae1"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-1" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-1">
+ <lrm_rsc_op id="bundle-a-1_last_0" operation_key="bundle-a-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="15:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;15:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="15" rc-code="0" op-status="0" interval="0" last-rc-change="1687288264" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="bundle-a-1_monitor_30000" operation_key="bundle-a-1_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="18:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;18:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="16" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288267" exec-time="0" queue-time="0" op-digest="354b9acaa7ea1113d708dc11a1d6bbfa"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-0_last_0" operation_key="bundle-a-podman-0_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="9:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;9:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="80" rc-code="0" op-status="0" interval="0" last-rc-change="1687248552" exec-time="10639" queue-time="0" op-digest="bc99a652606652014a6cb5f743b5d75d"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-2" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-2">
+ <lrm_rsc_op id="bundle-b-2_last_0" operation_key="bundle-b-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="49:97:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;49:97:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="6" rc-code="7" op-status="0" interval="0" last-rc-change="1687248528" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-2" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-2">
+ <lrm_rsc_op id="bundle-a-2_last_0" operation_key="bundle-a-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="15:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;15:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="11" rc-code="7" op-status="0" interval="0" last-rc-change="1687248551" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-1" uname="base-bundle-1" in_ccm="false" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="base-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="125:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;125:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="1039" rc-code="0" op-status="0" interval="0" last-rc-change="1687248529" exec-time="177" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="92" rc-code="0" op-status="0" interval="16000" last-rc-change="1687220329" exec-time="43" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-2" uname="base-bundle-2" in_ccm="false" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="base-bundle-2">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="126:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;126:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="991" rc-code="0" op-status="0" interval="0" last-rc-change="1687248528" exec-time="201" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_15000" operation_key="base_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:8;33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="36" rc-code="8" op-status="0" interval="15000" last-rc-change="1687220059" exec-time="13" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="bundle-a-1" uname="bundle-a-1" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="bundle-a-1">
+ <lrm_resources>
+ <lrm_resource id="bundle-a-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-a-rsc_last_0" operation_key="bundle-a-rsc_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="40:168:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;40:168:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="25" rc-code="0" op-status="0" interval="0" last-rc-change="1687288549" exec-time="216" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-a-rsc_monitor_15000" operation_key="bundle-a-rsc_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="40:169:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:8;40:169:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="31" rc-code="8" op-status="0" interval="15000" last-rc-change="1687288549" exec-time="9" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-a-1">
+ <instance_attributes id="status-bundle-a-1">
+ <nvpair id="status-bundle-a-1-master-bundle-a-rsc" name="master-bundle-a-rsc" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="bundle-b-2" uname="bundle-b-2" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="bundle-b-2">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-b-rsc_last_0" operation_key="bundle-b-rsc_demote_0" operation="demote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="79:177:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;79:177:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="153" rc-code="0" op-status="0" interval="0" last-rc-change="1687292227" exec-time="217" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-b-rsc_monitor_16000" operation_key="bundle-b-rsc_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="78:178:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;78:178:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="159" rc-code="0" op-status="0" interval="16000" last-rc-change="1687292227" exec-time="10" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-b-2">
+ <instance_attributes id="status-bundle-b-2">
+ <nvpair id="status-bundle-b-2-master-bundle-b-rsc" name="master-bundle-b-rsc" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="bundle-b-0" uname="bundle-b-0" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="bundle-b-0">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-b-rsc_last_0" operation_key="bundle-b-rsc_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="69:163:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;69:163:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687288333" exec-time="290" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-b-rsc_monitor_16000" operation_key="bundle-b-rsc_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="71:164:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;71:164:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687288334" exec-time="12" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-b-0">
+ <instance_attributes id="status-bundle-b-0">
+ <nvpair id="status-bundle-b-0-master-bundle-b-rsc" name="master-bundle-b-rsc" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="bundle-b-1" uname="bundle-b-1" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="bundle-b-1">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-b-rsc_last_0" operation_key="bundle-b-rsc_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="74:178:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;74:178:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="145" rc-code="0" op-status="0" interval="0" last-rc-change="1687292227" exec-time="200" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-b-rsc_monitor_15000" operation_key="bundle-b-rsc_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="77:179:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:8;77:179:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="151" rc-code="8" op-status="0" interval="15000" last-rc-change="1687292227" exec-time="32" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-b-1">
+ <instance_attributes id="status-bundle-b-1">
+ <nvpair id="status-bundle-b-1-master-bundle-b-rsc" name="master-bundle-b-rsc" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="bundle-a-0" uname="bundle-a-0" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="bundle-a-0">
+ <lrm_resources>
+ <lrm_resource id="bundle-a-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-a-rsc_last_0" operation_key="bundle-a-rsc_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="39:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;39:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687288333" exec-time="250" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-a-rsc_monitor_16000" operation_key="bundle-a-rsc_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="36:163:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;36:163:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687288333" exec-time="10" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-a-0">
+ <instance_attributes id="status-bundle-a-0">
+ <nvpair id="status-bundle-a-0-master-bundle-a-rsc" name="master-bundle-a-rsc" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="bundle-a-2" uname="bundle-a-2" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="bundle-a-2">
+ <lrm_resources>
+ <lrm_resource id="bundle-a-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-a-rsc_last_0" operation_key="bundle-a-rsc_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="29:157:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;29:157:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687288268" exec-time="236" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-a-rsc_monitor_16000" operation_key="bundle-a-rsc_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="30:158:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;30:158:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687288268" exec-time="10" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-a-2">
+ <instance_attributes id="status-bundle-a-2">
+ <nvpair id="status-bundle-a-2-master-bundle-a-rsc" name="master-bundle-a-rsc" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/bundle-promoted-colocation-1.xml b/cts/scheduler/xml/bundle-promoted-colocation-1.xml
new file mode 100644
index 0000000..ff2a520
--- /dev/null
+++ b/cts/scheduler/xml/bundle-promoted-colocation-1.xml
@@ -0,0 +1,237 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="45" num_updates="6" admin_epoch="0" cib-last-written="Mon Jun 19 19:05:45 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * A primitive (vip) is mandatorily colocated with the promoted instance
+ of a bundle (base-bundle)
+ * The primitive is running on a different node from the bundle's
+ promoted instance
+ * There is no stickiness
+ * There are no location constraints
+ * There are three nodes available for both resources
+
+ In this situation, the primitive should move to the same node as the
+ bundle's promoted instance.
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687217818"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <bundle id="base-bundle">
+ <meta_attributes id="base-bundle-meta_attributes">
+ <nvpair id="base-bundle-meta_attributes-promotable" name="promotable" value="true"/>
+ <nvpair id="base-bundle-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3121"/>
+ <primitive id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="base-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="base-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ <primitive class="ocf" id="vip" provider="heartbeat" type="IPaddr2">
+ <instance_attributes id="vip-instance_attributes">
+ <nvpair id="vip-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
+ <nvpair id="vip-instance_attributes-ip" name="ip" value="192.168.22.81"/>
+ </instance_attributes>
+ <operations>
+ <op id="vip-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="vip-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="vip-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_colocation id="colocation-vip-base-bundle-INFINITY" rsc="vip" score="INFINITY" with-rsc="base-bundle" with-rsc-role="Promoted"/>
+ </constraints>
+ <fencing-topology/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-master-base" name="master-base" value="11"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="55" rc-code="0" op-status="0" interval="0" last-rc-change="1687222219" exec-time="0" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="2:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;2:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="38" rc-code="0" op-status="0" interval="120000" last-rc-change="1687217955" exec-time="43" queue-time="0" op-digest="8ca455a603cbe2dc8d68703e63c272f1"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="51:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;51:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="52" rc-code="0" op-status="0" interval="0" last-rc-change="1687222197" exec-time="61" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ <lrm_rsc_op id="vip_monitor_10000" operation_key="vip_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="52:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;52:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="53" rc-code="0" op-status="0" interval="10000" last-rc-change="1687222197" exec-time="38" queue-time="0" op-digest="20f7173b4af9ab62392ae5d9e5243580"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;8:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="49" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="103" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="6:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;6:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="45" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="82" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="9:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;9:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687219636" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="7:28:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;7:28:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="40" rc-code="0" op-status="0" interval="0" last-rc-change="1687218074" exec-time="795" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ <lrm_rsc_op id="base-bundle-podman-0_monitor_60000" operation_key="base-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;8:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="41" rc-code="0" op-status="0" interval="60000" last-rc-change="1687218074" exec-time="166" queue-time="0" op-digest="902512fcf3e4556d9585c44184665d8c"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="7:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;7:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="4" rc-code="7" op-status="0" interval="0" last-rc-change="1687219403" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="9:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;9:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1687218074" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-0_monitor_30000" operation_key="base-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;11:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="3" rc-code="0" op-status="0" interval="30000" last-rc-change="1687218075" exec-time="0" queue-time="0" op-digest="6f3c7e233bacb8420fef5f9581190d00"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="controld_update_resource_history" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-master-base" name="master-base" value="12"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="58" rc-code="0" op-status="0" interval="0" last-rc-change="1687226745" exec-time="33" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="59" rc-code="0" op-status="0" interval="120000" last-rc-change="1687226745" exec-time="27" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="50:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;50:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="53" rc-code="0" op-status="0" interval="0" last-rc-change="1687222197" exec-time="47" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ <lrm_rsc_op id="vip_monitor_10000" operation_key="vip_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="26:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;26:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="37" rc-code="0" op-status="0" interval="10000" last-rc-change="1687217955" exec-time="42" queue-time="0" op-digest="20f7173b4af9ab62392ae5d9e5243580"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;12:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="47" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="69" queue-time="1" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="20:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;20:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="48" rc-code="0" op-status="0" interval="0" last-rc-change="1687219402" exec-time="665" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ <lrm_rsc_op id="base-bundle-podman-1_monitor_60000" operation_key="base-bundle-podman-1_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="21:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;21:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="49" rc-code="0" op-status="0" interval="60000" last-rc-change="1687219403" exec-time="158" queue-time="0" op-digest="f0ef4729d120aa3f5d938cabca4d06c7"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="10:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;10:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687219636" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="2:27:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;2:27:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="39" rc-code="0" op-status="0" interval="0" last-rc-change="1687217956" exec-time="161" queue-time="0" op-digest="bbac36b73a7a6604aefdd2cb3b5f42e6"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="22:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;22:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1687219403" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-1_monitor_30000" operation_key="base-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="20:34:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;20:34:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687219404" exec-time="0" queue-time="0" op-digest="3929eec440004bca31f813a8e6097506"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="3:7:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;3:7:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687217701" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-0" uname="base-bundle-0" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="base-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="16:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;16:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687218075" exec-time="307" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687218076" exec-time="15" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-0">
+ <instance_attributes id="status-base-bundle-0">
+ <nvpair id="status-base-bundle-0-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="3" uname="node3" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-master-base" name="master-base" value="13"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="5" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="29:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;29:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="24" rc-code="0" op-status="0" interval="0" last-rc-change="1687219634" exec-time="936" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ <lrm_rsc_op id="base-bundle-podman-2_monitor_60000" operation_key="base-bundle-podman-2_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="30:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;30:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="25" rc-code="0" op-status="0" interval="60000" last-rc-change="1687219635" exec-time="173" queue-time="0" op-digest="8eeca5a30b14f3d9ef7d2ddbd16c2e05"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="18:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;18:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="23" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="71" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="14:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;14:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="14" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="107" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="31:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;31:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="4" rc-code="0" op-status="0" interval="0" last-rc-change="1687219635" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-2_monitor_30000" operation_key="base-bundle-2_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="25:37:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;25:37:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="5" rc-code="0" op-status="0" interval="30000" last-rc-change="1687219637" exec-time="0" queue-time="0" op-digest="354b9acaa7ea1113d708dc11a1d6bbfa"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;12:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="10" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="104" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="15:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;15:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="2" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="13:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;13:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-1" uname="base-bundle-1" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_demote_0" operation="demote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:55:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;35:55:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="86" rc-code="0" op-status="0" interval="0" last-rc-change="1687220328" exec-time="221" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="92" rc-code="0" op-status="0" interval="16000" last-rc-change="1687220329" exec-time="43" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-1">
+ <instance_attributes id="status-base-bundle-1">
+ <nvpair id="status-base-bundle-1-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-2" uname="base-bundle-2" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-2">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="32:43:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;32:43:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="30" rc-code="0" op-status="0" interval="0" last-rc-change="1687220058" exec-time="222" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_15000" operation_key="base_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:8;33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="36" rc-code="8" op-status="0" interval="15000" last-rc-change="1687220059" exec-time="13" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-2">
+ <instance_attributes id="status-base-bundle-2">
+ <nvpair id="status-base-bundle-2-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/bundle-promoted-colocation-2.xml b/cts/scheduler/xml/bundle-promoted-colocation-2.xml
new file mode 100644
index 0000000..cbef724
--- /dev/null
+++ b/cts/scheduler/xml/bundle-promoted-colocation-2.xml
@@ -0,0 +1,237 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="45" num_updates="6" admin_epoch="0" cib-last-written="Mon Jun 19 19:05:45 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * A primitive (vip) is optionally colocated (score=5000) with the
+ promoted instance of a bundle (base-bundle)
+ * The primitive is running on a different node from the bundle's
+ promoted instance
+ * There is no stickiness
+ * There are no location constraints
+ * There are three nodes available for both resources
+
+ In this situation, the primitive should move to the same node as the
+ bundle's promoted instance.
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687217818"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <bundle id="base-bundle">
+ <meta_attributes id="base-bundle-meta_attributes">
+ <nvpair id="base-bundle-meta_attributes-promotable" name="promotable" value="true"/>
+ <nvpair id="base-bundle-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3121"/>
+ <primitive id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="base-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="base-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ <primitive class="ocf" id="vip" provider="heartbeat" type="IPaddr2">
+ <instance_attributes id="vip-instance_attributes">
+ <nvpair id="vip-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
+ <nvpair id="vip-instance_attributes-ip" name="ip" value="192.168.22.81"/>
+ </instance_attributes>
+ <operations>
+ <op id="vip-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="vip-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="vip-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_colocation id="colocation-vip-base-bundle-5000" rsc="vip" score="5000" with-rsc="base-bundle" with-rsc-role="Promoted"/>
+ </constraints>
+ <fencing-topology/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-master-base" name="master-base" value="11"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="55" rc-code="0" op-status="0" interval="0" last-rc-change="1687222219" exec-time="0" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="2:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;2:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="38" rc-code="0" op-status="0" interval="120000" last-rc-change="1687217955" exec-time="43" queue-time="0" op-digest="8ca455a603cbe2dc8d68703e63c272f1"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="51:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;51:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="52" rc-code="0" op-status="0" interval="0" last-rc-change="1687222197" exec-time="61" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ <lrm_rsc_op id="vip_monitor_10000" operation_key="vip_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="52:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;52:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="53" rc-code="0" op-status="0" interval="10000" last-rc-change="1687222197" exec-time="38" queue-time="0" op-digest="20f7173b4af9ab62392ae5d9e5243580"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;8:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="49" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="103" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="6:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;6:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="45" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="82" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="9:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;9:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687219636" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="7:28:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;7:28:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="40" rc-code="0" op-status="0" interval="0" last-rc-change="1687218074" exec-time="795" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ <lrm_rsc_op id="base-bundle-podman-0_monitor_60000" operation_key="base-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;8:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="41" rc-code="0" op-status="0" interval="60000" last-rc-change="1687218074" exec-time="166" queue-time="0" op-digest="902512fcf3e4556d9585c44184665d8c"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="7:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;7:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="4" rc-code="7" op-status="0" interval="0" last-rc-change="1687219403" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="9:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;9:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1687218074" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-0_monitor_30000" operation_key="base-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;11:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="3" rc-code="0" op-status="0" interval="30000" last-rc-change="1687218075" exec-time="0" queue-time="0" op-digest="6f3c7e233bacb8420fef5f9581190d00"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="controld_update_resource_history" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-master-base" name="master-base" value="12"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="58" rc-code="0" op-status="0" interval="0" last-rc-change="1687226745" exec-time="33" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="59" rc-code="0" op-status="0" interval="120000" last-rc-change="1687226745" exec-time="27" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="50:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;50:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="53" rc-code="0" op-status="0" interval="0" last-rc-change="1687222197" exec-time="47" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ <lrm_rsc_op id="vip_monitor_10000" operation_key="vip_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="26:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;26:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="37" rc-code="0" op-status="0" interval="10000" last-rc-change="1687217955" exec-time="42" queue-time="0" op-digest="20f7173b4af9ab62392ae5d9e5243580"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;12:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="47" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="69" queue-time="1" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="20:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;20:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="48" rc-code="0" op-status="0" interval="0" last-rc-change="1687219402" exec-time="665" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ <lrm_rsc_op id="base-bundle-podman-1_monitor_60000" operation_key="base-bundle-podman-1_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="21:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;21:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="49" rc-code="0" op-status="0" interval="60000" last-rc-change="1687219403" exec-time="158" queue-time="0" op-digest="f0ef4729d120aa3f5d938cabca4d06c7"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="10:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;10:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687219636" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="2:27:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;2:27:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="39" rc-code="0" op-status="0" interval="0" last-rc-change="1687217956" exec-time="161" queue-time="0" op-digest="bbac36b73a7a6604aefdd2cb3b5f42e6"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="22:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;22:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1687219403" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-1_monitor_30000" operation_key="base-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="20:34:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;20:34:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687219404" exec-time="0" queue-time="0" op-digest="3929eec440004bca31f813a8e6097506"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="3:7:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;3:7:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687217701" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-0" uname="base-bundle-0" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="base-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="16:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;16:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687218075" exec-time="307" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687218076" exec-time="15" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-0">
+ <instance_attributes id="status-base-bundle-0">
+ <nvpair id="status-base-bundle-0-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="3" uname="node3" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-master-base" name="master-base" value="13"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="5" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="29:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;29:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="24" rc-code="0" op-status="0" interval="0" last-rc-change="1687219634" exec-time="936" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ <lrm_rsc_op id="base-bundle-podman-2_monitor_60000" operation_key="base-bundle-podman-2_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="30:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;30:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="25" rc-code="0" op-status="0" interval="60000" last-rc-change="1687219635" exec-time="173" queue-time="0" op-digest="8eeca5a30b14f3d9ef7d2ddbd16c2e05"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="18:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;18:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="23" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="71" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="14:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;14:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="14" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="107" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="31:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;31:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="4" rc-code="0" op-status="0" interval="0" last-rc-change="1687219635" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-2_monitor_30000" operation_key="base-bundle-2_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="25:37:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;25:37:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="5" rc-code="0" op-status="0" interval="30000" last-rc-change="1687219637" exec-time="0" queue-time="0" op-digest="354b9acaa7ea1113d708dc11a1d6bbfa"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;12:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="10" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="104" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="15:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;15:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="2" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="13:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;13:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-1" uname="base-bundle-1" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_demote_0" operation="demote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:55:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;35:55:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="86" rc-code="0" op-status="0" interval="0" last-rc-change="1687220328" exec-time="221" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="92" rc-code="0" op-status="0" interval="16000" last-rc-change="1687220329" exec-time="43" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-1">
+ <instance_attributes id="status-base-bundle-1">
+ <nvpair id="status-base-bundle-1-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-2" uname="base-bundle-2" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-2">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="32:43:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;32:43:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="30" rc-code="0" op-status="0" interval="0" last-rc-change="1687220058" exec-time="222" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_15000" operation_key="base_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:8;33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="36" rc-code="8" op-status="0" interval="15000" last-rc-change="1687220059" exec-time="13" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-2">
+ <instance_attributes id="status-base-bundle-2">
+ <nvpair id="status-base-bundle-2-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/bundle-promoted-colocation-3.xml b/cts/scheduler/xml/bundle-promoted-colocation-3.xml
new file mode 100644
index 0000000..94d5d1b
--- /dev/null
+++ b/cts/scheduler/xml/bundle-promoted-colocation-3.xml
@@ -0,0 +1,237 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="45" num_updates="6" admin_epoch="0" cib-last-written="Mon Jun 19 19:05:45 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * The promoted instance of a bundle (base-bundle) is mandatorily
+ colocated with a primitive (vip)
+ * The bundle's promoted instance is running on a different node from
+ the primitive
+ * There is no stickiness
+ * There are no location constraints
+ * There are three nodes available for both resources
+
+ In this situation, the bundle should demote its currently promoted
+ instance and promote the instance on the primitive's node.
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687217818"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <bundle id="base-bundle">
+ <meta_attributes id="base-bundle-meta_attributes">
+ <nvpair id="base-bundle-meta_attributes-promotable" name="promotable" value="true"/>
+ <nvpair id="base-bundle-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3121"/>
+ <primitive id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="base-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="base-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ <primitive class="ocf" id="vip" provider="heartbeat" type="IPaddr2">
+ <instance_attributes id="vip-instance_attributes">
+ <nvpair id="vip-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
+ <nvpair id="vip-instance_attributes-ip" name="ip" value="192.168.22.81"/>
+ </instance_attributes>
+ <operations>
+ <op id="vip-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="vip-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="vip-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_colocation id="colocation-base-bundle-vip-INFINITY" rsc="base-bundle" rsc-role="Promoted" score="INFINITY" with-rsc="vip"/>
+ </constraints>
+ <fencing-topology/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-master-base" name="master-base" value="11"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="55" rc-code="0" op-status="0" interval="0" last-rc-change="1687222219" exec-time="0" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="2:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;2:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="38" rc-code="0" op-status="0" interval="120000" last-rc-change="1687217955" exec-time="43" queue-time="0" op-digest="8ca455a603cbe2dc8d68703e63c272f1"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="51:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;51:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="52" rc-code="0" op-status="0" interval="0" last-rc-change="1687222197" exec-time="61" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ <lrm_rsc_op id="vip_monitor_10000" operation_key="vip_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="52:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;52:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="53" rc-code="0" op-status="0" interval="10000" last-rc-change="1687222197" exec-time="38" queue-time="0" op-digest="20f7173b4af9ab62392ae5d9e5243580"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;8:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="49" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="103" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="6:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;6:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="45" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="82" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="9:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;9:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687219636" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="7:28:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;7:28:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="40" rc-code="0" op-status="0" interval="0" last-rc-change="1687218074" exec-time="795" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ <lrm_rsc_op id="base-bundle-podman-0_monitor_60000" operation_key="base-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;8:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="41" rc-code="0" op-status="0" interval="60000" last-rc-change="1687218074" exec-time="166" queue-time="0" op-digest="902512fcf3e4556d9585c44184665d8c"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="7:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;7:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="4" rc-code="7" op-status="0" interval="0" last-rc-change="1687219403" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="9:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;9:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1687218074" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-0_monitor_30000" operation_key="base-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;11:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="3" rc-code="0" op-status="0" interval="30000" last-rc-change="1687218075" exec-time="0" queue-time="0" op-digest="6f3c7e233bacb8420fef5f9581190d00"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="controld_update_resource_history" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-master-base" name="master-base" value="12"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="58" rc-code="0" op-status="0" interval="0" last-rc-change="1687226745" exec-time="33" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="59" rc-code="0" op-status="0" interval="120000" last-rc-change="1687226745" exec-time="27" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="50:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;50:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="53" rc-code="0" op-status="0" interval="0" last-rc-change="1687222197" exec-time="47" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ <lrm_rsc_op id="vip_monitor_10000" operation_key="vip_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="26:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;26:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="37" rc-code="0" op-status="0" interval="10000" last-rc-change="1687217955" exec-time="42" queue-time="0" op-digest="20f7173b4af9ab62392ae5d9e5243580"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;12:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="47" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="69" queue-time="1" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="20:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;20:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="48" rc-code="0" op-status="0" interval="0" last-rc-change="1687219402" exec-time="665" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ <lrm_rsc_op id="base-bundle-podman-1_monitor_60000" operation_key="base-bundle-podman-1_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="21:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;21:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="49" rc-code="0" op-status="0" interval="60000" last-rc-change="1687219403" exec-time="158" queue-time="0" op-digest="f0ef4729d120aa3f5d938cabca4d06c7"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="10:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;10:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687219636" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="2:27:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;2:27:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="39" rc-code="0" op-status="0" interval="0" last-rc-change="1687217956" exec-time="161" queue-time="0" op-digest="bbac36b73a7a6604aefdd2cb3b5f42e6"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="22:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;22:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1687219403" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-1_monitor_30000" operation_key="base-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="20:34:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;20:34:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687219404" exec-time="0" queue-time="0" op-digest="3929eec440004bca31f813a8e6097506"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="3:7:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;3:7:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687217701" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-0" uname="base-bundle-0" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="base-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="16:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;16:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687218075" exec-time="307" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687218076" exec-time="15" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-0">
+ <instance_attributes id="status-base-bundle-0">
+ <nvpair id="status-base-bundle-0-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="3" uname="node3" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-master-base" name="master-base" value="13"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="5" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="29:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;29:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="24" rc-code="0" op-status="0" interval="0" last-rc-change="1687219634" exec-time="936" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ <lrm_rsc_op id="base-bundle-podman-2_monitor_60000" operation_key="base-bundle-podman-2_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="30:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;30:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="25" rc-code="0" op-status="0" interval="60000" last-rc-change="1687219635" exec-time="173" queue-time="0" op-digest="8eeca5a30b14f3d9ef7d2ddbd16c2e05"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="18:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;18:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="23" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="71" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="14:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;14:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="14" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="107" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="31:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;31:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="4" rc-code="0" op-status="0" interval="0" last-rc-change="1687219635" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-2_monitor_30000" operation_key="base-bundle-2_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="25:37:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;25:37:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="5" rc-code="0" op-status="0" interval="30000" last-rc-change="1687219637" exec-time="0" queue-time="0" op-digest="354b9acaa7ea1113d708dc11a1d6bbfa"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;12:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="10" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="104" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="15:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;15:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="2" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="13:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;13:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-1" uname="base-bundle-1" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_demote_0" operation="demote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:55:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;35:55:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="86" rc-code="0" op-status="0" interval="0" last-rc-change="1687220328" exec-time="221" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="92" rc-code="0" op-status="0" interval="16000" last-rc-change="1687220329" exec-time="43" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-1">
+ <instance_attributes id="status-base-bundle-1">
+ <nvpair id="status-base-bundle-1-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-2" uname="base-bundle-2" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-2">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="32:43:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;32:43:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="30" rc-code="0" op-status="0" interval="0" last-rc-change="1687220058" exec-time="222" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_15000" operation_key="base_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:8;33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="36" rc-code="8" op-status="0" interval="15000" last-rc-change="1687220059" exec-time="13" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-2">
+ <instance_attributes id="status-base-bundle-2">
+ <nvpair id="status-base-bundle-2-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/bundle-promoted-colocation-4.xml b/cts/scheduler/xml/bundle-promoted-colocation-4.xml
new file mode 100644
index 0000000..4739472
--- /dev/null
+++ b/cts/scheduler/xml/bundle-promoted-colocation-4.xml
@@ -0,0 +1,237 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="45" num_updates="6" admin_epoch="0" cib-last-written="Mon Jun 19 19:05:45 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * The promoted instance of a bundle (base-bundle) is optionally
+ colocated (score=5000) with a primitive (vip)
+ * The bundle's promoted instance is running on a different node from
+ the primitive
+ * There is no stickiness
+ * There are no location constraints
+ * There are three nodes available for both resources
+
+ In this situation, the bundle should demote its currently promoted
+ instance and promote the instance on the primitive's node.
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687217818"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <bundle id="base-bundle">
+ <meta_attributes id="base-bundle-meta_attributes">
+ <nvpair id="base-bundle-meta_attributes-promotable" name="promotable" value="true"/>
+ <nvpair id="base-bundle-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3121"/>
+ <primitive id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="base-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="base-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ <primitive class="ocf" id="vip" provider="heartbeat" type="IPaddr2">
+ <instance_attributes id="vip-instance_attributes">
+ <nvpair id="vip-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
+ <nvpair id="vip-instance_attributes-ip" name="ip" value="192.168.22.81"/>
+ </instance_attributes>
+ <operations>
+ <op id="vip-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="vip-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="vip-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </resources>
+ <constraints>
+ <rsc_colocation id="colocation-base-bundle-vip-5000" rsc="base-bundle" rsc-role="Promoted" score="5000" with-rsc="vip"/>
+ </constraints>
+ <fencing-topology/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-master-base" name="master-base" value="11"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="55" rc-code="0" op-status="0" interval="0" last-rc-change="1687222219" exec-time="0" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="2:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;2:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="38" rc-code="0" op-status="0" interval="120000" last-rc-change="1687217955" exec-time="43" queue-time="0" op-digest="8ca455a603cbe2dc8d68703e63c272f1"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="51:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;51:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="52" rc-code="0" op-status="0" interval="0" last-rc-change="1687222197" exec-time="61" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ <lrm_rsc_op id="vip_monitor_10000" operation_key="vip_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="52:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;52:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="53" rc-code="0" op-status="0" interval="10000" last-rc-change="1687222197" exec-time="38" queue-time="0" op-digest="20f7173b4af9ab62392ae5d9e5243580"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;8:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="49" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="103" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="6:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;6:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="45" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="82" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="9:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;9:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687219636" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="7:28:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;7:28:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="40" rc-code="0" op-status="0" interval="0" last-rc-change="1687218074" exec-time="795" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ <lrm_rsc_op id="base-bundle-podman-0_monitor_60000" operation_key="base-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;8:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="41" rc-code="0" op-status="0" interval="60000" last-rc-change="1687218074" exec-time="166" queue-time="0" op-digest="902512fcf3e4556d9585c44184665d8c"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="7:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;7:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="4" rc-code="7" op-status="0" interval="0" last-rc-change="1687219403" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="9:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;9:29:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1687218074" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-0_monitor_30000" operation_key="base-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;11:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="3" rc-code="0" op-status="0" interval="30000" last-rc-change="1687218075" exec-time="0" queue-time="0" op-digest="6f3c7e233bacb8420fef5f9581190d00"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="controld_update_resource_history" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-master-base" name="master-base" value="12"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="58" rc-code="0" op-status="0" interval="0" last-rc-change="1687226745" exec-time="33" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="59" rc-code="0" op-status="0" interval="120000" last-rc-change="1687226745" exec-time="27" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="50:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;50:60:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="53" rc-code="0" op-status="0" interval="0" last-rc-change="1687222197" exec-time="47" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ <lrm_rsc_op id="vip_monitor_10000" operation_key="vip_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="26:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;26:23:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="37" rc-code="0" op-status="0" interval="10000" last-rc-change="1687217955" exec-time="42" queue-time="0" op-digest="20f7173b4af9ab62392ae5d9e5243580"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;12:33:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="47" rc-code="7" op-status="0" interval="0" last-rc-change="1687219402" exec-time="69" queue-time="1" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="20:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;20:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="48" rc-code="0" op-status="0" interval="0" last-rc-change="1687219402" exec-time="665" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ <lrm_rsc_op id="base-bundle-podman-1_monitor_60000" operation_key="base-bundle-podman-1_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="21:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;21:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="49" rc-code="0" op-status="0" interval="60000" last-rc-change="1687219403" exec-time="158" queue-time="0" op-digest="f0ef4729d120aa3f5d938cabca4d06c7"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="10:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;10:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687219636" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="2:27:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;2:27:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="39" rc-code="0" op-status="0" interval="0" last-rc-change="1687217956" exec-time="161" queue-time="0" op-digest="bbac36b73a7a6604aefdd2cb3b5f42e6"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="22:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;22:33:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1687219403" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-1_monitor_30000" operation_key="base-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="20:34:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;20:34:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687219404" exec-time="0" queue-time="0" op-digest="3929eec440004bca31f813a8e6097506"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="3:7:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;3:7:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687217701" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-0" uname="base-bundle-0" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="base-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="16:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;16:30:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687218075" exec-time="307" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687218076" exec-time="15" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-0">
+ <instance_attributes id="status-base-bundle-0">
+ <nvpair id="status-base-bundle-0-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="3" uname="node3" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-master-base" name="master-base" value="13"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="5" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="29:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;29:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="24" rc-code="0" op-status="0" interval="0" last-rc-change="1687219634" exec-time="936" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ <lrm_rsc_op id="base-bundle-podman-2_monitor_60000" operation_key="base-bundle-podman-2_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="30:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;30:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="25" rc-code="0" op-status="0" interval="60000" last-rc-change="1687219635" exec-time="173" queue-time="0" op-digest="8eeca5a30b14f3d9ef7d2ddbd16c2e05"/>
+ </lrm_resource>
+ <lrm_resource id="vip" type="IPaddr2" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="18:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;18:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="23" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="71" queue-time="0" op-digest="b274efb9afd1400d58df73d9925b6823"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="14:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;14:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="14" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="107" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="31:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;31:36:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="4" rc-code="0" op-status="0" interval="0" last-rc-change="1687219635" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-2_monitor_30000" operation_key="base-bundle-2_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="25:37:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;25:37:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="5" rc-code="0" op-status="0" interval="30000" last-rc-change="1687219637" exec-time="0" queue-time="0" op-digest="354b9acaa7ea1113d708dc11a1d6bbfa"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;12:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="10" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="104" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="15:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;15:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="2" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" type="remote" class="ocf" provider="pacemaker" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="13:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;13:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-1" uname="base-bundle-1" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_demote_0" operation="demote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:55:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;35:55:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="86" rc-code="0" op-status="0" interval="0" last-rc-change="1687220328" exec-time="221" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="92" rc-code="0" op-status="0" interval="16000" last-rc-change="1687220329" exec-time="43" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-1">
+ <instance_attributes id="status-base-bundle-1">
+ <nvpair id="status-base-bundle-1-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-2" uname="base-bundle-2" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-2">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="32:43:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;32:43:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="30" rc-code="0" op-status="0" interval="0" last-rc-change="1687220058" exec-time="222" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_15000" operation_key="base_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:8;33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="36" rc-code="8" op-status="0" interval="15000" last-rc-change="1687220059" exec-time="13" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-2">
+ <instance_attributes id="status-base-bundle-2">
+ <nvpair id="status-base-bundle-2-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/bundle-promoted-colocation-5.xml b/cts/scheduler/xml/bundle-promoted-colocation-5.xml
new file mode 100644
index 0000000..76367d7
--- /dev/null
+++ b/cts/scheduler/xml/bundle-promoted-colocation-5.xml
@@ -0,0 +1,367 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="56" num_updates="140" admin_epoch="0" cib-last-written="Tue Jun 20 12:12:10 2023" update-origin="node1" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * The promoted instance of a bundle (bundle-a) is mandatorily colocated
+ with the promoted instance of another bundle (bundle-b)
+ * bundle-a's promoted instance is running on a different node from
+ bundle-b's promoted instance
+ * There is no stickiness
+ * There are no location constraints
+ * There are three nodes available for both resources
+
+ In this situation, bundle-a should demote its currently promoted
+ instance and promote the instance on the node where bundle-b is
+ promoted.
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687288330"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <bundle id="bundle-a">
+ <meta_attributes id="bundle-a-meta_attributes">
+ <nvpair id="bundle-a-meta_attributes-promotable" name="promotable" value="true"/>
+ <nvpair id="bundle-a-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3121"/>
+ <primitive id="bundle-a-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="bundle-a-rsc-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="bundle-a-rsc-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ <bundle id="bundle-b">
+ <meta_attributes id="bundle-b-meta_attributes">
+ <nvpair id="bundle-b-meta_attributes-promotable" name="promotable" value="true"/>
+ <nvpair id="bundle-b-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3122"/>
+ <primitive id="bundle-b-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="bundle-b-rsc-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="bundle-b-rsc-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ </resources>
+ <constraints>
+ <rsc_colocation id="colocation-bundle-a-bundle-b-INFINITY" rsc="bundle-a" rsc-role="Promoted" score="INFINITY" with-rsc="bundle-b" with-rsc-role="Promoted"/>
+ </constraints>
+ <fencing-topology/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_state_transition" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-promotion-bundle-a-rsc" name="master-bundle-a-rsc" value="11"/>
+ <nvpair id="status-1-promotion-bundle-b-rsc" name="master-bundle-b-rsc" value="12"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-2_last_0" operation_key="bundle-b-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="4:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;4:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="132" rc-code="7" op-status="0" interval="0" last-rc-change="1687288263" exec-time="237" queue-time="0" op-digest="4e4b17530bea4f6fd27df1cf68bd9f22"/>
+ </lrm_resource>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="55" rc-code="0" op-status="0" interval="0" last-rc-change="1687222219" exec-time="0" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-0" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-0">
+ <lrm_rsc_op id="bundle-a-0_last_0" operation_key="bundle-a-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="13:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;13:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="19" rc-code="0" op-status="0" interval="0" last-rc-change="1687288332" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="bundle-a-0_monitor_30000" operation_key="bundle-a-0_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="22:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;22:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="20" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288333" exec-time="0" queue-time="0" op-digest="6f3c7e233bacb8420fef5f9581190d00"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-1_last_0" operation_key="bundle-a-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="3:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;3:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="117" rc-code="7" op-status="0" interval="0" last-rc-change="1687288204" exec-time="106" queue-time="0" op-digest="90f1390dea2a7bca12099b080987eae7"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-0_last_0" operation_key="bundle-b-podman-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="45:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;45:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="144" rc-code="0" op-status="0" interval="0" last-rc-change="1687288330" exec-time="1158" queue-time="0" op-digest="94098b91d583b0b8498741508c609a37"/>
+ <lrm_rsc_op id="bundle-b-podman-0_monitor_60000" operation_key="bundle-b-podman-0_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="46:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;46:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="146" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288332" exec-time="182" queue-time="0" op-digest="b7a9f966374c886087fb6f03d6e4c286"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-0" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-0">
+ <lrm_rsc_op id="bundle-b-0_last_0" operation_key="bundle-b-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="47:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;47:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1687288332" exec-time="0" queue-time="0" op-digest="06b6a770601e99a2d691d4cf853acceb" op-force-restart=" port server " op-restart-digest="5b00a5a6fae3dc7e25288679ad0c92ce"/>
+ <lrm_rsc_op id="bundle-b-0_monitor_30000" operation_key="bundle-b-0_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="57:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;57:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="21" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288333" exec-time="0" queue-time="0" op-digest="b4ca332a542717c28b117ec80e27b838"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-1" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-1">
+ <lrm_rsc_op id="bundle-a-1_last_0" operation_key="bundle-a-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;8:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="10" rc-code="7" op-status="0" interval="0" last-rc-change="1687248552" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-2_last_0" operation_key="bundle-a-podman-2_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="17:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;17:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="107" rc-code="0" op-status="0" interval="0" last-rc-change="1687248553" exec-time="10540" queue-time="0" op-digest="a5415a2b8dfba2a5741d70bab5591c1b"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-1" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-1">
+ <lrm_rsc_op id="bundle-b-1_last_0" operation_key="bundle-b-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="28:99:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;28:99:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687248540" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-1_last_0" operation_key="bundle-b-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="3:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;3:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="128" rc-code="7" op-status="0" interval="0" last-rc-change="1687288263" exec-time="240" queue-time="0" op-digest="e1aa4eab4c9e9334ee91c66cce449b13"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-0_last_0" operation_key="bundle-a-podman-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;11:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="145" rc-code="0" op-status="0" interval="0" last-rc-change="1687288331" exec-time="1143" queue-time="0" op-digest="bc99a652606652014a6cb5f743b5d75d"/>
+ <lrm_rsc_op id="bundle-a-podman-0_monitor_60000" operation_key="bundle-a-podman-0_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="147" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288332" exec-time="161" queue-time="0" op-digest="d39778305ffa599cc7f7a94b47d18783"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-2" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-2">
+ <lrm_rsc_op id="bundle-b-2_last_0" operation_key="bundle-b-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="37:97:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;37:97:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="6" rc-code="7" op-status="0" interval="0" last-rc-change="1687248528" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-2" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-2">
+ <lrm_rsc_op id="bundle-a-2_last_0" operation_key="bundle-a-2_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="18:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;18:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1687248553" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-promotion-bundle-a-rsc" name="master-bundle-a-rsc" value="12"/>
+ <nvpair id="status-2-promotion-bundle-b-rsc" name="master-bundle-b-rsc" value="13"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-2_last_0" operation_key="bundle-b-podman-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="53:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;53:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="141" rc-code="0" op-status="0" interval="0" last-rc-change="1687288330" exec-time="752" queue-time="0" op-digest="35320d55914b88e0c9d4d13b574cef8a"/>
+ <lrm_rsc_op id="bundle-b-podman-2_monitor_60000" operation_key="bundle-b-podman-2_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="54:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;54:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="142" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288331" exec-time="120" queue-time="0" op-digest="62d15df41b3f596ca94b0dbe7edda857"/>
+ </lrm_resource>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="58" rc-code="0" op-status="0" interval="0" last-rc-change="1687226745" exec-time="33" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="59" rc-code="0" op-status="0" interval="120000" last-rc-change="1687226745" exec-time="27" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-0" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-0">
+ <lrm_rsc_op id="bundle-a-0_last_0" operation_key="bundle-a-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:101:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;11:101:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687248551" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-1_last_0" operation_key="bundle-a-podman-1_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="13:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;13:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="109" rc-code="0" op-status="0" interval="0" last-rc-change="1687248553" exec-time="10646" queue-time="0" op-digest="90f1390dea2a7bca12099b080987eae7"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-0_last_0" operation_key="bundle-b-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="5:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;5:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="126" rc-code="7" op-status="0" interval="0" last-rc-change="1687288263" exec-time="124" queue-time="0" op-digest="cad283aeb094013845bac465ab6a198d"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-0" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-0">
+ <lrm_rsc_op id="bundle-b-0_last_0" operation_key="bundle-b-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="13:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;13:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687248552" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-1" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-1">
+ <lrm_rsc_op id="bundle-a-1_last_0" operation_key="bundle-a-1_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="14:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;14:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1687248553" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-2_last_0" operation_key="bundle-a-podman-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="21:154:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;21:154:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="135" rc-code="0" op-status="0" interval="0" last-rc-change="1687288263" exec-time="1059" queue-time="0" op-digest="a5415a2b8dfba2a5741d70bab5591c1b"/>
+ <lrm_rsc_op id="bundle-a-podman-2_monitor_60000" operation_key="bundle-a-podman-2_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="19:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;19:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="137" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288265" exec-time="284" queue-time="0" op-digest="77326e7c2187a5c4fe386812dfcd4118"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-1" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-1">
+ <lrm_rsc_op id="bundle-b-1_last_0" operation_key="bundle-b-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="33:99:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;33:99:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687248540" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-1_last_0" operation_key="bundle-b-podman-1_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="4:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;4:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="140" rc-code="0" op-status="0" interval="0" last-rc-change="1687288267" exec-time="216" queue-time="0" op-digest="e1aa4eab4c9e9334ee91c66cce449b13"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-0_last_0" operation_key="bundle-a-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="4:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;4:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="115" rc-code="7" op-status="0" interval="0" last-rc-change="1687288204" exec-time="101" queue-time="0" op-digest="bc99a652606652014a6cb5f743b5d75d"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-2" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-2">
+ <lrm_rsc_op id="bundle-b-2_last_0" operation_key="bundle-b-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="55:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;55:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="17" rc-code="0" op-status="0" interval="0" last-rc-change="1687288331" exec-time="0" queue-time="0" op-digest="077939dc4a5dbbd799045f9120d057b4" op-force-restart=" port server " op-restart-digest="5b00a5a6fae3dc7e25288679ad0c92ce"/>
+ <lrm_rsc_op id="bundle-b-2_monitor_30000" operation_key="bundle-b-2_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="67:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;67:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="18" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288333" exec-time="0" queue-time="0" op-digest="f9f13135e91dabbc6c77a3fd4b23ab80"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-2" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-2">
+ <lrm_rsc_op id="bundle-a-2_last_0" operation_key="bundle-a-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="20:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;20:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="15" rc-code="0" op-status="0" interval="0" last-rc-change="1687288265" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="bundle-a-2_monitor_30000" operation_key="bundle-a-2_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="23:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;23:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="16" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288267" exec-time="0" queue-time="0" op-digest="3929eec440004bca31f813a8e6097506"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-0" uname="base-bundle-0" in_ccm="false" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="base-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="124:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;124:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="1035" rc-code="0" op-status="0" interval="0" last-rc-change="1687248529" exec-time="180" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687218076" exec-time="15" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="3" uname="node3" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-promotion-bundle-a-rsc" name="master-bundle-a-rsc" value="13"/>
+ <nvpair id="status-3-promotion-bundle-b-rsc" name="master-bundle-b-rsc" value="11"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-2_last_0" operation_key="bundle-b-podman-2_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="6:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;6:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="111" rc-code="0" op-status="0" interval="0" last-rc-change="1687288267" exec-time="243" queue-time="0" op-digest="4e4b17530bea4f6fd27df1cf68bd9f22"/>
+ </lrm_resource>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="5" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-0" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-0">
+ <lrm_rsc_op id="bundle-a-0_last_0" operation_key="bundle-a-0_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="10:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;10:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1687248552" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-1_last_0" operation_key="bundle-a-podman-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="17:154:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;17:154:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="106" rc-code="0" op-status="0" interval="0" last-rc-change="1687288263" exec-time="1002" queue-time="0" op-digest="90f1390dea2a7bca12099b080987eae7"/>
+ <lrm_rsc_op id="bundle-a-podman-1_monitor_60000" operation_key="bundle-a-podman-1_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="14:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;14:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="108" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288264" exec-time="156" queue-time="0" op-digest="34c506d61f8ef4e0d77193cce6e838ce"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-0_last_0" operation_key="bundle-b-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;8:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="97" rc-code="7" op-status="0" interval="0" last-rc-change="1687288263" exec-time="119" queue-time="0" op-digest="cad283aeb094013845bac465ab6a198d"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-0" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-0">
+ <lrm_rsc_op id="bundle-b-0_last_0" operation_key="bundle-b-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="16:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;16:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687248551" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-2_last_0" operation_key="bundle-a-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="7:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;7:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="90" rc-code="7" op-status="0" interval="0" last-rc-change="1687288203" exec-time="78" queue-time="0" op-digest="a5415a2b8dfba2a5741d70bab5591c1b"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-1" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-1">
+ <lrm_rsc_op id="bundle-b-1_last_0" operation_key="bundle-b-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="51:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;51:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="17" rc-code="0" op-status="0" interval="0" last-rc-change="1687288331" exec-time="0" queue-time="0" op-digest="aeee9bdab74d90d67c45ff8a2e53b020" op-force-restart=" port server " op-restart-digest="5b00a5a6fae3dc7e25288679ad0c92ce"/>
+ <lrm_rsc_op id="bundle-b-1_monitor_30000" operation_key="bundle-b-1_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="62:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;62:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="18" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288333" exec-time="0" queue-time="0" op-digest="6a12d3c71ed94e6f961711073ca69f24"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-1_last_0" operation_key="bundle-b-podman-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="49:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;49:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="112" rc-code="0" op-status="0" interval="0" last-rc-change="1687288330" exec-time="821" queue-time="0" op-digest="9ba708e8e80ce11a8a1ca4908e76e75e"/>
+ <lrm_rsc_op id="bundle-b-podman-1_monitor_60000" operation_key="bundle-b-podman-1_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="50:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;50:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="113" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288331" exec-time="171" queue-time="0" op-digest="12e3bf09305d4a76f940376759128ae1"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-1" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-1">
+ <lrm_rsc_op id="bundle-a-1_last_0" operation_key="bundle-a-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="15:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;15:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="15" rc-code="0" op-status="0" interval="0" last-rc-change="1687288264" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="bundle-a-1_monitor_30000" operation_key="bundle-a-1_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="18:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;18:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="16" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288267" exec-time="0" queue-time="0" op-digest="354b9acaa7ea1113d708dc11a1d6bbfa"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-0_last_0" operation_key="bundle-a-podman-0_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="9:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;9:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="80" rc-code="0" op-status="0" interval="0" last-rc-change="1687248552" exec-time="10639" queue-time="0" op-digest="bc99a652606652014a6cb5f743b5d75d"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-2" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-2">
+ <lrm_rsc_op id="bundle-b-2_last_0" operation_key="bundle-b-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="49:97:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;49:97:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="6" rc-code="7" op-status="0" interval="0" last-rc-change="1687248528" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-2" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-2">
+ <lrm_rsc_op id="bundle-a-2_last_0" operation_key="bundle-a-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="15:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;15:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="11" rc-code="7" op-status="0" interval="0" last-rc-change="1687248551" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-1" uname="base-bundle-1" in_ccm="false" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="base-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="125:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;125:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="1039" rc-code="0" op-status="0" interval="0" last-rc-change="1687248529" exec-time="177" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="92" rc-code="0" op-status="0" interval="16000" last-rc-change="1687220329" exec-time="43" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-2" uname="base-bundle-2" in_ccm="false" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="base-bundle-2">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="126:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;126:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="991" rc-code="0" op-status="0" interval="0" last-rc-change="1687248528" exec-time="201" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_15000" operation_key="base_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:8;33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="36" rc-code="8" op-status="0" interval="15000" last-rc-change="1687220059" exec-time="13" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="bundle-a-1" uname="bundle-a-1" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="bundle-a-1">
+ <lrm_resources>
+ <lrm_resource id="bundle-a-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-a-rsc_last_0" operation_key="bundle-a-rsc_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="40:168:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;40:168:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="25" rc-code="0" op-status="0" interval="0" last-rc-change="1687288549" exec-time="216" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-a-rsc_monitor_15000" operation_key="bundle-a-rsc_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="40:169:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:8;40:169:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="31" rc-code="8" op-status="0" interval="15000" last-rc-change="1687288549" exec-time="9" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-a-1">
+ <instance_attributes id="status-bundle-a-1">
+ <nvpair id="status-bundle-a-1-master-bundle-a-rsc" name="master-bundle-a-rsc" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="bundle-b-2" uname="bundle-b-2" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="bundle-b-2">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-b-rsc_last_0" operation_key="bundle-b-rsc_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="76:168:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;76:168:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="23" rc-code="0" op-status="0" interval="0" last-rc-change="1687288550" exec-time="219" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-b-rsc_monitor_15000" operation_key="bundle-b-rsc_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="77:169:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:8;77:169:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="29" rc-code="8" op-status="0" interval="15000" last-rc-change="1687288550" exec-time="10" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-b-2">
+ <instance_attributes id="status-bundle-b-2">
+ <nvpair id="status-bundle-b-2-master-bundle-b-rsc" name="master-bundle-b-rsc" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="bundle-b-0" uname="bundle-b-0" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="bundle-b-0">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-b-rsc_last_0" operation_key="bundle-b-rsc_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="69:163:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;69:163:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687288333" exec-time="290" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-b-rsc_monitor_16000" operation_key="bundle-b-rsc_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="71:164:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;71:164:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687288334" exec-time="12" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-b-0">
+ <instance_attributes id="status-bundle-b-0">
+ <nvpair id="status-bundle-b-0-master-bundle-b-rsc" name="master-bundle-b-rsc" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="bundle-b-1" uname="bundle-b-1" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="bundle-b-1">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-b-rsc_last_0" operation_key="bundle-b-rsc_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="72:164:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;72:164:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687288333" exec-time="274" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-b-rsc_monitor_16000" operation_key="bundle-b-rsc_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="74:165:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;74:165:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687288334" exec-time="9" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-b-1">
+ <instance_attributes id="status-bundle-b-1">
+ <nvpair id="status-bundle-b-1-master-bundle-b-rsc" name="master-bundle-b-rsc" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="bundle-a-0" uname="bundle-a-0" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="bundle-a-0">
+ <lrm_resources>
+ <lrm_resource id="bundle-a-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-a-rsc_last_0" operation_key="bundle-a-rsc_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="39:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;39:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687288333" exec-time="250" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-a-rsc_monitor_16000" operation_key="bundle-a-rsc_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="36:163:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;36:163:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687288333" exec-time="10" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-a-0">
+ <instance_attributes id="status-bundle-a-0">
+ <nvpair id="status-bundle-a-0-master-bundle-a-rsc" name="master-bundle-a-rsc" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="bundle-a-2" uname="bundle-a-2" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="bundle-a-2">
+ <lrm_resources>
+ <lrm_resource id="bundle-a-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-a-rsc_last_0" operation_key="bundle-a-rsc_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="29:157:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;29:157:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687288268" exec-time="236" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-a-rsc_monitor_16000" operation_key="bundle-a-rsc_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="30:158:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;30:158:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687288268" exec-time="10" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-a-2">
+ <instance_attributes id="status-bundle-a-2">
+ <nvpair id="status-bundle-a-2-master-bundle-a-rsc" name="master-bundle-a-rsc" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/bundle-promoted-colocation-6.xml b/cts/scheduler/xml/bundle-promoted-colocation-6.xml
new file mode 100644
index 0000000..a14e7c4
--- /dev/null
+++ b/cts/scheduler/xml/bundle-promoted-colocation-6.xml
@@ -0,0 +1,367 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="56" num_updates="140" admin_epoch="0" cib-last-written="Tue Jun 20 12:12:10 2023" update-origin="node1" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * The promoted instance of a bundle (bundle-a) is optionally colocated
+ (score=5000) with the promoted instance of another bundle (bundle-b)
+ * bundle-a's promoted instance is running on a different node from
+ bundle-b's promoted instance
+ * There is no stickiness
+ * There are no location constraints
+ * There are three nodes available for both resources
+
+ In this situation, bundle-a should demote its currently promoted
+ instance and promote the instance on the node where bundle-b is
+ promoted.
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687288330"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <bundle id="bundle-a">
+ <meta_attributes id="bundle-a-meta_attributes">
+ <nvpair id="bundle-a-meta_attributes-promotable" name="promotable" value="true"/>
+ <nvpair id="bundle-a-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3121"/>
+ <primitive id="bundle-a-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="bundle-a-rsc-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="bundle-a-rsc-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ <bundle id="bundle-b">
+ <meta_attributes id="bundle-b-meta_attributes">
+ <nvpair id="bundle-b-meta_attributes-promotable" name="promotable" value="true"/>
+ <nvpair id="bundle-b-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3122"/>
+ <primitive id="bundle-b-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="bundle-b-rsc-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="bundle-b-rsc-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ </resources>
+ <constraints>
+ <rsc_colocation id="colocation-bundle-a-bundle-b-5000" rsc="bundle-a" rsc-role="Promoted" score="5000" with-rsc="bundle-b" with-rsc-role="Promoted"/>
+ </constraints>
+ <fencing-topology/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_state_transition" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-promotion-bundle-a-rsc" name="master-bundle-a-rsc" value="11"/>
+ <nvpair id="status-1-promotion-bundle-b-rsc" name="master-bundle-b-rsc" value="12"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-2_last_0" operation_key="bundle-b-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="4:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;4:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="132" rc-code="7" op-status="0" interval="0" last-rc-change="1687288263" exec-time="237" queue-time="0" op-digest="4e4b17530bea4f6fd27df1cf68bd9f22"/>
+ </lrm_resource>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:62:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="55" rc-code="0" op-status="0" interval="0" last-rc-change="1687222219" exec-time="0" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-0" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-0">
+ <lrm_rsc_op id="bundle-a-0_last_0" operation_key="bundle-a-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="13:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;13:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="19" rc-code="0" op-status="0" interval="0" last-rc-change="1687288332" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="bundle-a-0_monitor_30000" operation_key="bundle-a-0_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="22:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;22:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="20" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288333" exec-time="0" queue-time="0" op-digest="6f3c7e233bacb8420fef5f9581190d00"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-1_last_0" operation_key="bundle-a-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="3:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;3:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="117" rc-code="7" op-status="0" interval="0" last-rc-change="1687288204" exec-time="106" queue-time="0" op-digest="90f1390dea2a7bca12099b080987eae7"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-0_last_0" operation_key="bundle-b-podman-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="45:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;45:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="144" rc-code="0" op-status="0" interval="0" last-rc-change="1687288330" exec-time="1158" queue-time="0" op-digest="94098b91d583b0b8498741508c609a37"/>
+ <lrm_rsc_op id="bundle-b-podman-0_monitor_60000" operation_key="bundle-b-podman-0_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="46:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;46:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="146" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288332" exec-time="182" queue-time="0" op-digest="b7a9f966374c886087fb6f03d6e4c286"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-0" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-0">
+ <lrm_rsc_op id="bundle-b-0_last_0" operation_key="bundle-b-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="47:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;47:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1687288332" exec-time="0" queue-time="0" op-digest="06b6a770601e99a2d691d4cf853acceb" op-force-restart=" port server " op-restart-digest="5b00a5a6fae3dc7e25288679ad0c92ce"/>
+ <lrm_rsc_op id="bundle-b-0_monitor_30000" operation_key="bundle-b-0_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="57:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;57:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="21" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288333" exec-time="0" queue-time="0" op-digest="b4ca332a542717c28b117ec80e27b838"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-1" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-1">
+ <lrm_rsc_op id="bundle-a-1_last_0" operation_key="bundle-a-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;8:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="10" rc-code="7" op-status="0" interval="0" last-rc-change="1687248552" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-2_last_0" operation_key="bundle-a-podman-2_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="17:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;17:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="107" rc-code="0" op-status="0" interval="0" last-rc-change="1687248553" exec-time="10540" queue-time="0" op-digest="a5415a2b8dfba2a5741d70bab5591c1b"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-1" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-1">
+ <lrm_rsc_op id="bundle-b-1_last_0" operation_key="bundle-b-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="28:99:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;28:99:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687248540" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-1_last_0" operation_key="bundle-b-podman-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="3:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;3:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="128" rc-code="7" op-status="0" interval="0" last-rc-change="1687288263" exec-time="240" queue-time="0" op-digest="e1aa4eab4c9e9334ee91c66cce449b13"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-0_last_0" operation_key="bundle-a-podman-0_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;11:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="145" rc-code="0" op-status="0" interval="0" last-rc-change="1687288331" exec-time="1143" queue-time="0" op-digest="bc99a652606652014a6cb5f743b5d75d"/>
+ <lrm_rsc_op id="bundle-a-podman-0_monitor_60000" operation_key="bundle-a-podman-0_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="147" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288332" exec-time="161" queue-time="0" op-digest="d39778305ffa599cc7f7a94b47d18783"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-2" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-2">
+ <lrm_rsc_op id="bundle-b-2_last_0" operation_key="bundle-b-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="37:97:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;37:97:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="6" rc-code="7" op-status="0" interval="0" last-rc-change="1687248528" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-2" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-2">
+ <lrm_rsc_op id="bundle-a-2_last_0" operation_key="bundle-a-2_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="18:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;18:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1687248553" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-promotion-bundle-a-rsc" name="master-bundle-a-rsc" value="12"/>
+ <nvpair id="status-2-promotion-bundle-b-rsc" name="master-bundle-b-rsc" value="13"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-2_last_0" operation_key="bundle-b-podman-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="53:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;53:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="141" rc-code="0" op-status="0" interval="0" last-rc-change="1687288330" exec-time="752" queue-time="0" op-digest="35320d55914b88e0c9d4d13b574cef8a"/>
+ <lrm_rsc_op id="bundle-b-podman-2_monitor_60000" operation_key="bundle-b-podman-2_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="54:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;54:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="142" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288331" exec-time="120" queue-time="0" op-digest="62d15df41b3f596ca94b0dbe7edda857"/>
+ </lrm_resource>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;12:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="58" rc-code="0" op-status="0" interval="0" last-rc-change="1687226745" exec-time="33" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;4:69:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="59" rc-code="0" op-status="0" interval="120000" last-rc-change="1687226745" exec-time="27" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-0" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-0">
+ <lrm_rsc_op id="bundle-a-0_last_0" operation_key="bundle-a-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:101:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;11:101:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687248551" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-1_last_0" operation_key="bundle-a-podman-1_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="13:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;13:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="109" rc-code="0" op-status="0" interval="0" last-rc-change="1687248553" exec-time="10646" queue-time="0" op-digest="90f1390dea2a7bca12099b080987eae7"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-0_last_0" operation_key="bundle-b-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="5:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;5:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="126" rc-code="7" op-status="0" interval="0" last-rc-change="1687288263" exec-time="124" queue-time="0" op-digest="cad283aeb094013845bac465ab6a198d"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-0" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-0">
+ <lrm_rsc_op id="bundle-b-0_last_0" operation_key="bundle-b-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="13:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;13:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687248552" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-1" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-1">
+ <lrm_rsc_op id="bundle-a-1_last_0" operation_key="bundle-a-1_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="14:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;14:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1687248553" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-2_last_0" operation_key="bundle-a-podman-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="21:154:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;21:154:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="135" rc-code="0" op-status="0" interval="0" last-rc-change="1687288263" exec-time="1059" queue-time="0" op-digest="a5415a2b8dfba2a5741d70bab5591c1b"/>
+ <lrm_rsc_op id="bundle-a-podman-2_monitor_60000" operation_key="bundle-a-podman-2_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="19:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;19:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="137" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288265" exec-time="284" queue-time="0" op-digest="77326e7c2187a5c4fe386812dfcd4118"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-1" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-1">
+ <lrm_rsc_op id="bundle-b-1_last_0" operation_key="bundle-b-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="33:99:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;33:99:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687248540" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-1_last_0" operation_key="bundle-b-podman-1_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="4:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;4:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="140" rc-code="0" op-status="0" interval="0" last-rc-change="1687288267" exec-time="216" queue-time="0" op-digest="e1aa4eab4c9e9334ee91c66cce449b13"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-0_last_0" operation_key="bundle-a-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="4:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;4:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="115" rc-code="7" op-status="0" interval="0" last-rc-change="1687288204" exec-time="101" queue-time="0" op-digest="bc99a652606652014a6cb5f743b5d75d"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-2" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-2">
+ <lrm_rsc_op id="bundle-b-2_last_0" operation_key="bundle-b-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="55:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;55:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="17" rc-code="0" op-status="0" interval="0" last-rc-change="1687288331" exec-time="0" queue-time="0" op-digest="077939dc4a5dbbd799045f9120d057b4" op-force-restart=" port server " op-restart-digest="5b00a5a6fae3dc7e25288679ad0c92ce"/>
+ <lrm_rsc_op id="bundle-b-2_monitor_30000" operation_key="bundle-b-2_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="67:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;67:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="18" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288333" exec-time="0" queue-time="0" op-digest="f9f13135e91dabbc6c77a3fd4b23ab80"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-2" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-2">
+ <lrm_rsc_op id="bundle-a-2_last_0" operation_key="bundle-a-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="20:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;20:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="15" rc-code="0" op-status="0" interval="0" last-rc-change="1687288265" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="bundle-a-2_monitor_30000" operation_key="bundle-a-2_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="23:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;23:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="16" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288267" exec-time="0" queue-time="0" op-digest="3929eec440004bca31f813a8e6097506"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-0" uname="base-bundle-0" in_ccm="false" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="base-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="124:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;124:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="1035" rc-code="0" op-status="0" interval="0" last-rc-change="1687248529" exec-time="180" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;17:31:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687218076" exec-time="15" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="3" uname="node3" crmd="online" crm-debug-origin="do_state_transition" in_ccm="true" join="member" expected="member">
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-promotion-bundle-a-rsc" name="master-bundle-a-rsc" value="13"/>
+ <nvpair id="status-3-promotion-bundle-b-rsc" name="master-bundle-b-rsc" value="11"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-2_last_0" operation_key="bundle-b-podman-2_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="6:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;6:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="111" rc-code="0" op-status="0" interval="0" last-rc-change="1687288267" exec-time="243" queue-time="0" op-digest="4e4b17530bea4f6fd27df1cf68bd9f22"/>
+ </lrm_resource>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;11:36:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687219634" exec-time="5" queue-time="0" op-digest="6197322ae276dfeb4a212d09787f9738"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-0" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-0">
+ <lrm_rsc_op id="bundle-a-0_last_0" operation_key="bundle-a-0_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="10:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;10:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1687248552" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-1_last_0" operation_key="bundle-a-podman-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="17:154:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;17:154:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="106" rc-code="0" op-status="0" interval="0" last-rc-change="1687288263" exec-time="1002" queue-time="0" op-digest="90f1390dea2a7bca12099b080987eae7"/>
+ <lrm_rsc_op id="bundle-a-podman-1_monitor_60000" operation_key="bundle-a-podman-1_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="14:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;14:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="108" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288264" exec-time="156" queue-time="0" op-digest="34c506d61f8ef4e0d77193cce6e838ce"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-0_last_0" operation_key="bundle-b-podman-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="8:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;8:154:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="97" rc-code="7" op-status="0" interval="0" last-rc-change="1687288263" exec-time="119" queue-time="0" op-digest="cad283aeb094013845bac465ab6a198d"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-0" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-0">
+ <lrm_rsc_op id="bundle-b-0_last_0" operation_key="bundle-b-0_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="16:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;16:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687248551" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-2" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-2_last_0" operation_key="bundle-a-podman-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="7:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;7:153:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="90" rc-code="7" op-status="0" interval="0" last-rc-change="1687288203" exec-time="78" queue-time="0" op-digest="a5415a2b8dfba2a5741d70bab5591c1b"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-1" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-1">
+ <lrm_rsc_op id="bundle-b-1_last_0" operation_key="bundle-b-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="51:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;51:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="17" rc-code="0" op-status="0" interval="0" last-rc-change="1687288331" exec-time="0" queue-time="0" op-digest="aeee9bdab74d90d67c45ff8a2e53b020" op-force-restart=" port server " op-restart-digest="5b00a5a6fae3dc7e25288679ad0c92ce"/>
+ <lrm_rsc_op id="bundle-b-1_monitor_30000" operation_key="bundle-b-1_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="62:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;62:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="18" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288333" exec-time="0" queue-time="0" op-digest="6a12d3c71ed94e6f961711073ca69f24"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-podman-1" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-b-podman-1_last_0" operation_key="bundle-b-podman-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="49:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;49:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="112" rc-code="0" op-status="0" interval="0" last-rc-change="1687288330" exec-time="821" queue-time="0" op-digest="9ba708e8e80ce11a8a1ca4908e76e75e"/>
+ <lrm_rsc_op id="bundle-b-podman-1_monitor_60000" operation_key="bundle-b-podman-1_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="50:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;50:161:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="113" rc-code="0" op-status="0" interval="60000" last-rc-change="1687288331" exec-time="171" queue-time="0" op-digest="12e3bf09305d4a76f940376759128ae1"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-1" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-1">
+ <lrm_rsc_op id="bundle-a-1_last_0" operation_key="bundle-a-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="15:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;15:155:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="15" rc-code="0" op-status="0" interval="0" last-rc-change="1687288264" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="bundle-a-1_monitor_30000" operation_key="bundle-a-1_monitor_30000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="18:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;18:156:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="16" rc-code="0" op-status="0" interval="30000" last-rc-change="1687288267" exec-time="0" queue-time="0" op-digest="354b9acaa7ea1113d708dc11a1d6bbfa"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-podman-0" type="podman" class="ocf" provider="heartbeat">
+ <lrm_rsc_op id="bundle-a-podman-0_last_0" operation_key="bundle-a-podman-0_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="9:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;9:105:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="80" rc-code="0" op-status="0" interval="0" last-rc-change="1687248552" exec-time="10639" queue-time="0" op-digest="bc99a652606652014a6cb5f743b5d75d"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-b-2" type="remote" class="ocf" provider="pacemaker" container="bundle-b-podman-2">
+ <lrm_rsc_op id="bundle-b-2_last_0" operation_key="bundle-b-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="49:97:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;49:97:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="6" rc-code="7" op-status="0" interval="0" last-rc-change="1687248528" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="bundle-a-2" type="remote" class="ocf" provider="pacemaker" container="bundle-a-podman-2">
+ <lrm_rsc_op id="bundle-a-2_last_0" operation_key="bundle-a-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="15:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:7;15:103:7:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="11" rc-code="7" op-status="0" interval="0" last-rc-change="1687248551" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-1" uname="base-bundle-1" in_ccm="false" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="base-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="125:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;125:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="1039" rc-code="0" op-status="0" interval="0" last-rc-change="1687248529" exec-time="177" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;35:56:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="92" rc-code="0" op-status="0" interval="16000" last-rc-change="1687220329" exec-time="43" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-2" uname="base-bundle-2" in_ccm="false" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="base-bundle-2">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="126:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;126:97:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="991" rc-code="0" op-status="0" interval="0" last-rc-change="1687248528" exec-time="201" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_15000" operation_key="base_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:8;33:44:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="36" rc-code="8" op-status="0" interval="15000" last-rc-change="1687220059" exec-time="13" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="bundle-a-1" uname="bundle-a-1" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="bundle-a-1">
+ <lrm_resources>
+ <lrm_resource id="bundle-a-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-a-rsc_last_0" operation_key="bundle-a-rsc_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="40:168:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;40:168:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="25" rc-code="0" op-status="0" interval="0" last-rc-change="1687288549" exec-time="216" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-a-rsc_monitor_15000" operation_key="bundle-a-rsc_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="40:169:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:8;40:169:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="31" rc-code="8" op-status="0" interval="15000" last-rc-change="1687288549" exec-time="9" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-a-1">
+ <instance_attributes id="status-bundle-a-1">
+ <nvpair id="status-bundle-a-1-master-bundle-a-rsc" name="master-bundle-a-rsc" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="bundle-b-2" uname="bundle-b-2" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="bundle-b-2">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-b-rsc_last_0" operation_key="bundle-b-rsc_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="76:168:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;76:168:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="23" rc-code="0" op-status="0" interval="0" last-rc-change="1687288550" exec-time="219" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-b-rsc_monitor_15000" operation_key="bundle-b-rsc_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="77:169:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:8;77:169:8:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="29" rc-code="8" op-status="0" interval="15000" last-rc-change="1687288550" exec-time="10" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-b-2">
+ <instance_attributes id="status-bundle-b-2">
+ <nvpair id="status-bundle-b-2-master-bundle-b-rsc" name="master-bundle-b-rsc" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="bundle-b-0" uname="bundle-b-0" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="bundle-b-0">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-b-rsc_last_0" operation_key="bundle-b-rsc_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="69:163:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;69:163:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687288333" exec-time="290" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-b-rsc_monitor_16000" operation_key="bundle-b-rsc_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="71:164:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;71:164:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687288334" exec-time="12" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-b-0">
+ <instance_attributes id="status-bundle-b-0">
+ <nvpair id="status-bundle-b-0-master-bundle-b-rsc" name="master-bundle-b-rsc" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="bundle-b-1" uname="bundle-b-1" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="bundle-b-1">
+ <lrm_resources>
+ <lrm_resource id="bundle-b-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-b-rsc_last_0" operation_key="bundle-b-rsc_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="72:164:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;72:164:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687288333" exec-time="274" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-b-rsc_monitor_16000" operation_key="bundle-b-rsc_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="74:165:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;74:165:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node3" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687288334" exec-time="9" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-b-1">
+ <instance_attributes id="status-bundle-b-1">
+ <nvpair id="status-bundle-b-1-master-bundle-b-rsc" name="master-bundle-b-rsc" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="bundle-a-0" uname="bundle-a-0" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="bundle-a-0">
+ <lrm_resources>
+ <lrm_resource id="bundle-a-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-a-rsc_last_0" operation_key="bundle-a-rsc_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="39:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;39:162:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687288333" exec-time="250" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-a-rsc_monitor_16000" operation_key="bundle-a-rsc_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="36:163:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;36:163:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687288333" exec-time="10" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-a-0">
+ <instance_attributes id="status-bundle-a-0">
+ <nvpair id="status-bundle-a-0-master-bundle-a-rsc" name="master-bundle-a-rsc" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="bundle-a-2" uname="bundle-a-2" in_ccm="true" crm-debug-origin="do_state_transition" node_fenced="0">
+ <lrm id="bundle-a-2">
+ <lrm_resources>
+ <lrm_resource id="bundle-a-rsc" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="bundle-a-rsc_last_0" operation_key="bundle-a-rsc_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="29:157:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;29:157:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687288268" exec-time="236" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bundle-a-rsc_monitor_16000" operation_key="bundle-a-rsc_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="30:158:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" transition-magic="0:0;30:158:0:20fd8985-857e-43d6-9bc5-45c5cd86e96e" exit-reason="" on_node="node2" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687288268" exec-time="10" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="bundle-a-2">
+ <instance_attributes id="status-bundle-a-2">
+ <nvpair id="status-bundle-a-2-master-bundle-a-rsc" name="master-bundle-a-rsc" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/bundle-promoted-location-1.xml b/cts/scheduler/xml/bundle-promoted-location-1.xml
new file mode 100644
index 0000000..bba9980
--- /dev/null
+++ b/cts/scheduler/xml/bundle-promoted-location-1.xml
@@ -0,0 +1,221 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="135" num_updates="0" admin_epoch="0" cib-last-written="Thu Jun 22 17:00:31 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * A promotable bundle (base-bundle) is promoted on node3 and unpromoted
+ on node1 and node2
+ * There is a positive location constraint (score=5000) for the bundle,
+ preferring node2
+ * There are no other constraints
+ * There is no stickiness
+
+ In this situation, the bundle should demote on node3 and promote on
+ node2.
+
+       This test's output is incorrect: the bundle remains promoted on node3
+       and unpromoted on node2.
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687288330"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1">
+ <instance_attributes id="nodes-1"/>
+ </node>
+ <node id="2" uname="node2">
+ <instance_attributes id="nodes-2"/>
+ </node>
+ <node id="3" uname="node3">
+ <instance_attributes id="nodes-3"/>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <bundle id="base-bundle">
+ <meta_attributes id="base-bundle-meta_attributes">
+ <nvpair id="base-bundle-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ <nvpair id="base-bundle-meta_attributes-promotable" name="promotable" value="true"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3121"/>
+ <primitive id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="base-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="base-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ </resources>
+ <constraints>
+ <rsc_location id="location-base-bundle-node2-5000" rsc="base-bundle" node="node2" score="5000"/>
+ </constraints>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="1:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;1:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="2" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="2:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;2:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="105" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="4:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;4:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="114" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="21:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;21:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="20" rc-code="0" op-status="0" interval="0" last-rc-change="1687478431" exec-time="1223" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ <lrm_rsc_op id="base-bundle-podman-2_monitor_60000" operation_key="base-bundle-podman-2_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="22:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;22:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="21" rc-code="0" op-status="0" interval="60000" last-rc-change="1687478432" exec-time="176" queue-time="0" op-digest="8eeca5a30b14f3d9ef7d2ddbd16c2e05"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="5:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;5:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="3:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;3:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="2" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="23:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;23:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="4" rc-code="0" op-status="0" interval="0" last-rc-change="1687478432" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-2_monitor_30000" operation_key="base-bundle-2_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="24:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;24:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="5" rc-code="0" op-status="0" interval="30000" last-rc-change="1687478435" exec-time="0" queue-time="0" op-digest="6f3c7e233bacb8420fef5f9581190d00"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="3" uname="node3" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;15:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="3" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="24:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;24:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1687330528" exec-time="663" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ <lrm_rsc_op id="base-bundle-podman-0_monitor_60000" operation_key="base-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="25:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;25:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="20" rc-code="0" op-status="0" interval="60000" last-rc-change="1687330529" exec-time="118" queue-time="0" op-digest="902512fcf3e4556d9585c44184665d8c"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="18:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;18:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="90" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;20:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="71" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="19:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;19:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="26:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;26:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-0_monitor_30000" operation_key="base-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;15:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687330531" exec-time="0" queue-time="0" op-digest="354b9acaa7ea1113d708dc11a1d6bbfa"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;10:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687478432" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="22:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;22:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="33" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="23:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;23:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="19" rc-code="0" op-status="0" interval="120000" last-rc-change="1687330529" exec-time="26" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="9:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;9:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="80" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="28:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;28:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="20" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="612" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ <lrm_rsc_op id="base-bundle-podman-1_monitor_60000" operation_key="base-bundle-podman-1_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="29:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;29:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="21" rc-code="0" op-status="0" interval="60000" last-rc-change="1687330529" exec-time="210" queue-time="0" op-digest="f0ef4729d120aa3f5d938cabca4d06c7"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="13:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;13:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="18" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="78" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="30:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;30:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-1_monitor_30000" operation_key="base-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;20:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687330532" exec-time="0" queue-time="0" op-digest="3929eec440004bca31f813a8e6097506"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;10:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="3" rc-code="7" op-status="0" interval="0" last-rc-change="1687330530" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="9:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;9:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687478432" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-0" uname="base-bundle-0" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="31:176:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;31:176:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="4956" rc-code="0" op-status="0" interval="0" last-rc-change="1687478477" exec-time="210" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_15000" operation_key="base_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="32:177:8:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:8;32:177:8:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="4962" rc-code="8" op-status="0" interval="15000" last-rc-change="1687478477" exec-time="21" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-0">
+ <instance_attributes id="status-base-bundle-0">
+ <nvpair id="status-base-bundle-0-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-1" uname="base-bundle-1" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="27:2:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;27:2:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687330532" exec-time="246" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="30:167:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;30:167:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="4922" rc-code="0" op-status="0" interval="16000" last-rc-change="1687477999" exec-time="37" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-1">
+ <instance_attributes id="status-base-bundle-1">
+ <nvpair id="status-base-bundle-1-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-2" uname="base-bundle-2" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-2">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;35:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687478435" exec-time="259" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="36:173:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;36:173:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687478435" exec-time="12" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-2">
+ <instance_attributes id="status-base-bundle-2">
+ <nvpair id="status-base-bundle-2-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/bundle-promoted-location-2.xml b/cts/scheduler/xml/bundle-promoted-location-2.xml
new file mode 100644
index 0000000..352ea70
--- /dev/null
+++ b/cts/scheduler/xml/bundle-promoted-location-2.xml
@@ -0,0 +1,218 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="136" num_updates="0" admin_epoch="0" cib-last-written="Thu Jun 22 17:00:31 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * A promotable bundle (base-bundle) is promoted on node3 and unpromoted
+ on node1 and node2
+ * There is a negative location constraint (score=-INFINITY) for the
+ bundle, avoiding node3
+ * There are no other constraints
+ * There is no stickiness
+
+ In this situation, the bundle should stop on node3 and promote on some
+ other node.
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687288330"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1">
+ <instance_attributes id="nodes-1"/>
+ </node>
+ <node id="2" uname="node2">
+ <instance_attributes id="nodes-2"/>
+ </node>
+ <node id="3" uname="node3">
+ <instance_attributes id="nodes-3"/>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <bundle id="base-bundle">
+ <meta_attributes id="base-bundle-meta_attributes">
+ <nvpair id="base-bundle-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ <nvpair id="base-bundle-meta_attributes-promotable" name="promotable" value="true"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3121"/>
+ <primitive id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="base-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="base-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ </resources>
+ <constraints>
+ <rsc_location id="location-base-bundle-node3--INFINITY" rsc="base-bundle" node="node3" score="-INFINITY"/>
+ </constraints>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="1:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;1:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="2" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="2:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;2:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="105" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="4:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;4:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="114" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="21:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;21:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="20" rc-code="0" op-status="0" interval="0" last-rc-change="1687478431" exec-time="1223" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ <lrm_rsc_op id="base-bundle-podman-2_monitor_60000" operation_key="base-bundle-podman-2_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="22:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;22:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="21" rc-code="0" op-status="0" interval="60000" last-rc-change="1687478432" exec-time="176" queue-time="0" op-digest="8eeca5a30b14f3d9ef7d2ddbd16c2e05"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="5:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;5:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="3:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;3:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="2" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="23:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;23:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="4" rc-code="0" op-status="0" interval="0" last-rc-change="1687478432" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-2_monitor_30000" operation_key="base-bundle-2_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="24:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;24:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="5" rc-code="0" op-status="0" interval="30000" last-rc-change="1687478435" exec-time="0" queue-time="0" op-digest="6f3c7e233bacb8420fef5f9581190d00"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="3" uname="node3" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;15:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="3" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="24:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;24:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1687330528" exec-time="663" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ <lrm_rsc_op id="base-bundle-podman-0_monitor_60000" operation_key="base-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="25:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;25:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="20" rc-code="0" op-status="0" interval="60000" last-rc-change="1687330529" exec-time="118" queue-time="0" op-digest="902512fcf3e4556d9585c44184665d8c"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="18:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;18:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="90" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;20:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="71" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="19:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;19:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="26:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;26:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-0_monitor_30000" operation_key="base-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;15:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687330531" exec-time="0" queue-time="0" op-digest="354b9acaa7ea1113d708dc11a1d6bbfa"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;10:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687478432" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="22:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;22:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="33" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="23:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;23:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="19" rc-code="0" op-status="0" interval="120000" last-rc-change="1687330529" exec-time="26" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="9:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;9:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="80" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="28:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;28:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="20" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="612" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ <lrm_rsc_op id="base-bundle-podman-1_monitor_60000" operation_key="base-bundle-podman-1_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="29:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;29:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="21" rc-code="0" op-status="0" interval="60000" last-rc-change="1687330529" exec-time="210" queue-time="0" op-digest="f0ef4729d120aa3f5d938cabca4d06c7"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="13:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;13:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="18" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="78" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="30:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;30:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-1_monitor_30000" operation_key="base-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;20:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687330532" exec-time="0" queue-time="0" op-digest="3929eec440004bca31f813a8e6097506"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;10:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="3" rc-code="7" op-status="0" interval="0" last-rc-change="1687330530" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="9:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;9:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687478432" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-0" uname="base-bundle-0" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="31:176:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;31:176:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="4956" rc-code="0" op-status="0" interval="0" last-rc-change="1687478477" exec-time="210" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_15000" operation_key="base_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="32:177:8:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:8;32:177:8:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="4962" rc-code="8" op-status="0" interval="15000" last-rc-change="1687478477" exec-time="21" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-0">
+ <instance_attributes id="status-base-bundle-0">
+ <nvpair id="status-base-bundle-0-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-1" uname="base-bundle-1" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="27:2:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;27:2:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687330532" exec-time="246" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="30:167:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;30:167:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="4922" rc-code="0" op-status="0" interval="16000" last-rc-change="1687477999" exec-time="37" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-1">
+ <instance_attributes id="status-base-bundle-1">
+ <nvpair id="status-base-bundle-1-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-2" uname="base-bundle-2" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-2">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;35:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687478435" exec-time="259" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="36:173:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;36:173:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687478435" exec-time="12" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-2">
+ <instance_attributes id="status-base-bundle-2">
+ <nvpair id="status-base-bundle-2-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/bundle-promoted-location-3.xml b/cts/scheduler/xml/bundle-promoted-location-3.xml
new file mode 100644
index 0000000..0954fd5
--- /dev/null
+++ b/cts/scheduler/xml/bundle-promoted-location-3.xml
@@ -0,0 +1,225 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="134" num_updates="40" admin_epoch="0" cib-last-written="Thu Jun 22 17:00:31 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * A promotable bundle (base-bundle) is promoted on node3 and unpromoted
+ on node1 and node2
+ * There is a positive location constraint (score=5000) for the bundle's
+ promoted role, preferring node2
+ * There are no other constraints
+ * There is no stickiness
+
+ In this situation, the bundle should demote on node3 and promote on
+ node2.
+
+        This test documents currently incorrect scheduler behavior: the bundle
+        instead remains promoted on node3 and unpromoted on node2.
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687288330"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1">
+ <instance_attributes id="nodes-1"/>
+ </node>
+ <node id="2" uname="node2">
+ <instance_attributes id="nodes-2"/>
+ </node>
+ <node id="3" uname="node3">
+ <instance_attributes id="nodes-3"/>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <bundle id="base-bundle">
+ <meta_attributes id="base-bundle-meta_attributes">
+ <nvpair id="base-bundle-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ <nvpair id="base-bundle-meta_attributes-promotable" name="promotable" value="true"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3121"/>
+ <primitive id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="base-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="base-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ </resources>
+ <constraints>
+ <rsc_location id="location-base-bundle" rsc="base-bundle">
+ <rule id="location-base-bundle-rule" role="Promoted" score="5000">
+ <expression id="location-base-bundle-rule-expr" operation="eq" attribute="#uname" value="node2"/>
+ </rule>
+ </rsc_location>
+ </constraints>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="1:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;1:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="2" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="2:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;2:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="105" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="4:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;4:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="114" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="21:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;21:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="20" rc-code="0" op-status="0" interval="0" last-rc-change="1687478431" exec-time="1223" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ <lrm_rsc_op id="base-bundle-podman-2_monitor_60000" operation_key="base-bundle-podman-2_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="22:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;22:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="21" rc-code="0" op-status="0" interval="60000" last-rc-change="1687478432" exec-time="176" queue-time="0" op-digest="8eeca5a30b14f3d9ef7d2ddbd16c2e05"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="5:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;5:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="3:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;3:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="2" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="23:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;23:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="4" rc-code="0" op-status="0" interval="0" last-rc-change="1687478432" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-2_monitor_30000" operation_key="base-bundle-2_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="24:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;24:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="5" rc-code="0" op-status="0" interval="30000" last-rc-change="1687478435" exec-time="0" queue-time="0" op-digest="6f3c7e233bacb8420fef5f9581190d00"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="3" uname="node3" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;15:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="3" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="24:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;24:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1687330528" exec-time="663" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ <lrm_rsc_op id="base-bundle-podman-0_monitor_60000" operation_key="base-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="25:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;25:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="20" rc-code="0" op-status="0" interval="60000" last-rc-change="1687330529" exec-time="118" queue-time="0" op-digest="902512fcf3e4556d9585c44184665d8c"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="18:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;18:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="90" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;20:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="71" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="19:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;19:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="26:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;26:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-0_monitor_30000" operation_key="base-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;15:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687330531" exec-time="0" queue-time="0" op-digest="354b9acaa7ea1113d708dc11a1d6bbfa"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;10:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687478432" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="22:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;22:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="33" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="23:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;23:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="19" rc-code="0" op-status="0" interval="120000" last-rc-change="1687330529" exec-time="26" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="9:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;9:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="80" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="28:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;28:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="20" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="612" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ <lrm_rsc_op id="base-bundle-podman-1_monitor_60000" operation_key="base-bundle-podman-1_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="29:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;29:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="21" rc-code="0" op-status="0" interval="60000" last-rc-change="1687330529" exec-time="210" queue-time="0" op-digest="f0ef4729d120aa3f5d938cabca4d06c7"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="13:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;13:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="18" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="78" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="30:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;30:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-1_monitor_30000" operation_key="base-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;20:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687330532" exec-time="0" queue-time="0" op-digest="3929eec440004bca31f813a8e6097506"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;10:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="3" rc-code="7" op-status="0" interval="0" last-rc-change="1687330530" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="9:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;9:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687478432" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-0" uname="base-bundle-0" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="31:176:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;31:176:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="4956" rc-code="0" op-status="0" interval="0" last-rc-change="1687478477" exec-time="210" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_15000" operation_key="base_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="32:177:8:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:8;32:177:8:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="4962" rc-code="8" op-status="0" interval="15000" last-rc-change="1687478477" exec-time="21" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-0">
+ <instance_attributes id="status-base-bundle-0">
+ <nvpair id="status-base-bundle-0-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-1" uname="base-bundle-1" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="27:2:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;27:2:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687330532" exec-time="246" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="30:167:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;30:167:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="4922" rc-code="0" op-status="0" interval="16000" last-rc-change="1687477999" exec-time="37" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-1">
+ <instance_attributes id="status-base-bundle-1">
+ <nvpair id="status-base-bundle-1-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-2" uname="base-bundle-2" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-2">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;35:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687478435" exec-time="259" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="36:173:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;36:173:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687478435" exec-time="12" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-2">
+ <instance_attributes id="status-base-bundle-2">
+ <nvpair id="status-base-bundle-2-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/bundle-promoted-location-4.xml b/cts/scheduler/xml/bundle-promoted-location-4.xml
new file mode 100644
index 0000000..8cfbac1
--- /dev/null
+++ b/cts/scheduler/xml/bundle-promoted-location-4.xml
@@ -0,0 +1,225 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="134" num_updates="40" admin_epoch="0" cib-last-written="Thu Jun 22 17:00:31 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * A promotable bundle (base-bundle) is promoted on node3 and unpromoted
+ on node1 and node2
+ * There is a negative location constraint (score=-INFINITY) for the
+ bundle's promoted role, avoiding node3
+ * There are no other constraints
+ * There is no stickiness
+
+ In this situation, the bundle should demote on node3 and promote on
+ some other node.
+
+      The current behavior is incorrect: the bundle remains promoted on node3
+      and unpromoted on the other nodes (one way to reproduce this is sketched
+      in the note after this file).
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687288330"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1">
+ <instance_attributes id="nodes-1"/>
+ </node>
+ <node id="2" uname="node2">
+ <instance_attributes id="nodes-2"/>
+ </node>
+ <node id="3" uname="node3">
+ <instance_attributes id="nodes-3"/>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <bundle id="base-bundle">
+ <meta_attributes id="base-bundle-meta_attributes">
+ <nvpair id="base-bundle-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ <nvpair id="base-bundle-meta_attributes-promotable" name="promotable" value="true"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3121"/>
+ <primitive id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="base-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="base-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ </resources>
+ <constraints>
+ <rsc_location id="location-base-bundle" rsc="base-bundle">
+ <rule id="location-base-bundle-rule" role="Promoted" score="-INFINITY">
+ <expression id="location-base-bundle-rule-expr" operation="eq" attribute="#uname" value="node3"/>
+ </rule>
+ </rsc_location>
+ </constraints>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="1:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;1:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="2" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="2:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;2:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="105" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="4:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;4:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="114" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="21:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;21:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="20" rc-code="0" op-status="0" interval="0" last-rc-change="1687478431" exec-time="1223" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ <lrm_rsc_op id="base-bundle-podman-2_monitor_60000" operation_key="base-bundle-podman-2_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="22:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;22:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="21" rc-code="0" op-status="0" interval="60000" last-rc-change="1687478432" exec-time="176" queue-time="0" op-digest="8eeca5a30b14f3d9ef7d2ddbd16c2e05"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="5:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;5:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="3:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;3:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="2" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="23:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;23:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="4" rc-code="0" op-status="0" interval="0" last-rc-change="1687478432" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-2_monitor_30000" operation_key="base-bundle-2_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="24:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;24:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="5" rc-code="0" op-status="0" interval="30000" last-rc-change="1687478435" exec-time="0" queue-time="0" op-digest="6f3c7e233bacb8420fef5f9581190d00"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="3" uname="node3" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;15:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="3" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="24:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;24:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1687330528" exec-time="663" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ <lrm_rsc_op id="base-bundle-podman-0_monitor_60000" operation_key="base-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="25:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;25:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="20" rc-code="0" op-status="0" interval="60000" last-rc-change="1687330529" exec-time="118" queue-time="0" op-digest="902512fcf3e4556d9585c44184665d8c"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="18:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;18:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="90" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;20:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="71" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="19:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;19:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="26:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;26:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-0_monitor_30000" operation_key="base-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;15:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687330531" exec-time="0" queue-time="0" op-digest="354b9acaa7ea1113d708dc11a1d6bbfa"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;10:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687478432" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="22:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;22:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="33" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="23:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;23:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="19" rc-code="0" op-status="0" interval="120000" last-rc-change="1687330529" exec-time="26" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="9:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;9:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="80" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="28:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;28:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="20" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="612" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ <lrm_rsc_op id="base-bundle-podman-1_monitor_60000" operation_key="base-bundle-podman-1_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="29:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;29:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="21" rc-code="0" op-status="0" interval="60000" last-rc-change="1687330529" exec-time="210" queue-time="0" op-digest="f0ef4729d120aa3f5d938cabca4d06c7"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="13:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;13:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="18" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="78" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="30:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;30:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-1_monitor_30000" operation_key="base-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;20:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687330532" exec-time="0" queue-time="0" op-digest="3929eec440004bca31f813a8e6097506"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;10:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="3" rc-code="7" op-status="0" interval="0" last-rc-change="1687330530" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="9:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;9:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687478432" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-0" uname="base-bundle-0" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="31:176:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;31:176:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="4956" rc-code="0" op-status="0" interval="0" last-rc-change="1687478477" exec-time="210" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_15000" operation_key="base_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="32:177:8:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:8;32:177:8:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="4962" rc-code="8" op-status="0" interval="15000" last-rc-change="1687478477" exec-time="21" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-0">
+ <instance_attributes id="status-base-bundle-0">
+ <nvpair id="status-base-bundle-0-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-1" uname="base-bundle-1" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="27:2:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;27:2:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687330532" exec-time="246" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="30:167:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;30:167:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="4922" rc-code="0" op-status="0" interval="16000" last-rc-change="1687477999" exec-time="37" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-1">
+ <instance_attributes id="status-base-bundle-1">
+ <nvpair id="status-base-bundle-1-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-2" uname="base-bundle-2" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-2">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;35:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687478435" exec-time="259" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="36:173:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;36:173:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687478435" exec-time="12" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-2">
+ <instance_attributes id="status-base-bundle-2">
+ <nvpair id="status-base-bundle-2-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
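A quick way to see what the scheduler decides for the
bundle-promoted-location-4.xml CIB above is to point crm_simulate at the file,
or to run the single named test through cts-scheduler. The commands below are
only a sketch: the paths assume a built source checkout, and the flags used
are the long-standing crm_simulate options -x (load a CIB from a file),
-S (simulate the transition) and -s (show allocation scores).

    # Compute and display the transition for this CIB, with allocation scores
    crm_simulate -x cts/scheduler/xml/bundle-promoted-location-4.xml -S -s

    # Alternatively, run just this scheduler regression test
    cts/cts-scheduler --run bundle-promoted-location-4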
diff --git a/cts/scheduler/xml/bundle-promoted-location-5.xml b/cts/scheduler/xml/bundle-promoted-location-5.xml
new file mode 100644
index 0000000..4cb76fe
--- /dev/null
+++ b/cts/scheduler/xml/bundle-promoted-location-5.xml
@@ -0,0 +1,231 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="134" num_updates="40" admin_epoch="0" cib-last-written="Thu Jun 22 17:00:31 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * A promotable bundle (base-bundle) is promoted on node3 and unpromoted
+ on node1 and node2
+ * There is a positive location constraint (score=5000) for the bundle's
+ unpromoted role, preferring node3
+ * There are no other constraints
+ * There is no stickiness
+ * base-bundle has a higher promotion score on node3 compared to the
+ other nodes.
+
+      In this situation, the bundle should remain promoted on node3 and
+      unpromoted on the other nodes. Even the instance that ends up promoted
+      must first be started in the unpromoted role, so this score applies to
+      all running instances.
+
+      This behavior is questionable, though. Another possibility would be for
+      the bundle to demote on node3 and promote somewhere else, on the
+      reasoning that the constraint may express a preference for the node3
+      instance to be unpromoted in the stable state, after an instance has
+      been chosen for promotion.
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687288330"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1">
+ <instance_attributes id="nodes-1"/>
+ </node>
+ <node id="2" uname="node2">
+ <instance_attributes id="nodes-2"/>
+ </node>
+ <node id="3" uname="node3">
+ <instance_attributes id="nodes-3"/>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <bundle id="base-bundle">
+ <meta_attributes id="base-bundle-meta_attributes">
+ <nvpair id="base-bundle-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ <nvpair id="base-bundle-meta_attributes-promotable" name="promotable" value="true"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3121"/>
+ <primitive id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="base-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="base-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ </resources>
+ <constraints>
+ <rsc_location id="location-base-bundle" rsc="base-bundle">
+ <rule id="location-base-bundle-rule" role="Unpromoted" score="5000">
+ <expression id="location-base-bundle-rule-expr" operation="eq" attribute="#uname" value="node3"/>
+ </rule>
+ </rsc_location>
+ </constraints>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="1:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;1:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="2" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="2:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;2:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="105" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="4:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;4:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="114" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="21:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;21:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="20" rc-code="0" op-status="0" interval="0" last-rc-change="1687478431" exec-time="1223" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ <lrm_rsc_op id="base-bundle-podman-2_monitor_60000" operation_key="base-bundle-podman-2_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="22:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;22:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="21" rc-code="0" op-status="0" interval="60000" last-rc-change="1687478432" exec-time="176" queue-time="0" op-digest="8eeca5a30b14f3d9ef7d2ddbd16c2e05"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="5:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;5:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="3:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;3:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="2" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="23:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;23:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="4" rc-code="0" op-status="0" interval="0" last-rc-change="1687478432" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-2_monitor_30000" operation_key="base-bundle-2_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="24:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;24:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="5" rc-code="0" op-status="0" interval="30000" last-rc-change="1687478435" exec-time="0" queue-time="0" op-digest="6f3c7e233bacb8420fef5f9581190d00"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="3" uname="node3" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;15:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="3" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="24:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;24:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1687330528" exec-time="663" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ <lrm_rsc_op id="base-bundle-podman-0_monitor_60000" operation_key="base-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="25:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;25:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="20" rc-code="0" op-status="0" interval="60000" last-rc-change="1687330529" exec-time="118" queue-time="0" op-digest="902512fcf3e4556d9585c44184665d8c"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="18:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;18:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="90" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;20:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="71" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="19:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;19:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="26:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;26:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-0_monitor_30000" operation_key="base-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;15:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687330531" exec-time="0" queue-time="0" op-digest="354b9acaa7ea1113d708dc11a1d6bbfa"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;10:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687478432" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="22:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;22:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="33" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="23:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;23:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="19" rc-code="0" op-status="0" interval="120000" last-rc-change="1687330529" exec-time="26" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="9:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;9:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="80" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="28:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;28:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="20" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="612" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ <lrm_rsc_op id="base-bundle-podman-1_monitor_60000" operation_key="base-bundle-podman-1_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="29:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;29:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="21" rc-code="0" op-status="0" interval="60000" last-rc-change="1687330529" exec-time="210" queue-time="0" op-digest="f0ef4729d120aa3f5d938cabca4d06c7"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="13:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;13:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="18" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="78" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="30:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;30:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-1_monitor_30000" operation_key="base-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;20:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687330532" exec-time="0" queue-time="0" op-digest="3929eec440004bca31f813a8e6097506"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;10:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="3" rc-code="7" op-status="0" interval="0" last-rc-change="1687330530" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="9:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;9:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687478432" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-0" uname="base-bundle-0" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="31:176:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;31:176:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="4956" rc-code="0" op-status="0" interval="0" last-rc-change="1687478477" exec-time="210" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_15000" operation_key="base_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="32:177:8:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:8;32:177:8:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="4962" rc-code="8" op-status="0" interval="15000" last-rc-change="1687478477" exec-time="21" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-0">
+ <instance_attributes id="status-base-bundle-0">
+ <nvpair id="status-base-bundle-0-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-1" uname="base-bundle-1" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="27:2:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;27:2:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687330532" exec-time="246" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="30:167:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;30:167:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="4922" rc-code="0" op-status="0" interval="16000" last-rc-change="1687477999" exec-time="37" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-1">
+ <instance_attributes id="status-base-bundle-1">
+ <nvpair id="status-base-bundle-1-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-2" uname="base-bundle-2" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-2">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;35:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687478435" exec-time="259" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="36:173:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;36:173:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687478435" exec-time="12" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-2">
+ <instance_attributes id="status-base-bundle-2">
+ <nvpair id="status-base-bundle-2-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/bundle-promoted-location-6.xml b/cts/scheduler/xml/bundle-promoted-location-6.xml
new file mode 100644
index 0000000..cab69de
--- /dev/null
+++ b/cts/scheduler/xml/bundle-promoted-location-6.xml
@@ -0,0 +1,224 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="134" num_updates="40" admin_epoch="0" cib-last-written="Thu Jun 22 17:00:31 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * A promotable bundle (base-bundle) is promoted on node3 and unpromoted
+ on node1 and node2
+ * There is a negative location constraint (score=-INFINITY) for the
+ bundle's unpromoted role, avoiding node2
+ * There are no other constraints
+ * There is no stickiness
+
+ In this situation, the bundle should remain promoted on node3 and
+ unpromoted on node1, and it should stop on node2. A negative location
+ constraint for the unpromoted role is a complete ban, because an
+ instance must be started as unpromoted before it can be promoted.
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687288330"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1">
+ <instance_attributes id="nodes-1"/>
+ </node>
+ <node id="2" uname="node2">
+ <instance_attributes id="nodes-2"/>
+ </node>
+ <node id="3" uname="node3">
+ <instance_attributes id="nodes-3"/>
+ </node>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <bundle id="base-bundle">
+ <meta_attributes id="base-bundle-meta_attributes">
+ <nvpair id="base-bundle-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ <nvpair id="base-bundle-meta_attributes-promotable" name="promotable" value="true"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3121"/>
+ <primitive id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="base-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="base-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ </resources>
+ <constraints>
+ <rsc_location id="location-base-bundle" rsc="base-bundle">
+ <rule id="location-base-bundle-rule" role="Unpromoted" score="-INFINITY">
+ <expression id="location-base-bundle-rule-expr" operation="eq" attribute="#uname" value="node2"/>
+ </rule>
+ </rsc_location>
+ </constraints>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="1:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;1:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="2" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="2:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;2:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="105" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="4:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;4:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="114" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="21:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;21:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="20" rc-code="0" op-status="0" interval="0" last-rc-change="1687478431" exec-time="1223" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ <lrm_rsc_op id="base-bundle-podman-2_monitor_60000" operation_key="base-bundle-podman-2_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="22:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;22:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="21" rc-code="0" op-status="0" interval="60000" last-rc-change="1687478432" exec-time="176" queue-time="0" op-digest="8eeca5a30b14f3d9ef7d2ddbd16c2e05"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="5:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;5:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="3:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;3:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="2" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="23:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;23:171:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="4" rc-code="0" op-status="0" interval="0" last-rc-change="1687478432" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-2_monitor_30000" operation_key="base-bundle-2_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="24:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;24:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="5" rc-code="0" op-status="0" interval="30000" last-rc-change="1687478435" exec-time="0" queue-time="0" op-digest="6f3c7e233bacb8420fef5f9581190d00"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="3" uname="node3" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;15:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="3" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="24:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;24:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1687330528" exec-time="663" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ <lrm_rsc_op id="base-bundle-podman-0_monitor_60000" operation_key="base-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="25:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;25:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="20" rc-code="0" op-status="0" interval="60000" last-rc-change="1687330529" exec-time="118" queue-time="0" op-digest="902512fcf3e4556d9585c44184665d8c"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="18:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;18:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="90" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;20:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="71" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="19:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;19:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="26:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;26:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-0_monitor_30000" operation_key="base-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;15:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687330531" exec-time="0" queue-time="0" op-digest="354b9acaa7ea1113d708dc11a1d6bbfa"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;10:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687478432" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="22:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;22:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="33" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="23:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;23:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="19" rc-code="0" op-status="0" interval="120000" last-rc-change="1687330529" exec-time="26" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="9:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;9:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="80" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="28:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;28:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="20" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="612" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ <lrm_rsc_op id="base-bundle-podman-1_monitor_60000" operation_key="base-bundle-podman-1_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="29:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;29:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="21" rc-code="0" op-status="0" interval="60000" last-rc-change="1687330529" exec-time="210" queue-time="0" op-digest="f0ef4729d120aa3f5d938cabca4d06c7"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="13:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;13:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="18" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="78" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="30:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;30:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-1_monitor_30000" operation_key="base-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;20:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687330532" exec-time="0" queue-time="0" op-digest="3929eec440004bca31f813a8e6097506"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;10:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="3" rc-code="7" op-status="0" interval="0" last-rc-change="1687330530" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-2" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-2">
+ <lrm_rsc_op id="base-bundle-2_last_0" operation_key="base-bundle-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="9:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;9:171:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1687478432" exec-time="0" queue-time="0" op-digest="2b7683df7d64ff71ec5fd3675fd12017" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-0" uname="base-bundle-0" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="31:176:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;31:176:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="4956" rc-code="0" op-status="0" interval="0" last-rc-change="1687478477" exec-time="210" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_15000" operation_key="base_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="32:177:8:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:8;32:177:8:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="4962" rc-code="8" op-status="0" interval="15000" last-rc-change="1687478477" exec-time="21" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-0">
+ <instance_attributes id="status-base-bundle-0">
+ <nvpair id="status-base-bundle-0-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-1" uname="base-bundle-1" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="27:2:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;27:2:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687330532" exec-time="246" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="30:167:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;30:167:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="4922" rc-code="0" op-status="0" interval="16000" last-rc-change="1687477999" exec-time="37" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-1">
+ <instance_attributes id="status-base-bundle-1">
+ <nvpair id="status-base-bundle-1-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-2" uname="base-bundle-2" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-2">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="35:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;35:172:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687478435" exec-time="259" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="36:173:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;36:173:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687478435" exec-time="12" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-2">
+ <instance_attributes id="status-base-bundle-2">
+ <nvpair id="status-base-bundle-2-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/cancel-behind-moving-remote.xml b/cts/scheduler/xml/cancel-behind-moving-remote.xml
index 67e1430..7b88060 100644
--- a/cts/scheduler/xml/cancel-behind-moving-remote.xml
+++ b/cts/scheduler/xml/cancel-behind-moving-remote.xml
@@ -1,5 +1,19 @@
<cib crm_feature_set="3.4.1" validate-with="pacemaker-3.7" epoch="184" num_updates="0" admin_epoch="0" cib-last-written="Mon Feb 15 01:40:51 2021" update-origin="controller-0" update-client="crm_resource" update-user="root" have-quorum="1" dc-uuid="2" execution-date="1613353251">
<configuration>
+ <!-- This test output is incorrect. ip-172.17.1.87 is colocated with the
+ promoted role of ovn-dbs-bundle and is banned from controller-2.
+ ovn-dbs-bundle should promote on either controller-0 or controller-1,
+ so that ip-172.17.1.87 is allowed to run.
+
+ However, ovn-dbs-bundle promotes on controller-2, and ip-172.17.1.87
+ is stopped.
+
+ The output was correct prior to the addition of this comment,
+ immediately after fixing a clone shuffling issue. However, that is
+ believed to be due to luck. Role-based colocations for promotable
+ bundles do not work correctly in general; see the
+ bundle-promoted-*colocation-* tests.
+ -->
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-1.xml b/cts/scheduler/xml/clone-recover-no-shuffle-1.xml
new file mode 100644
index 0000000..a634ff3
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-1.xml
@@ -0,0 +1,113 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="74" num_updates="0" admin_epoch="0" cib-last-written="Tue Jun 20 17:44:24 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="true" dc-uuid="1" execution-date="1687309199">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * An anonymous clone resource (dummy) is stopped on node1 and started
+ on node2 and node3
+ * Clone instances are primitives
+ * There is no stickiness configured
+ * There are no location constraints
+
+ The following should happen:
+ * Instance dummy:0 should remain started on node2
+ * Instance dummy:1 should remain started on node3
+ * Instance dummy:2 should start on node1
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687288330"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <clone id="dummy-clone">
+ <primitive id="dummy" class="ocf" type="Dummy" provider="pacemaker">
+ <operations>
+ <op name="migrate_from" interval="0s" timeout="20s" id="dummy-migrate_from-interval-0s"/>
+ <op name="migrate_to" interval="0s" timeout="20s" id="dummy-migrate_to-interval-0s"/>
+ <op name="monitor" interval="10s" timeout="20s" id="dummy-monitor-interval-10s"/>
+ <op name="reload" interval="0s" timeout="20s" id="dummy-reload-interval-0s"/>
+ <op name="reload-agent" interval="0s" timeout="20s" id="dummy-reload-agent-interval-0s"/>
+ <op name="start" interval="0s" timeout="20s" id="dummy-start-interval-0s"/>
+ <op name="stop" interval="0s" timeout="20s" id="dummy-stop-interval-0s"/>
+ </operations>
+ </primitive>
+ </clone>
+ </resources>
+ <constraints/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="1:1:7:6149c21b-77ec-4d97-8b22-d39d97981ad1" transition-magic="0:7;1:1:7:6149c21b-77ec-4d97-8b22-d39d97981ad1" exit-reason="" on_node="node1" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687308299" exec-time="7" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="2:1:7:6149c21b-77ec-4d97-8b22-d39d97981ad1" transition-magic="0:7;2:1:7:6149c21b-77ec-4d97-8b22-d39d97981ad1" exit-reason="" on_node="node1" call-id="10" rc-code="7" op-status="0" interval="0" last-rc-change="1687308299" exec-time="20" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="7:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" transition-magic="0:0;7:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" exit-reason="" on_node="node2" call-id="11" rc-code="0" op-status="0" interval="0" last-rc-change="1687308299" exec-time="40" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="8:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" transition-magic="0:0;8:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" exit-reason="" on_node="node2" call-id="13" rc-code="0" op-status="0" interval="120000" last-rc-change="1687308299" exec-time="40" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="11:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" transition-magic="0:0;11:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" exit-reason="" on_node="node2" call-id="12" rc-code="0" op-status="0" interval="0" last-rc-change="1687308299" exec-time="28" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" transition-magic="0:0;12:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" exit-reason="" on_node="node2" call-id="14" rc-code="0" op-status="0" interval="10000" last-rc-change="1687308299" exec-time="12" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" op-secure-params=" passwd " op-secure-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="3" uname="node3" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="5:1:7:6149c21b-77ec-4d97-8b22-d39d97981ad1" transition-magic="0:7;5:1:7:6149c21b-77ec-4d97-8b22-d39d97981ad1" exit-reason="" on_node="node3" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687308298" exec-time="1" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="9:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" transition-magic="0:0;9:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" exit-reason="" on_node="node3" call-id="11" rc-code="0" op-status="0" interval="0" last-rc-change="1687308298" exec-time="17" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" transition-magic="0:0;10:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" exit-reason="" on_node="node3" call-id="12" rc-code="0" op-status="0" interval="10000" last-rc-change="1687308298" exec-time="14" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" op-secure-params=" passwd " op-secure-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-10.xml b/cts/scheduler/xml/clone-recover-no-shuffle-10.xml
new file mode 100644
index 0000000..faa202a
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-10.xml
@@ -0,0 +1,120 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="89" num_updates="50" admin_epoch="0" cib-last-written="Tue Jun 20 18:29:07 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="3">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * An anonymous promotable clone resource (dummy) is stopped on node1,
+ promoted on node2, and unpromoted on node3
+ * Clone instances are primitives
+ * There are no location constraints or stickiness configured
+ * dummy has the highest promotion score on node2
+
+ The following should happen:
+ * Instance dummy:0 should remain started (unpromoted) on node3
+ * Instance dummy:1 should remain promoted on node2
+ * Instance dummy:2 should start (unpromoted) on node1
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687288330"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <clone id="dummy-clone">
+ <primitive id="dummy" class="ocf" type="Stateful" provider="pacemaker">
+ <operations>
+ <op name="demote" interval="0s" timeout="10s" id="dummy-demote-interval-0s"/>
+ <op name="monitor" interval="10s" timeout="20s" role="Promoted" id="dummy-monitor-interval-10s"/>
+ <op name="monitor" interval="11s" timeout="20s" role="Unpromoted" id="dummy-monitor-interval-11s"/>
+ <op name="notify" interval="0s" timeout="5s" id="dummy-notify-interval-0s"/>
+ <op name="promote" interval="0s" timeout="10s" id="dummy-promote-interval-0s"/>
+ <op name="reload-agent" interval="0s" timeout="10s" id="dummy-reload-agent-interval-0s"/>
+ <op name="start" interval="0s" timeout="20s" id="dummy-start-interval-0s"/>
+ <op name="stop" interval="0s" timeout="20s" id="dummy-stop-interval-0s"/>
+ </operations>
+ </primitive>
+ <meta_attributes id="dummy-clone-meta_attributes">
+ <nvpair id="dummy-clone-meta_attributes-promotable" name="promotable" value="true"/>
+ </meta_attributes>
+ </clone>
+ </resources>
+ <constraints/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="3" uname="node3" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="5:0:7:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" transition-magic="0:7;5:0:7:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" exit-reason="" on_node="node3" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687310966" exec-time="3" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_demote_0" operation="demote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="7:6:0:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" transition-magic="0:0;7:6:0:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" exit-reason="" on_node="node3" call-id="15" rc-code="0" op-status="0" interval="0" last-rc-change="1687311113" exec-time="39" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_11000" operation_key="dummy_monitor_11000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="9:6:0:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" transition-magic="0:0;9:6:0:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" exit-reason="" on_node="node3" call-id="16" rc-code="0" op-status="0" interval="11000" last-rc-change="1687311113" exec-time="14" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-master-dummy" name="master-dummy" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="1:0:7:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" transition-magic="0:7;1:0:7:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" exit-reason="" on_node="node1" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687310966" exec-time="3" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="2:0:7:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" transition-magic="0:7;2:0:7:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" exit-reason="" on_node="node1" call-id="10" rc-code="7" op-status="0" interval="0" last-rc-change="1687310967" exec-time="19" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-master-dummy" name="master-dummy" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="7:0:0:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" transition-magic="0:0;7:0:0:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" exit-reason="" on_node="node2" call-id="10" rc-code="0" op-status="0" interval="0" last-rc-change="1687310967" exec-time="47" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="8:0:0:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" transition-magic="0:0;8:0:0:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" exit-reason="" on_node="node2" call-id="13" rc-code="0" op-status="0" interval="120000" last-rc-change="1687310967" exec-time="35" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:6:0:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" transition-magic="0:0;12:6:0:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" exit-reason="" on_node="node2" call-id="16" rc-code="0" op-status="0" interval="0" last-rc-change="1687311114" exec-time="23" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="13:6:8:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" transition-magic="0:8;13:6:8:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" exit-reason="" on_node="node2" call-id="17" rc-code="8" op-status="0" interval="10000" last-rc-change="1687311114" exec-time="10" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-master-dummy" name="master-dummy" value="15"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-11.xml b/cts/scheduler/xml/clone-recover-no-shuffle-11.xml
new file mode 100644
index 0000000..43d6d74
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-11.xml
@@ -0,0 +1,153 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="100" num_updates="0" admin_epoch="0" cib-last-written="Tue Jun 20 18:52:50 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * An anonymous promotable clone resource (grp) is stopped on node1,
+ promoted on node2, and unpromoted on node3
+ * Clone instances are resource groups consisting of primitives rsc1 and
+ rsc2
+ * There are no location constraints or stickiness configured
+ * grp has the highest promotion score on node2 (scores for rsc1 + rsc2)
+
+ The following should happen:
+ * Instance grp:0 should remain started (unpromoted) on node3
+ * Instance grp:1 should remain promoted on node2
+ * Instance grp:2 should start (unpromoted) on node1
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687288330"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <clone id="grp-clone">
+ <group id="grp">
+ <primitive id="rsc1" class="ocf" type="Stateful" provider="pacemaker">
+ <operations>
+ <op name="demote" interval="0s" timeout="10s" id="rsc1-demote-interval-0s"/>
+ <op name="monitor" interval="10s" timeout="20s" role="Promoted" id="rsc1-monitor-interval-10s"/>
+ <op name="monitor" interval="11s" timeout="20s" role="Unpromoted" id="rsc1-monitor-interval-11s"/>
+ <op name="notify" interval="0s" timeout="5s" id="rsc1-notify-interval-0s"/>
+ <op name="promote" interval="0s" timeout="10s" id="rsc1-promote-interval-0s"/>
+ <op name="reload-agent" interval="0s" timeout="10s" id="rsc1-reload-agent-interval-0s"/>
+ <op name="start" interval="0s" timeout="20s" id="rsc1-start-interval-0s"/>
+ <op name="stop" interval="0s" timeout="20s" id="rsc1-stop-interval-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="rsc2" class="ocf" type="Stateful" provider="pacemaker">
+ <operations>
+ <op name="demote" interval="0s" timeout="10s" id="rsc2-demote-interval-0s"/>
+ <op name="monitor" interval="10s" timeout="20s" role="Promoted" id="rsc2-monitor-interval-10s"/>
+ <op name="monitor" interval="11s" timeout="20s" role="Unpromoted" id="rsc2-monitor-interval-11s"/>
+ <op name="notify" interval="0s" timeout="5s" id="rsc2-notify-interval-0s"/>
+ <op name="promote" interval="0s" timeout="10s" id="rsc2-promote-interval-0s"/>
+ <op name="reload-agent" interval="0s" timeout="10s" id="rsc2-reload-agent-interval-0s"/>
+ <op name="start" interval="0s" timeout="20s" id="rsc2-start-interval-0s"/>
+ <op name="stop" interval="0s" timeout="20s" id="rsc2-stop-interval-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ <meta_attributes id="grp-clone-meta_attributes">
+ <nvpair id="grp-clone-meta_attributes-promotable" name="promotable" value="true"/>
+ </meta_attributes>
+ </clone>
+ </resources>
+ <constraints/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="3" uname="node3" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-master-rsc1" name="master-rsc1" value="5"/>
+ <nvpair id="status-3-master-rsc2" name="master-rsc2" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="7:0:7:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:7;7:0:7:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node3" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687312090" exec-time="3" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="rsc1" type="Stateful" class="ocf" provider="pacemaker">
+ <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="22:28:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:0;22:28:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node3" call-id="40" rc-code="0" op-status="0" interval="0" last-rc-change="1687312369" exec-time="21" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc1_monitor_10000" operation_key="rsc1_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:16:8:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:8;10:16:8:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node3" call-id="31" rc-code="8" op-status="0" interval="10000" last-rc-change="1687312235" exec-time="16" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ <lrm_rsc_op id="rsc1_monitor_11000" operation_key="rsc1_monitor_11000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="8:29:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:0;8:29:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node3" call-id="41" rc-code="0" op-status="0" interval="11000" last-rc-change="1687312369" exec-time="14" queue-time="1" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="rsc2" type="Stateful" class="ocf" provider="pacemaker">
+ <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="9:29:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:0;9:29:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node3" call-id="42" rc-code="0" op-status="0" interval="0" last-rc-change="1687312370" exec-time="21" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc2_monitor_10000" operation_key="rsc2_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:17:8:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:8;15:17:8:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node3" call-id="33" rc-code="8" op-status="0" interval="10000" last-rc-change="1687312235" exec-time="10" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ <lrm_rsc_op id="rsc2_monitor_11000" operation_key="rsc2_monitor_11000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="11:30:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:0;11:30:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node3" call-id="43" rc-code="0" op-status="0" interval="11000" last-rc-change="1687312370" exec-time="13" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_state_transition" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-master-rsc1" name="master-rsc1" value="10"/>
+ <nvpair id="status-1-master-rsc2" name="master-rsc2" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="1:0:7:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:7;1:0:7:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node1" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687312091" exec-time="2" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="rsc1" type="Stateful" class="ocf" provider="pacemaker">
+ <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="2:0:7:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:7;2:0:7:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node1" call-id="14" rc-code="7" op-status="0" interval="0" last-rc-change="1687312091" exec-time="15" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="rsc2" type="Stateful" class="ocf" provider="pacemaker">
+ <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="3:0:7:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:7;3:0:7:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node1" call-id="15" rc-code="7" op-status="0" interval="0" last-rc-change="1687312091" exec-time="18" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-master-rsc1" name="master-rsc1" value="15"/>
+ <nvpair id="status-2-master-rsc2" name="master-rsc2" value="15"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="1:23:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:0;1:23:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node2" call-id="28" rc-code="0" op-status="0" interval="0" last-rc-change="1687312369" exec-time="38" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="2:23:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:0;2:23:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node2" call-id="30" rc-code="0" op-status="0" interval="120000" last-rc-change="1687312369" exec-time="30" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="rsc1" type="Stateful" class="ocf" provider="pacemaker">
+ <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="6:25:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:0;6:25:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node2" call-id="32" rc-code="0" op-status="0" interval="0" last-rc-change="1687312369" exec-time="22" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc1_monitor_11000" operation_key="rsc1_monitor_11000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="21:1:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:0;21:1:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node2" call-id="19" rc-code="0" op-status="0" interval="11000" last-rc-change="1687312091" exec-time="16" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ <lrm_rsc_op id="rsc1_monitor_10000" operation_key="rsc1_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="8:26:8:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:8;8:26:8:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node2" call-id="33" rc-code="8" op-status="0" interval="10000" last-rc-change="1687312369" exec-time="13" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="rsc2" type="Stateful" class="ocf" provider="pacemaker">
+ <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="11:26:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:0;11:26:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node2" call-id="34" rc-code="0" op-status="0" interval="0" last-rc-change="1687312369" exec-time="25" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc2_monitor_11000" operation_key="rsc2_monitor_11000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="25:2:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:0;25:2:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node2" call-id="21" rc-code="0" op-status="0" interval="11000" last-rc-change="1687312091" exec-time="16" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ <lrm_rsc_op id="rsc2_monitor_10000" operation_key="rsc2_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="13:27:8:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:8;13:27:8:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node2" call-id="35" rc-code="8" op-status="0" interval="10000" last-rc-change="1687312369" exec-time="12" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-12.xml b/cts/scheduler/xml/clone-recover-no-shuffle-12.xml
new file mode 100644
index 0000000..e302690
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-12.xml
@@ -0,0 +1,186 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="128" num_updates="90" admin_epoch="0" cib-last-written="Tue Jun 20 23:33:00 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="2">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * A promotable bundle resource (base-bundle) is stopped on node1,
+ promoted on node2, and unpromoted on node3
+ * Clone instances are primitives (base), the instances of base-bundle's
+ child resource
+ * There are no location constraints or stickiness configured
+ * base has the highest promotion score on node2
+
+ The following should happen:
+ * Instance base:0 should remain started (unpromoted) on node3
+ * Instance base:1 should remain promoted on node2
+ * Instance base:2 should start (unpromoted) on node1
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687288330"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <bundle id="base-bundle">
+ <meta_attributes id="base-bundle-meta_attributes">
+ <nvpair id="base-bundle-meta_attributes-promotable" name="promotable" value="true"/>
+ <nvpair id="base-bundle-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3121"/>
+ <primitive id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="base-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="base-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ </resources>
+ <constraints/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="1:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;1:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node1" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687329223" exec-time="3" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="2:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;2:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node1" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687329223" exec-time="109" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="4:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;4:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node1" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1687329223" exec-time="99" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="6:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;6:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node1" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1687329223" exec-time="47" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="5:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;5:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node1" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687329224" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="3:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;3:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node1" call-id="2" rc-code="7" op-status="0" interval="0" last-rc-change="1687329224" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="3" uname="node3" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;15:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node3" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687329222" exec-time="1" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="24:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;24:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node3" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1687329222" exec-time="863" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ <lrm_rsc_op id="base-bundle-podman-0_monitor_60000" operation_key="base-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="25:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;25:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node3" call-id="20" rc-code="0" op-status="0" interval="60000" last-rc-change="1687329223" exec-time="164" queue-time="0" op-digest="902512fcf3e4556d9585c44184665d8c"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="18:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;18:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node3" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1687329222" exec-time="108" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;20:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node3" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1687329222" exec-time="59" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="19:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;19:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687329223" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="26:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;26:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node3" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1687329223" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-0_monitor_30000" operation_key="base-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:1:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;15:1:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node3" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687329226" exec-time="0" queue-time="0" op-digest="354b9acaa7ea1113d708dc11a1d6bbfa"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-master-base" name="master-base" value="15"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="22:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;22:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node2" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1687329223" exec-time="34" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="23:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;23:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node2" call-id="19" rc-code="0" op-status="0" interval="120000" last-rc-change="1687329223" exec-time="36" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="9:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;9:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node2" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687329223" exec-time="60" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="28:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;28:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node2" call-id="20" rc-code="0" op-status="0" interval="0" last-rc-change="1687329223" exec-time="791" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ <lrm_rsc_op id="base-bundle-podman-1_monitor_60000" operation_key="base-bundle-podman-1_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="29:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;29:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node2" call-id="21" rc-code="0" op-status="0" interval="60000" last-rc-change="1687329224" exec-time="144" queue-time="0" op-digest="f0ef4729d120aa3f5d938cabca4d06c7"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="13:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;13:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node2" call-id="18" rc-code="7" op-status="0" interval="0" last-rc-change="1687329223" exec-time="68" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="30:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;30:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node2" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1687329224" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-1_monitor_30000" operation_key="base-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:1:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;20:1:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node2" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687329227" exec-time="0" queue-time="0" op-digest="3929eec440004bca31f813a8e6097506"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;10:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node2" call-id="3" rc-code="7" op-status="0" interval="0" last-rc-change="1687329224" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-1" uname="base-bundle-1" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="29:4:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;29:4:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node2" call-id="20" rc-code="0" op-status="0" interval="0" last-rc-change="1687329377" exec-time="307" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_15000" operation_key="base_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="30:5:8:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:8;30:5:8:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node2" call-id="27" rc-code="8" op-status="0" interval="15000" last-rc-change="1687329377" exec-time="7" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-1">
+ <instance_attributes id="status-base-bundle-1">
+ <nvpair id="status-base-bundle-1-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-0" uname="base-bundle-0" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="27:1:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;27:1:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node3" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687329226" exec-time="269" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="27:2:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;27:2:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node3" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687329226" exec-time="13" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-0">
+ <instance_attributes id="status-base-bundle-0">
+ <nvpair id="status-base-bundle-0-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-2.xml b/cts/scheduler/xml/clone-recover-no-shuffle-2.xml
new file mode 100644
index 0000000..486666c
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-2.xml
@@ -0,0 +1,141 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="83" num_updates="0" admin_epoch="0" cib-last-written="Tue Jun 20 18:21:31 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="2">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * An anonymous clone resource (grp) is stopped on node1 and
+ started on node2 and node3
+ * Clone instances are resource groups consisting of primitives rsc1 and
+ rsc2
+ * There is no stickiness configured
+ * There are no location constraints
+
+ The following should happen:
+ * Instance grp:0 should remain started on node2
+ * Instance grp:1 should remain started on node3
+ * Instance grp:2 should start on node1
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687288330"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <clone id="grp-clone">
+ <group id="grp">
+ <primitive id="rsc1" class="ocf" type="Dummy" provider="pacemaker">
+ <operations>
+ <op name="migrate_from" interval="0s" timeout="20s" id="rsc1-migrate_from-interval-0s"/>
+ <op name="migrate_to" interval="0s" timeout="20s" id="rsc1-migrate_to-interval-0s"/>
+ <op name="monitor" interval="10s" timeout="20s" id="rsc1-monitor-interval-10s"/>
+ <op name="reload" interval="0s" timeout="20s" id="rsc1-reload-interval-0s"/>
+ <op name="reload-agent" interval="0s" timeout="20s" id="rsc1-reload-agent-interval-0s"/>
+ <op name="start" interval="0s" timeout="20s" id="rsc1-start-interval-0s"/>
+ <op name="stop" interval="0s" timeout="20s" id="rsc1-stop-interval-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="rsc2" class="ocf" type="Dummy" provider="pacemaker">
+ <operations>
+ <op name="migrate_from" interval="0s" timeout="20s" id="rsc2-migrate_from-interval-0s"/>
+ <op name="migrate_to" interval="0s" timeout="20s" id="rsc2-migrate_to-interval-0s"/>
+ <op name="monitor" interval="10s" timeout="20s" id="rsc2-monitor-interval-10s"/>
+ <op name="reload" interval="0s" timeout="20s" id="rsc2-reload-interval-0s"/>
+ <op name="reload-agent" interval="0s" timeout="20s" id="rsc2-reload-agent-interval-0s"/>
+ <op name="start" interval="0s" timeout="20s" id="rsc2-start-interval-0s"/>
+ <op name="stop" interval="0s" timeout="20s" id="rsc2-stop-interval-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ <meta_attributes id="grp-clone-meta_attributes">
+ <nvpair id="grp-clone-meta_attributes-clone-node-max" name="clone-node-max" value="1"/>
+ </meta_attributes>
+ </clone>
+ </resources>
+ <constraints/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;10:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node2" call-id="16" rc-code="0" op-status="0" interval="0" last-rc-change="1687310527" exec-time="47" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="11:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;11:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node2" call-id="19" rc-code="0" op-status="0" interval="120000" last-rc-change="1687310527" exec-time="42" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;20:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node2" call-id="17" rc-code="0" op-status="0" interval="0" last-rc-change="1687310527" exec-time="14" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc1_monitor_10000" operation_key="rsc1_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="21:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;21:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node2" call-id="18" rc-code="0" op-status="0" interval="10000" last-rc-change="1687310527" exec-time="19" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" op-secure-params=" passwd " op-secure-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="rsc2" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="22:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;22:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node2" call-id="20" rc-code="0" op-status="0" interval="0" last-rc-change="1687310527" exec-time="14" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc2_monitor_10000" operation_key="rsc2_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="23:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;23:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node2" call-id="21" rc-code="0" op-status="0" interval="10000" last-rc-change="1687310527" exec-time="10" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" op-secure-params=" passwd " op-secure-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="1:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:7;1:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node1" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687310527" exec-time="3" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="2:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:7;2:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node1" call-id="14" rc-code="7" op-status="0" interval="0" last-rc-change="1687310527" exec-time="17" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="rsc2" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="3:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:7;3:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node1" call-id="15" rc-code="7" op-status="0" interval="0" last-rc-change="1687310527" exec-time="20" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="3" uname="node3" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="7:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:7;7:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node3" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687310526" exec-time="2" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;12:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node3" call-id="16" rc-code="0" op-status="0" interval="0" last-rc-change="1687310526" exec-time="16" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc1_monitor_10000" operation_key="rsc1_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="13:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;13:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node3" call-id="17" rc-code="0" op-status="0" interval="10000" last-rc-change="1687310526" exec-time="19" queue-time="1" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" op-secure-params=" passwd " op-secure-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="rsc2" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="14:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;14:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node3" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1687310526" exec-time="14" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc2_monitor_10000" operation_key="rsc2_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;15:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node3" call-id="19" rc-code="0" op-status="0" interval="10000" last-rc-change="1687310526" exec-time="12" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" op-secure-params=" passwd " op-secure-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-3.xml b/cts/scheduler/xml/clone-recover-no-shuffle-3.xml
new file mode 100644
index 0000000..ddafb74
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-3.xml
@@ -0,0 +1,180 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="132" num_updates="0" admin_epoch="0" cib-last-written="Tue Jun 20 23:54:37 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * A bundle resource (base-bundle) is stopped on node1 and started on
+ node2 and node3
+ * Clone instances are primitives (base), the instances of base-bundle's
+ child resource
+ * There is no stickiness configured
+ * There are no location constraints
+
+ The following should happen:
+ * Instance base:0 should remain started on node3
+ * Instance base:1 should remain started on node2
+ * Instance base:2 should start on node1
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687288330"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <bundle id="base-bundle">
+ <meta_attributes id="base-bundle-meta_attributes">
+ <nvpair id="base-bundle-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3121"/>
+ <primitive id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="base-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="base-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ </resources>
+ <constraints/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="1:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;1:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="2" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="2:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;2:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="105" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="4:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;4:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="114" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="6:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;6:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="62" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="5:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;5:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="3:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;3:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="2" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="3" uname="node3" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;15:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="3" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="24:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;24:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1687330528" exec-time="663" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ <lrm_rsc_op id="base-bundle-podman-0_monitor_60000" operation_key="base-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="25:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;25:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="20" rc-code="0" op-status="0" interval="60000" last-rc-change="1687330529" exec-time="118" queue-time="0" op-digest="902512fcf3e4556d9585c44184665d8c"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="18:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;18:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="90" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;20:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="71" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="19:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;19:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="26:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;26:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-0_monitor_30000" operation_key="base-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;15:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687330531" exec-time="0" queue-time="0" op-digest="354b9acaa7ea1113d708dc11a1d6bbfa"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="22:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;22:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="33" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="23:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;23:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="19" rc-code="0" op-status="0" interval="120000" last-rc-change="1687330529" exec-time="26" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="9:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;9:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="80" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="28:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;28:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="20" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="612" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ <lrm_rsc_op id="base-bundle-podman-1_monitor_60000" operation_key="base-bundle-podman-1_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="29:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;29:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="21" rc-code="0" op-status="0" interval="60000" last-rc-change="1687330529" exec-time="210" queue-time="0" op-digest="f0ef4729d120aa3f5d938cabca4d06c7"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="13:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;13:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="18" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="78" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="30:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;30:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-1_monitor_30000" operation_key="base-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;20:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687330532" exec-time="0" queue-time="0" op-digest="3929eec440004bca31f813a8e6097506"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;10:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="3" rc-code="7" op-status="0" interval="0" last-rc-change="1687330530" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-0" uname="base-bundle-0" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="27:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;27:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687330531" exec-time="254" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-0">
+ <instance_attributes id="status-base-bundle-0">
+ <nvpair id="status-base-bundle-0-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-1" uname="base-bundle-1" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="27:2:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;27:2:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687330532" exec-time="246" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-1">
+ <instance_attributes id="status-base-bundle-1">
+ <nvpair id="status-base-bundle-1-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-4.xml b/cts/scheduler/xml/clone-recover-no-shuffle-4.xml
new file mode 100644
index 0000000..f0a5feb
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-4.xml
@@ -0,0 +1,115 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="74" num_updates="0" admin_epoch="0" cib-last-written="Tue Jun 20 17:44:24 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="true" dc-uuid="1" execution-date="1687309199">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * An anonymous clone resource (dummy) is stopped on node1 and started
+ on node2 and node3
+ * Clone instances are primitives
+ * There is no stickiness configured
+ * dummy-clone prefers node1 (score=100)
+
+ The following should happen:
+ * Instance dummy:0 should remain started on node2
+ * Instance dummy:1 should remain started on node3
+ * Instance dummy:2 should start on node1
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687288330"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <clone id="dummy-clone">
+ <primitive id="dummy" class="ocf" type="Dummy" provider="pacemaker">
+ <operations>
+ <op name="migrate_from" interval="0s" timeout="20s" id="dummy-migrate_from-interval-0s"/>
+ <op name="migrate_to" interval="0s" timeout="20s" id="dummy-migrate_to-interval-0s"/>
+ <op name="monitor" interval="10s" timeout="20s" id="dummy-monitor-interval-10s"/>
+ <op name="reload" interval="0s" timeout="20s" id="dummy-reload-interval-0s"/>
+ <op name="reload-agent" interval="0s" timeout="20s" id="dummy-reload-agent-interval-0s"/>
+ <op name="start" interval="0s" timeout="20s" id="dummy-start-interval-0s"/>
+ <op name="stop" interval="0s" timeout="20s" id="dummy-stop-interval-0s"/>
+ </operations>
+ </primitive>
+ </clone>
+ </resources>
+ <constraints>
+ <rsc_location id="location-dummy-clone-node1-100" rsc="dummy-clone" node="node1" score="100"/>
+ </constraints>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="1:1:7:6149c21b-77ec-4d97-8b22-d39d97981ad1" transition-magic="0:7;1:1:7:6149c21b-77ec-4d97-8b22-d39d97981ad1" exit-reason="" on_node="node1" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687308299" exec-time="7" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="2:1:7:6149c21b-77ec-4d97-8b22-d39d97981ad1" transition-magic="0:7;2:1:7:6149c21b-77ec-4d97-8b22-d39d97981ad1" exit-reason="" on_node="node1" call-id="10" rc-code="7" op-status="0" interval="0" last-rc-change="1687308299" exec-time="20" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="7:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" transition-magic="0:0;7:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" exit-reason="" on_node="node2" call-id="11" rc-code="0" op-status="0" interval="0" last-rc-change="1687308299" exec-time="40" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="8:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" transition-magic="0:0;8:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" exit-reason="" on_node="node2" call-id="13" rc-code="0" op-status="0" interval="120000" last-rc-change="1687308299" exec-time="40" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="11:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" transition-magic="0:0;11:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" exit-reason="" on_node="node2" call-id="12" rc-code="0" op-status="0" interval="0" last-rc-change="1687308299" exec-time="28" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" transition-magic="0:0;12:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" exit-reason="" on_node="node2" call-id="14" rc-code="0" op-status="0" interval="10000" last-rc-change="1687308299" exec-time="12" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" op-secure-params=" passwd " op-secure-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="3" uname="node3" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="5:1:7:6149c21b-77ec-4d97-8b22-d39d97981ad1" transition-magic="0:7;5:1:7:6149c21b-77ec-4d97-8b22-d39d97981ad1" exit-reason="" on_node="node3" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687308298" exec-time="1" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="9:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" transition-magic="0:0;9:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" exit-reason="" on_node="node3" call-id="11" rc-code="0" op-status="0" interval="0" last-rc-change="1687308298" exec-time="17" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" transition-magic="0:0;10:1:0:6149c21b-77ec-4d97-8b22-d39d97981ad1" exit-reason="" on_node="node3" call-id="12" rc-code="0" op-status="0" interval="10000" last-rc-change="1687308298" exec-time="14" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" op-secure-params=" passwd " op-secure-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-5.xml b/cts/scheduler/xml/clone-recover-no-shuffle-5.xml
new file mode 100644
index 0000000..95e5eca
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-5.xml
@@ -0,0 +1,143 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="83" num_updates="0" admin_epoch="0" cib-last-written="Tue Jun 20 18:21:31 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="2">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * An anonymous clone resource (grp) is stopped on node1 and
+ started on node2 and node3
+ * Clone instances are resource groups consisting of primitives rsc1 and
+ rsc2
+ * There is no stickiness configured
+ * grp-clone prefers node1 (score=100)
+
+ The following should happen:
+ * Instance grp:0 should remain started on node2
+ * Instance grp:1 should remain started on node3
+ * Instance grp:2 should start on node1
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687288330"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <clone id="grp-clone">
+ <group id="grp">
+ <primitive id="rsc1" class="ocf" type="Dummy" provider="pacemaker">
+ <operations>
+ <op name="migrate_from" interval="0s" timeout="20s" id="rsc1-migrate_from-interval-0s"/>
+ <op name="migrate_to" interval="0s" timeout="20s" id="rsc1-migrate_to-interval-0s"/>
+ <op name="monitor" interval="10s" timeout="20s" id="rsc1-monitor-interval-10s"/>
+ <op name="reload" interval="0s" timeout="20s" id="rsc1-reload-interval-0s"/>
+ <op name="reload-agent" interval="0s" timeout="20s" id="rsc1-reload-agent-interval-0s"/>
+ <op name="start" interval="0s" timeout="20s" id="rsc1-start-interval-0s"/>
+ <op name="stop" interval="0s" timeout="20s" id="rsc1-stop-interval-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="rsc2" class="ocf" type="Dummy" provider="pacemaker">
+ <operations>
+ <op name="migrate_from" interval="0s" timeout="20s" id="rsc2-migrate_from-interval-0s"/>
+ <op name="migrate_to" interval="0s" timeout="20s" id="rsc2-migrate_to-interval-0s"/>
+ <op name="monitor" interval="10s" timeout="20s" id="rsc2-monitor-interval-10s"/>
+ <op name="reload" interval="0s" timeout="20s" id="rsc2-reload-interval-0s"/>
+ <op name="reload-agent" interval="0s" timeout="20s" id="rsc2-reload-agent-interval-0s"/>
+ <op name="start" interval="0s" timeout="20s" id="rsc2-start-interval-0s"/>
+ <op name="stop" interval="0s" timeout="20s" id="rsc2-stop-interval-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ <meta_attributes id="grp-clone-meta_attributes">
+ <nvpair id="grp-clone-meta_attributes-clone-node-max" name="clone-node-max" value="1"/>
+ </meta_attributes>
+ </clone>
+ </resources>
+ <constraints>
+ <rsc_location id="location-grp-clone-node1-100" rsc="grp-clone" node="node1" score="100"/>
+ </constraints>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;10:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node2" call-id="16" rc-code="0" op-status="0" interval="0" last-rc-change="1687310527" exec-time="47" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="11:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;11:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node2" call-id="19" rc-code="0" op-status="0" interval="120000" last-rc-change="1687310527" exec-time="42" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;20:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node2" call-id="17" rc-code="0" op-status="0" interval="0" last-rc-change="1687310527" exec-time="14" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc1_monitor_10000" operation_key="rsc1_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="21:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;21:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node2" call-id="18" rc-code="0" op-status="0" interval="10000" last-rc-change="1687310527" exec-time="19" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" op-secure-params=" passwd " op-secure-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="rsc2" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="22:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;22:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node2" call-id="20" rc-code="0" op-status="0" interval="0" last-rc-change="1687310527" exec-time="14" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc2_monitor_10000" operation_key="rsc2_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="23:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;23:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node2" call-id="21" rc-code="0" op-status="0" interval="10000" last-rc-change="1687310527" exec-time="10" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" op-secure-params=" passwd " op-secure-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="1:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:7;1:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node1" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687310527" exec-time="3" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="2:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:7;2:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node1" call-id="14" rc-code="7" op-status="0" interval="0" last-rc-change="1687310527" exec-time="17" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="rsc2" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="3:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:7;3:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node1" call-id="15" rc-code="7" op-status="0" interval="0" last-rc-change="1687310527" exec-time="20" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="3" uname="node3" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="7:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:7;7:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node3" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687310526" exec-time="2" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;12:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node3" call-id="16" rc-code="0" op-status="0" interval="0" last-rc-change="1687310526" exec-time="16" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc1_monitor_10000" operation_key="rsc1_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="13:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;13:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node3" call-id="17" rc-code="0" op-status="0" interval="10000" last-rc-change="1687310526" exec-time="19" queue-time="1" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" op-secure-params=" passwd " op-secure-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="rsc2" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="14:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;14:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node3" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1687310526" exec-time="14" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc2_monitor_10000" operation_key="rsc2_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;15:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node3" call-id="19" rc-code="0" op-status="0" interval="10000" last-rc-change="1687310526" exec-time="12" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" op-secure-params=" passwd " op-secure-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-6.xml b/cts/scheduler/xml/clone-recover-no-shuffle-6.xml
new file mode 100644
index 0000000..64bb4d9
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-6.xml
@@ -0,0 +1,182 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="132" num_updates="0" admin_epoch="0" cib-last-written="Tue Jun 20 23:54:37 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * A bundle resource (base-bundle) is stopped on node1 and started on
+ node2 and node3
+ * Clone instances are primitives (base), the instances of base-bundle's
+ child resource
+ * There is no stickiness configured
+ * base-bundle prefers node1 (score=100)
+
+ The following should happen:
+ * Instance base:0 should remain started on node3
+ * Instance base:1 should remain started on node2
+ * Instance base:2 should start on node1
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687288330"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <bundle id="base-bundle">
+ <meta_attributes id="base-bundle-meta_attributes">
+ <nvpair id="base-bundle-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3121"/>
+ <primitive id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="base-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="base-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ </resources>
+ <constraints>
+ <rsc_location id="location-base-bundle-node1-100" rsc="base-bundle" node="node1" score="100"/>
+ </constraints>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="1:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;1:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="2" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="2:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;2:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="105" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="4:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;4:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="114" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="6:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;6:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="62" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="5:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;5:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="3:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;3:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node1" call-id="2" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="3" uname="node3" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;15:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="3" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="24:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;24:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1687330528" exec-time="663" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ <lrm_rsc_op id="base-bundle-podman-0_monitor_60000" operation_key="base-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="25:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;25:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="20" rc-code="0" op-status="0" interval="60000" last-rc-change="1687330529" exec-time="118" queue-time="0" op-digest="902512fcf3e4556d9585c44184665d8c"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="18:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;18:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="90" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;20:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1687330528" exec-time="71" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="19:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;19:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="26:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;26:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-0_monitor_30000" operation_key="base-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;15:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687330531" exec-time="0" queue-time="0" op-digest="354b9acaa7ea1113d708dc11a1d6bbfa"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="22:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;22:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="33" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="23:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;23:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="19" rc-code="0" op-status="0" interval="120000" last-rc-change="1687330529" exec-time="26" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="9:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;9:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="80" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="28:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;28:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="20" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="612" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ <lrm_rsc_op id="base-bundle-podman-1_monitor_60000" operation_key="base-bundle-podman-1_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="29:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;29:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="21" rc-code="0" op-status="0" interval="60000" last-rc-change="1687330529" exec-time="210" queue-time="0" op-digest="f0ef4729d120aa3f5d938cabca4d06c7"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="13:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;13:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="18" rc-code="7" op-status="0" interval="0" last-rc-change="1687330529" exec-time="78" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="30:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;30:0:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1687330529" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-1_monitor_30000" operation_key="base-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;20:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687330532" exec-time="0" queue-time="0" op-digest="3929eec440004bca31f813a8e6097506"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:7;10:0:7:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="3" rc-code="7" op-status="0" interval="0" last-rc-change="1687330530" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-0" uname="base-bundle-0" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="27:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;27:1:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node3" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687330531" exec-time="254" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-0">
+ <instance_attributes id="status-base-bundle-0">
+ <nvpair id="status-base-bundle-0-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-1" uname="base-bundle-1" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="27:2:0:ccd91267-47dc-4232-bd92-84c29a6c6827" transition-magic="0:0;27:2:0:ccd91267-47dc-4232-bd92-84c29a6c6827" exit-reason="" on_node="node2" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687330532" exec-time="246" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-1">
+ <instance_attributes id="status-base-bundle-1">
+ <nvpair id="status-base-bundle-1-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-7.xml b/cts/scheduler/xml/clone-recover-no-shuffle-7.xml
new file mode 100644
index 0000000..e588b81
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-7.xml
@@ -0,0 +1,120 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="89" num_updates="50" admin_epoch="0" cib-last-written="Tue Jun 20 18:29:07 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="3">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * An anonymous promotable clone resource (dummy) is stopped on node1,
+ promoted on node2, and unpromoted on node3
+ * Clone instances are primitives
+ * There are no location constraints or stickiness configured
+ * dummy has the highest promotion score on node1
+
+ The following should happen:
+ * Instance dummy:0 should remain started (unpromoted) on node3
+ * Instance dummy:1 should demote on node2
+ * Instance dummy:2 should promote on node1
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687288330"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <clone id="dummy-clone">
+ <primitive id="dummy" class="ocf" type="Stateful" provider="pacemaker">
+ <operations>
+ <op name="demote" interval="0s" timeout="10s" id="dummy-demote-interval-0s"/>
+ <op name="monitor" interval="10s" timeout="20s" role="Promoted" id="dummy-monitor-interval-10s"/>
+ <op name="monitor" interval="11s" timeout="20s" role="Unpromoted" id="dummy-monitor-interval-11s"/>
+ <op name="notify" interval="0s" timeout="5s" id="dummy-notify-interval-0s"/>
+ <op name="promote" interval="0s" timeout="10s" id="dummy-promote-interval-0s"/>
+ <op name="reload-agent" interval="0s" timeout="10s" id="dummy-reload-agent-interval-0s"/>
+ <op name="start" interval="0s" timeout="20s" id="dummy-start-interval-0s"/>
+ <op name="stop" interval="0s" timeout="20s" id="dummy-stop-interval-0s"/>
+ </operations>
+ </primitive>
+ <meta_attributes id="dummy-clone-meta_attributes">
+ <nvpair id="dummy-clone-meta_attributes-promotable" name="promotable" value="true"/>
+ </meta_attributes>
+ </clone>
+ </resources>
+ <constraints/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="3" uname="node3" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="5:0:7:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" transition-magic="0:7;5:0:7:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" exit-reason="" on_node="node3" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687310966" exec-time="3" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_demote_0" operation="demote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="7:6:0:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" transition-magic="0:0;7:6:0:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" exit-reason="" on_node="node3" call-id="15" rc-code="0" op-status="0" interval="0" last-rc-change="1687311113" exec-time="39" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_11000" operation_key="dummy_monitor_11000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="9:6:0:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" transition-magic="0:0;9:6:0:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" exit-reason="" on_node="node3" call-id="16" rc-code="0" op-status="0" interval="11000" last-rc-change="1687311113" exec-time="14" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-master-dummy" name="master-dummy" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="1:0:7:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" transition-magic="0:7;1:0:7:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" exit-reason="" on_node="node1" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687310966" exec-time="3" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="2:0:7:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" transition-magic="0:7;2:0:7:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" exit-reason="" on_node="node1" call-id="10" rc-code="7" op-status="0" interval="0" last-rc-change="1687310967" exec-time="19" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-master-dummy" name="master-dummy" value="15"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="7:0:0:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" transition-magic="0:0;7:0:0:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" exit-reason="" on_node="node2" call-id="10" rc-code="0" op-status="0" interval="0" last-rc-change="1687310967" exec-time="47" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="8:0:0:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" transition-magic="0:0;8:0:0:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" exit-reason="" on_node="node2" call-id="13" rc-code="0" op-status="0" interval="120000" last-rc-change="1687310967" exec-time="35" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="dummy_last_0" operation_key="dummy_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:6:0:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" transition-magic="0:0;12:6:0:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" exit-reason="" on_node="node2" call-id="16" rc-code="0" op-status="0" interval="0" last-rc-change="1687311114" exec-time="23" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="13:6:8:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" transition-magic="0:8;13:6:8:ecbbb42b-f6a4-47dc-90de-b2ccf960124e" exit-reason="" on_node="node2" call-id="17" rc-code="8" op-status="0" interval="10000" last-rc-change="1687311114" exec-time="10" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-master-dummy" name="master-dummy" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-8.xml b/cts/scheduler/xml/clone-recover-no-shuffle-8.xml
new file mode 100644
index 0000000..6f882b8
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-8.xml
@@ -0,0 +1,153 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="100" num_updates="0" admin_epoch="0" cib-last-written="Tue Jun 20 18:52:50 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * An anonymous promotable clone resource (grp) is stopped on node1,
+ promoted on node2, and unpromoted on node3
+ * Clone instances are resource groups consisting of primitives rsc1 and
+ rsc2
+ * There are no location constraints or stickiness configured
+ * grp has the highest promotion score on node1 (scores for rsc1 + rsc2)
+
+ The following should happen:
+ * Instance grp:0 should remain started (unpromoted) on node3
+ * Instance grp:1 should demote on node2
+ * Instance grp:2 should promote on node1
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687288330"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <clone id="grp-clone">
+ <group id="grp">
+ <primitive id="rsc1" class="ocf" type="Stateful" provider="pacemaker">
+ <operations>
+ <op name="demote" interval="0s" timeout="10s" id="rsc1-demote-interval-0s"/>
+ <op name="monitor" interval="10s" timeout="20s" role="Promoted" id="rsc1-monitor-interval-10s"/>
+ <op name="monitor" interval="11s" timeout="20s" role="Unpromoted" id="rsc1-monitor-interval-11s"/>
+ <op name="notify" interval="0s" timeout="5s" id="rsc1-notify-interval-0s"/>
+ <op name="promote" interval="0s" timeout="10s" id="rsc1-promote-interval-0s"/>
+ <op name="reload-agent" interval="0s" timeout="10s" id="rsc1-reload-agent-interval-0s"/>
+ <op name="start" interval="0s" timeout="20s" id="rsc1-start-interval-0s"/>
+ <op name="stop" interval="0s" timeout="20s" id="rsc1-stop-interval-0s"/>
+ </operations>
+ </primitive>
+ <primitive id="rsc2" class="ocf" type="Stateful" provider="pacemaker">
+ <operations>
+ <op name="demote" interval="0s" timeout="10s" id="rsc2-demote-interval-0s"/>
+ <op name="monitor" interval="10s" timeout="20s" role="Promoted" id="rsc2-monitor-interval-10s"/>
+ <op name="monitor" interval="11s" timeout="20s" role="Unpromoted" id="rsc2-monitor-interval-11s"/>
+ <op name="notify" interval="0s" timeout="5s" id="rsc2-notify-interval-0s"/>
+ <op name="promote" interval="0s" timeout="10s" id="rsc2-promote-interval-0s"/>
+ <op name="reload-agent" interval="0s" timeout="10s" id="rsc2-reload-agent-interval-0s"/>
+ <op name="start" interval="0s" timeout="20s" id="rsc2-start-interval-0s"/>
+ <op name="stop" interval="0s" timeout="20s" id="rsc2-stop-interval-0s"/>
+ </operations>
+ </primitive>
+ </group>
+ <meta_attributes id="grp-clone-meta_attributes">
+ <nvpair id="grp-clone-meta_attributes-promotable" name="promotable" value="true"/>
+ </meta_attributes>
+ </clone>
+ </resources>
+ <constraints/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="3" uname="node3" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-master-rsc1" name="master-rsc1" value="5"/>
+ <nvpair id="status-3-master-rsc2" name="master-rsc2" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="7:0:7:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:7;7:0:7:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node3" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687312090" exec-time="3" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="rsc1" type="Stateful" class="ocf" provider="pacemaker">
+ <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="22:28:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:0;22:28:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node3" call-id="40" rc-code="0" op-status="0" interval="0" last-rc-change="1687312369" exec-time="21" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc1_monitor_10000" operation_key="rsc1_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:16:8:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:8;10:16:8:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node3" call-id="31" rc-code="8" op-status="0" interval="10000" last-rc-change="1687312235" exec-time="16" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ <lrm_rsc_op id="rsc1_monitor_11000" operation_key="rsc1_monitor_11000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="8:29:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:0;8:29:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node3" call-id="41" rc-code="0" op-status="0" interval="11000" last-rc-change="1687312369" exec-time="14" queue-time="1" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="rsc2" type="Stateful" class="ocf" provider="pacemaker">
+ <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="9:29:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:0;9:29:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node3" call-id="42" rc-code="0" op-status="0" interval="0" last-rc-change="1687312370" exec-time="21" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc2_monitor_10000" operation_key="rsc2_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:17:8:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:8;15:17:8:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node3" call-id="33" rc-code="8" op-status="0" interval="10000" last-rc-change="1687312235" exec-time="10" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ <lrm_rsc_op id="rsc2_monitor_11000" operation_key="rsc2_monitor_11000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="11:30:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:0;11:30:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node3" call-id="43" rc-code="0" op-status="0" interval="11000" last-rc-change="1687312370" exec-time="13" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_state_transition" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-master-rsc1" name="master-rsc1" value="15"/>
+ <nvpair id="status-1-master-rsc2" name="master-rsc2" value="15"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="1:0:7:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:7;1:0:7:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node1" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687312091" exec-time="2" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="rsc1" type="Stateful" class="ocf" provider="pacemaker">
+ <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="2:0:7:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:7;2:0:7:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node1" call-id="14" rc-code="7" op-status="0" interval="0" last-rc-change="1687312091" exec-time="15" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="rsc2" type="Stateful" class="ocf" provider="pacemaker">
+ <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="3:0:7:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:7;3:0:7:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node1" call-id="15" rc-code="7" op-status="0" interval="0" last-rc-change="1687312091" exec-time="18" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-master-rsc1" name="master-rsc1" value="10"/>
+ <nvpair id="status-2-master-rsc2" name="master-rsc2" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="1:23:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:0;1:23:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node2" call-id="28" rc-code="0" op-status="0" interval="0" last-rc-change="1687312369" exec-time="38" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="2:23:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:0;2:23:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node2" call-id="30" rc-code="0" op-status="0" interval="120000" last-rc-change="1687312369" exec-time="30" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="rsc1" type="Stateful" class="ocf" provider="pacemaker">
+ <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="6:25:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:0;6:25:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node2" call-id="32" rc-code="0" op-status="0" interval="0" last-rc-change="1687312369" exec-time="22" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc1_monitor_11000" operation_key="rsc1_monitor_11000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="21:1:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:0;21:1:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node2" call-id="19" rc-code="0" op-status="0" interval="11000" last-rc-change="1687312091" exec-time="16" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ <lrm_rsc_op id="rsc1_monitor_10000" operation_key="rsc1_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="8:26:8:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:8;8:26:8:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node2" call-id="33" rc-code="8" op-status="0" interval="10000" last-rc-change="1687312369" exec-time="13" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="rsc2" type="Stateful" class="ocf" provider="pacemaker">
+ <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="11:26:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:0;11:26:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node2" call-id="34" rc-code="0" op-status="0" interval="0" last-rc-change="1687312369" exec-time="25" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="rsc2_monitor_11000" operation_key="rsc2_monitor_11000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.17.4" transition-key="25:2:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:0;25:2:0:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node2" call-id="21" rc-code="0" op-status="0" interval="11000" last-rc-change="1687312091" exec-time="16" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ <lrm_rsc_op id="rsc2_monitor_10000" operation_key="rsc2_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="13:27:8:0773bc62-9ff2-42db-818f-c4f3a9e3993e" transition-magic="0:8;13:27:8:0773bc62-9ff2-42db-818f-c4f3a9e3993e" exit-reason="" on_node="node2" call-id="35" rc-code="8" op-status="0" interval="10000" last-rc-change="1687312369" exec-time="12" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-9.xml b/cts/scheduler/xml/clone-recover-no-shuffle-9.xml
new file mode 100644
index 0000000..104331d
--- /dev/null
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-9.xml
@@ -0,0 +1,186 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="128" num_updates="90" admin_epoch="0" cib-last-written="Tue Jun 20 23:33:00 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="2">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * A promotable bundle resource (base-bundle) is stopped on node1,
+ promoted on node2, and unpromoted on node3
+ * Clone instances are primitives (base), the instances of base-bundle's
+ child resource
+ * There are no location constraints or stickiness configured
+ * base has the highest promotion score on node1
+
+ The following should happen:
+ * Instance base:0 should remain started (unpromoted) on node3
+ * Instance base:1 should demote on node2
+ * Instance base:2 should promote on node1
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687288330"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <instance_attributes id="Fencing-instance_attributes">
+ <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <bundle id="base-bundle">
+ <meta_attributes id="base-bundle-meta_attributes">
+ <nvpair id="base-bundle-meta_attributes-promotable" name="promotable" value="true"/>
+ <nvpair id="base-bundle-meta_attributes-container-attribute-target" name="container-attribute-target" value="host"/>
+ </meta_attributes>
+ <podman image="localhost/pcmktest" replicas="3"/>
+ <network control-port="3121"/>
+ <primitive id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <operations>
+ <op id="base-monitor-15s" interval="15s" name="monitor" timeout="15s" role="Promoted"/>
+ <op id="base-monitor-16s" interval="16s" name="monitor" timeout="16s" role="Unpromoted"/>
+ </operations>
+ </primitive>
+ </bundle>
+ </resources>
+ <constraints/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-1-master-base" name="master-base" value="15"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="1:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;1:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node1" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687329223" exec-time="3" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="2:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;2:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node1" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687329223" exec-time="109" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="4:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;4:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node1" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1687329223" exec-time="99" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="6:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;6:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node1" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1687329223" exec-time="47" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="5:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;5:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node1" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687329224" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="3:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;3:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node1" call-id="2" rc-code="7" op-status="0" interval="0" last-rc-change="1687329224" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="3" uname="node3" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;15:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node3" call-id="8" rc-code="7" op-status="0" interval="0" last-rc-change="1687329222" exec-time="1" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="24:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;24:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node3" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1687329222" exec-time="863" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ <lrm_rsc_op id="base-bundle-podman-0_monitor_60000" operation_key="base-bundle-podman-0_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="25:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;25:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node3" call-id="20" rc-code="0" op-status="0" interval="60000" last-rc-change="1687329223" exec-time="164" queue-time="0" op-digest="902512fcf3e4556d9585c44184665d8c"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="18:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;18:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node3" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1687329222" exec-time="108" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;20:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node3" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1687329222" exec-time="59" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="19:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;19:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node3" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1687329223" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="26:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;26:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node3" call-id="3" rc-code="0" op-status="0" interval="0" last-rc-change="1687329223" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-0_monitor_30000" operation_key="base-bundle-0_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:1:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;15:1:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node3" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687329226" exec-time="0" queue-time="0" op-digest="354b9acaa7ea1113d708dc11a1d6bbfa"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-3-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ <nvpair id="status-2-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="22:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;22:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node2" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1687329223" exec-time="34" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="23:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;23:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node2" call-id="19" rc-code="0" op-status="0" interval="120000" last-rc-change="1687329223" exec-time="36" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-0" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-0_last_0" operation_key="base-bundle-podman-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="9:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;9:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node2" call-id="12" rc-code="7" op-status="0" interval="0" last-rc-change="1687329223" exec-time="60" queue-time="0" op-digest="b93578750303776f6bb098395c53338a"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-1" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-1_last_0" operation_key="base-bundle-podman-1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="28:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;28:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node2" call-id="20" rc-code="0" op-status="0" interval="0" last-rc-change="1687329223" exec-time="791" queue-time="0" op-digest="91ec5f49c5fd88f29405389545fb7d59"/>
+ <lrm_rsc_op id="base-bundle-podman-1_monitor_60000" operation_key="base-bundle-podman-1_monitor_60000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="29:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;29:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node2" call-id="21" rc-code="0" op-status="0" interval="60000" last-rc-change="1687329224" exec-time="144" queue-time="0" op-digest="f0ef4729d120aa3f5d938cabca4d06c7"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-podman-2" class="ocf" provider="heartbeat" type="podman">
+ <lrm_rsc_op id="base-bundle-podman-2_last_0" operation_key="base-bundle-podman-2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="13:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;13:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node2" call-id="18" rc-code="7" op-status="0" interval="0" last-rc-change="1687329223" exec-time="68" queue-time="0" op-digest="eb1c148b6d2d7a7013f75d596536409b"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-1" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-1">
+ <lrm_rsc_op id="base-bundle-1_last_0" operation_key="base-bundle-1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="30:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;30:0:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node2" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1687329224" exec-time="0" queue-time="0" op-digest="137e2067e195bc8e29ef8680e392a9e7" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ <lrm_rsc_op id="base-bundle-1_monitor_30000" operation_key="base-bundle-1_monitor_30000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:1:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;20:1:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node2" call-id="4" rc-code="0" op-status="0" interval="30000" last-rc-change="1687329227" exec-time="0" queue-time="0" op-digest="3929eec440004bca31f813a8e6097506"/>
+ </lrm_resource>
+ <lrm_resource id="base-bundle-0" class="ocf" provider="pacemaker" type="remote" container="base-bundle-podman-0">
+ <lrm_rsc_op id="base-bundle-0_last_0" operation_key="base-bundle-0_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:7;10:0:7:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node2" call-id="3" rc-code="7" op-status="0" interval="0" last-rc-change="1687329224" exec-time="0" queue-time="0" op-digest="b1f9a03e78ae75f94cc0c36e6c0cc527" op-force-restart=" port server " op-restart-digest="0801b3aee927c91149627bd5d5d7fb35"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-1" uname="base-bundle-1" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-1">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_promote_0" operation="promote" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="29:4:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;29:4:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node2" call-id="20" rc-code="0" op-status="0" interval="0" last-rc-change="1687329377" exec-time="307" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_15000" operation_key="base_monitor_15000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="30:5:8:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:8;30:5:8:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node2" call-id="27" rc-code="8" op-status="0" interval="15000" last-rc-change="1687329377" exec-time="7" queue-time="0" op-digest="3ef575c5f050ae086f0f31bc8f085fdc"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-1">
+ <instance_attributes id="status-base-bundle-1">
+ <nvpair id="status-base-bundle-1-master-base" name="master-base" value="10"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state remote_node="true" id="base-bundle-0" uname="base-bundle-0" in_ccm="true" crm-debug-origin="controld_update_resource_history" node_fenced="0">
+ <lrm id="base-bundle-0">
+ <lrm_resources>
+ <lrm_resource id="base" class="ocf" provider="pacemaker" type="Stateful">
+ <lrm_rsc_op id="base_last_0" operation_key="base_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="27:1:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;27:1:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node3" call-id="8" rc-code="0" op-status="0" interval="0" last-rc-change="1687329226" exec-time="269" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="base_monitor_16000" operation_key="base_monitor_16000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="27:2:0:159e0708-f49f-4071-a9f4-93c48dc468d2" transition-magic="0:0;27:2:0:159e0708-f49f-4071-a9f4-93c48dc468d2" exit-reason="" on_node="node3" call-id="14" rc-code="0" op-status="0" interval="16000" last-rc-change="1687329226" exec-time="13" queue-time="0" op-digest="e62ad827732488b88ef8722ff6a484b3"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="base-bundle-0">
+ <instance_attributes id="status-base-bundle-0">
+ <nvpair id="status-base-bundle-0-master-base" name="master-base" value="5"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/coloc-with-inner-group-member.xml b/cts/scheduler/xml/coloc-with-inner-group-member.xml
new file mode 100644
index 0000000..c07edec
--- /dev/null
+++ b/cts/scheduler/xml/coloc-with-inner-group-member.xml
@@ -0,0 +1,258 @@
+<cib crm_feature_set="3.17.4" validate-with="pacemaker-3.0" epoch="78" num_updates="0" admin_epoch="0" cib-last-written="Tue Jun 20 15:45:06 2023" update-origin="rhel8-1" update-client="cibadmin" update-user="root" have-quorum="true" dc-uuid="2" execution-date="1687293906">
+ <!-- The essential elements of this test are:
+ * A group (grp) has three members (foo, bar, and vip).
+ * The last group member (vip) is colocated with a separate primitive
+ (vip-dep).
+ * The primitive and the last group member are active on the same node
+ (rhel8-3), while the first two group members are active on a different
+ node (rhel8-4).
+
+ In this situation, the first two group members should move to the
+ primitive's node.
+ -->
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cts-stonith-enabled" name="stonith-enabled" value="1"/>
+ <nvpair id="cts-start-failure-is-fatal" name="start-failure-is-fatal" value="false"/>
+ <nvpair id="cts-pe-input-series-max" name="pe-input-series-max" value="5000"/>
+ <nvpair id="cts-shutdown-escalation" name="shutdown-escalation" value="5min"/>
+ <nvpair id="cts-batch-limit" name="batch-limit" value="10"/>
+ <nvpair id="cts-dc-deadtime" name="dc-deadtime" value="5s"/>
+ <nvpair id="cts-no-quorum-policy" name="no-quorum-policy" value="stop"/>
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.6-1202.32f7557415.git.el8-32f7557415"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="rhel8-lab"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="5" uname="rhel8-5">
+ <instance_attributes id="rhel8-5-1">
+ <nvpair id="rhel8-5-1-cts-fencing" name="cts-fencing" value="levels-and"/>
+ </instance_attributes>
+ </node>
+ <node id="1" uname="rhel8-1"/>
+ <node id="3" uname="rhel8-3"/>
+ <node id="4" uname="rhel8-4"/>
+ <node id="2" uname="rhel8-2"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <meta_attributes id="Fencing-meta">
+ <nvpair id="Fencing-migration-threshold" name="migration-threshold" value="5"/>
+ </meta_attributes>
+ <instance_attributes id="Fencing-params">
+ <nvpair id="Fencing-pcmk_host_map" name="pcmk_host_map" value="remote-rhel8-1:rhel8-1;remote-rhel8-2:rhel8-2;remote-rhel8-3:rhel8-3;remote-rhel8-4:rhel8-4;remote-rhel8-5:rhel8-5;"/>
+ <nvpair id="Fencing-key_file" name="key_file" value="/etc/pacemaker/fence_xvm.key"/>
+ <nvpair id="Fencing-multicast_address" name="multicast_address" value="239.255.100.100"/>
+ <nvpair id="Fencing-pcmk_host_list" name="pcmk_host_list" value="rhel8-1 remote-rhel8-1 rhel8-2 remote-rhel8-2 rhel8-3 remote-rhel8-3 rhel8-4 remote-rhel8-4 rhel8-5 remote-rhel8-5"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ <op id="Fencing-stop-0" interval="0" name="stop" timeout="60s"/>
+ <op id="Fencing-start-0" interval="0" name="start" timeout="60s"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="vip-dep" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="vip-dep-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="vip-dep-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="vip-dep-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="vip-dep-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="vip-dep-reload-agent-interval-0s" interval="0s" name="reload-agent" timeout="20s"/>
+ <op id="vip-dep-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="vip-dep-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ <group id="grp">
+ <primitive class="ocf" id="foo" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="foo-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="foo-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="foo-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="foo-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="foo-reload-agent-interval-0s" interval="0s" name="reload-agent" timeout="20s"/>
+ <op id="foo-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="foo-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="bar" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="bar-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="bar-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="bar-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="bar-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="bar-reload-agent-interval-0s" interval="0s" name="reload-agent" timeout="20s"/>
+ <op id="bar-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="bar-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="vip" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="vip-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="vip-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="vip-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="vip-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="vip-reload-agent-interval-0s" interval="0s" name="reload-agent" timeout="20s"/>
+ <op id="vip-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="vip-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </group>
+ </resources>
+ <constraints>
+ <rsc_order first="vip-dep" first-action="start" id="order-vip-dep-vip-mandatory" then="vip" then-action="start"/>
+ <rsc_colocation id="colocation-vip-vip-dep-INFINITY" rsc="vip" score="INFINITY" with-rsc="vip-dep"/>
+ </constraints>
+ <fencing-topology/>
+ <op_defaults>
+ <meta_attributes id="cts-op_defaults-meta">
+ <nvpair id="cts-op_defaults-timeout" name="timeout" value="90s"/>
+ </meta_attributes>
+ </op_defaults>
+ <alerts>
+ <alert id="alert-1" path="/var/lib/pacemaker/notify.sh">
+ <recipient id="alert-1-recipient-1" value="/run/crm/alert.log"/>
+ </alert>
+ </alerts>
+ </configuration>
+ <status>
+ <node_state id="2" uname="rhel8-2" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="4:0:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:7;4:0:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-2" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687293860" exec-time="1" queue-time="0" op-digest="bf974d77f2d4d33e434be1f89e362a52"/>
+ </lrm_resource>
+ <lrm_resource id="vip-dep" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="vip-dep_last_0" operation_key="vip-dep_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="3:1:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:7;3:1:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-2" call-id="21" rc-code="7" op-status="0" interval="0" last-rc-change="1687293879" exec-time="25" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="vip" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="4:2:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:7;4:2:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-2" call-id="25" rc-code="7" op-status="0" interval="0" last-rc-change="1687293885" exec-time="36" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="foo" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="foo_last_0" operation_key="foo_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="5:5:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:7;5:5:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-2" call-id="29" rc-code="7" op-status="0" interval="0" last-rc-change="1687293893" exec-time="36" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="bar" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="bar_last_0" operation_key="bar_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="6:6:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:7;6:6:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-2" call-id="33" rc-code="7" op-status="0" interval="0" last-rc-change="1687293894" exec-time="30" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="5" uname="rhel8-5" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="5">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="13:0:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:7;13:0:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-5" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687293860" exec-time="1" queue-time="0" op-digest="bf974d77f2d4d33e434be1f89e362a52"/>
+ </lrm_resource>
+ <lrm_resource id="vip-dep" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="vip-dep_last_0" operation_key="vip-dep_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="6:1:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:7;6:1:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-5" call-id="21" rc-code="7" op-status="0" interval="0" last-rc-change="1687293879" exec-time="27" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="vip" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="7:2:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:7;7:2:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-5" call-id="25" rc-code="7" op-status="0" interval="0" last-rc-change="1687293885" exec-time="39" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="foo" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="foo_last_0" operation_key="foo_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="8:5:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:7;8:5:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-5" call-id="29" rc-code="7" op-status="0" interval="0" last-rc-change="1687293893" exec-time="40" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="bar" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="bar_last_0" operation_key="bar_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="9:6:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:7;9:6:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-5" call-id="33" rc-code="7" op-status="0" interval="0" last-rc-change="1687293894" exec-time="32" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="5">
+ <instance_attributes id="status-5">
+ <nvpair id="status-5-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="1" uname="rhel8-1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="16:0:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:0;16:0:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-1" call-id="14" rc-code="0" op-status="0" interval="0" last-rc-change="1687293860" exec-time="52" queue-time="0" op-digest="bf974d77f2d4d33e434be1f89e362a52"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="17:0:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:0;17:0:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-1" call-id="16" rc-code="0" op-status="0" interval="120000" last-rc-change="1687293860" exec-time="35" queue-time="0" op-digest="24c9c9364f847dcb857d6fb4e1b4d3c8"/>
+ </lrm_resource>
+ <lrm_resource id="vip-dep" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="vip-dep_last_0" operation_key="vip-dep_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="2:1:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:7;2:1:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-1" call-id="25" rc-code="7" op-status="0" interval="0" last-rc-change="1687293879" exec-time="57" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="vip" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="3:2:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:7;3:2:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-1" call-id="29" rc-code="7" op-status="0" interval="0" last-rc-change="1687293885" exec-time="34" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="foo" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="foo_last_0" operation_key="foo_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="4:5:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:7;4:5:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-1" call-id="33" rc-code="7" op-status="0" interval="0" last-rc-change="1687293893" exec-time="62" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="bar" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="bar_last_0" operation_key="bar_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="5:6:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:7;5:6:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-1" call-id="37" rc-code="7" op-status="0" interval="0" last-rc-change="1687293894" exec-time="70" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="3" uname="rhel8-3" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="7:0:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:7;7:0:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-3" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687293860" exec-time="1" queue-time="0" op-digest="bf974d77f2d4d33e434be1f89e362a52"/>
+ </lrm_resource>
+ <lrm_resource id="vip-dep" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="vip-dep_last_0" operation_key="vip-dep_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="11:1:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:0;11:1:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-3" call-id="22" rc-code="0" op-status="0" interval="0" last-rc-change="1687293879" exec-time="19" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="vip-dep_monitor_10000" operation_key="vip-dep_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:1:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:0;12:1:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-3" call-id="24" rc-code="0" op-status="0" interval="10000" last-rc-change="1687293879" exec-time="18" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" op-secure-params=" passwd " op-secure-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="vip" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="11:4:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:0;11:4:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-3" call-id="30" rc-code="0" op-status="0" interval="0" last-rc-change="1687293893" exec-time="20" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="vip_monitor_10000" operation_key="vip_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:4:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:0;12:4:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-3" call-id="32" rc-code="0" op-status="0" interval="10000" last-rc-change="1687293893" exec-time="20" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" op-secure-params=" passwd " op-secure-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="foo" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="foo_last_0" operation_key="foo_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="6:5:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:7;6:5:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-3" call-id="37" rc-code="7" op-status="0" interval="0" last-rc-change="1687293893" exec-time="34" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="bar" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="bar_last_0" operation_key="bar_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="7:6:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:7;7:6:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-3" call-id="41" rc-code="7" op-status="0" interval="0" last-rc-change="1687293894" exec-time="45" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ <node_state id="4" uname="rhel8-4" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
+ <lrm id="4">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:0:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:7;10:0:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-4" call-id="10" rc-code="7" op-status="0" interval="0" last-rc-change="1687293860" exec-time="5" queue-time="0" op-digest="bf974d77f2d4d33e434be1f89e362a52"/>
+ </lrm_resource>
+ <lrm_resource id="vip-dep" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="vip-dep_last_0" operation_key="vip-dep_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="5:1:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:7;5:1:7:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-4" call-id="22" rc-code="7" op-status="0" interval="0" last-rc-change="1687293879" exec-time="26" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="vip" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="vip_last_0" operation_key="vip_stop_0" operation="stop" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:4:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:0;10:4:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-4" call-id="32" rc-code="0" op-status="0" interval="0" last-rc-change="1687293893" exec-time="29" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="vip_monitor_10000" operation_key="vip_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:2:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:0;15:2:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-4" call-id="29" rc-code="0" op-status="0" interval="10000" last-rc-change="1687293885" exec-time="18" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" op-secure-params=" passwd " op-secure-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="foo" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="foo_last_0" operation_key="foo_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="17:5:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:0;17:5:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-4" call-id="39" rc-code="0" op-status="0" interval="0" last-rc-change="1687293893" exec-time="14" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="foo_monitor_10000" operation_key="foo_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="18:5:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:0;18:5:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-4" call-id="41" rc-code="0" op-status="0" interval="10000" last-rc-change="1687293893" exec-time="18" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" op-secure-params=" passwd " op-secure-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="bar" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="bar_last_0" operation_key="bar_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:6:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:0;20:6:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-4" call-id="47" rc-code="0" op-status="0" interval="0" last-rc-change="1687293894" exec-time="16" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="bar_monitor_10000" operation_key="bar_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="21:6:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" transition-magic="0:0;21:6:0:72d50bf3-3ecf-4bdb-af9c-fd66cdae2841" exit-reason="" on_node="rhel8-4" call-id="49" rc-code="0" op-status="0" interval="10000" last-rc-change="1687293894" exec-time="19" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" op-secure-params=" passwd " op-secure-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ <transient_attributes id="4">
+ <instance_attributes id="status-4">
+ <nvpair id="status-4-.feature-set" name="#feature-set" value="3.17.4"/>
+ </instance_attributes>
+ </transient_attributes>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/group-anticolocation-2.xml b/cts/scheduler/xml/group-anticolocation-2.xml
new file mode 100644
index 0000000..0fb5523
--- /dev/null
+++ b/cts/scheduler/xml/group-anticolocation-2.xml
@@ -0,0 +1,166 @@
+<cib crm_feature_set="3.16.1" validate-with="pacemaker-3.7" epoch="265" num_updates="16" admin_epoch="0" cib-last-written="Tue Oct 25 14:29:45 2022" update-origin="node2" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="4">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * A group (group2) is optionally anti-colocated (at score -5000) with
+ another group (group1)
+ * There are two nodes, and the groups are active on different ones
+ * The dependent group's last member (member2b) is failed and has
+ reached its migration-threshold
+ * Stickiness is infinite
+ * The anti-colocation has influence
+ * The dependent has nowhere to run other than the primary's node
+
+ In this situation, the goal of keeping as many resources active as
+ possible should override the anti-colocation, so the dependent should
+ move, but given the stickiness, the primary should stay where it is.
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <meta_attributes id="Fencing-meta">
+ <nvpair id="Fencing-migration-threshold" name="migration-threshold" value="5"/>
+ </meta_attributes>
+ <instance_attributes id="Fencing-params">
+ <nvpair id="Fencing-key_file" name="key_file" value="/etc/pacemaker/fence_xvm.key"/>
+ <nvpair id="Fencing-multicast_address" name="multicast_address" value="239.255.100.100"/>
+ <nvpair id="Fencing-pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3 node4 node5"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <group id="group1">
+ <primitive class="ocf" id="member1a" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="member1a-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="member1a-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="member1a-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="member1a-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="member1a-reload-agent-interval-0s" interval="0s" name="reload-agent" timeout="20s"/>
+ <op id="member1a-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="member1a-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="member1b" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="member1b-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="member1b-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="member1b-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="member1b-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="member1b-reload-agent-interval-0s" interval="0s" name="reload-agent" timeout="20s"/>
+ <op id="member1b-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="member1b-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </group>
+ <group id="group2">
+ <primitive class="ocf" id="member2a" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="member2a-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="member2a-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="member2a-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="member2a-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="member2a-reload-agent-interval-0s" interval="0s" name="reload-agent" timeout="20s"/>
+ <op id="member2a-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="member2a-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="member2b" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="member2b-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="member2b-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="member2b-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="member2b-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="member2b-reload-agent-interval-0s" interval="0s" name="reload-agent" timeout="20s"/>
+ <op id="member2b-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="member2b-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </group>
+ </resources>
+ <constraints>
+ <rsc_colocation id="colocation-group2-group1--5000" rsc="group2" score="-5000" with-rsc="group1"/>
+ </constraints>
+ <fencing-topology/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults>
+ <meta_attributes id="rsc_defaults-meta_attributes">
+ <nvpair id="rsc_defaults-meta_attributes-migration-threshold" name="migration-threshold" value="1"/>
+ <nvpair id="rsc_defaults-meta_attributes-resource-stickiness" name="resource-stickiness" value="INFINITY"/>
+ </meta_attributes>
+ </rsc_defaults>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.16.1"/>
+ <nvpair id="status-1-fail-count-member2b.monitor_10000" name="fail-count-member2b#monitor_10000" value="1"/>
+ <nvpair id="status-1-last-failure-member2b.monitor_10000" name="last-failure-member2b#monitor_10000" value="1666727868"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="9:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;9:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="9" rc-code="0" op-status="0" interval="0" last-rc-change="1666726336" exec-time="0" queue-time="0" op-digest="52e34745a77d95a636428d3b550eb867"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="10:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;10:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="10" rc-code="0" op-status="0" interval="120000" last-rc-change="1666726336" exec-time="0" queue-time="0" op-digest="acc6dd2c58c637db4d12a6fe35626617"/>
+ </lrm_resource>
+ <lrm_resource id="member1a" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member1a_last_0" operation_key="member1a_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="member1b" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member1b_last_0" operation_key="member1b_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="member2a" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member2a_last_0" operation_key="member2a_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="member2a_monitor_10000" operation_key="member2a_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="member2b" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member2b_last_0" operation_key="member2b_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="member2b_monitor_10000" operation_key="member2b_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="1" op-status="0" interval="10000" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.16.1"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.16.1" transition-key="2:0:7:72f87069-7296-4421-b2b0-ab12fa9068ef" transition-magic="0:7;2:0:7:72f87069-7296-4421-b2b0-ab12fa9068ef" exit-reason="" on_node="node2" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1666726234" exec-time="2" queue-time="0" op-digest="ac94f147daea19463126aacea2f8cd39"/>
+ </lrm_resource>
+ <lrm_resource id="member1a" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member1a_last_0" operation_key="member1a_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="member1a_monitor_10000" operation_key="member1a_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="member1b" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member1b_last_0" operation_key="member1b_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="member1b_monitor_10000" operation_key="member1b_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="member2a" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member2a_last_0" operation_key="member2a_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="member2b" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member2b_last_0" operation_key="member2b_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/group-anticolocation-3.xml b/cts/scheduler/xml/group-anticolocation-3.xml
new file mode 100644
index 0000000..2c118fd
--- /dev/null
+++ b/cts/scheduler/xml/group-anticolocation-3.xml
@@ -0,0 +1,165 @@
+<cib crm_feature_set="3.16.1" validate-with="pacemaker-3.7" epoch="265" num_updates="16" admin_epoch="0" cib-last-written="Tue Oct 25 14:29:45 2022" update-origin="node2" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="4">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * A group (group2) is mandatorily anti-colocated with another group
+ (group1)
+ * There are two nodes, and the groups are active on different ones
+ * The dependent group's last member (member2b) is failed and has
+ reached its migration-threshold
+ * Stickiness is infinite
+ * The anti-colocation has influence
+ * The dependent has nowhere to run other than the primary's node
+
+ In this situation, the mandatory anti-colocation and stickiness mean
+ the failed dependent member has to remain stopped.
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <meta_attributes id="Fencing-meta">
+ <nvpair id="Fencing-migration-threshold" name="migration-threshold" value="5"/>
+ </meta_attributes>
+ <instance_attributes id="Fencing-params">
+ <nvpair id="Fencing-key_file" name="key_file" value="/etc/pacemaker/fence_xvm.key"/>
+ <nvpair id="Fencing-multicast_address" name="multicast_address" value="239.255.100.100"/>
+ <nvpair id="Fencing-pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3 node4 node5"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <group id="group1">
+ <primitive class="ocf" id="member1a" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="member1a-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="member1a-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="member1a-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="member1a-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="member1a-reload-agent-interval-0s" interval="0s" name="reload-agent" timeout="20s"/>
+ <op id="member1a-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="member1a-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="member1b" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="member1b-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="member1b-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="member1b-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="member1b-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="member1b-reload-agent-interval-0s" interval="0s" name="reload-agent" timeout="20s"/>
+ <op id="member1b-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="member1b-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </group>
+ <group id="group2">
+ <primitive class="ocf" id="member2a" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="member2a-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="member2a-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="member2a-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="member2a-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="member2a-reload-agent-interval-0s" interval="0s" name="reload-agent" timeout="20s"/>
+ <op id="member2a-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="member2a-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="member2b" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="member2b-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="member2b-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="member2b-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="member2b-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="member2b-reload-agent-interval-0s" interval="0s" name="reload-agent" timeout="20s"/>
+ <op id="member2b-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="member2b-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </group>
+ </resources>
+ <constraints>
+ <rsc_colocation id="colocation-group2-group1" rsc="group2" score="-INFINITY" with-rsc="group1"/>
+ </constraints>
+ <fencing-topology/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults>
+ <meta_attributes id="rsc_defaults-meta_attributes">
+ <nvpair id="rsc_defaults-meta_attributes-migration-threshold" name="migration-threshold" value="1"/>
+ <nvpair id="rsc_defaults-meta_attributes-resource-stickiness" name="resource-stickiness" value="INFINITY"/>
+ </meta_attributes>
+ </rsc_defaults>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.16.1"/>
+ <nvpair id="status-1-fail-count-member2b.monitor_10000" name="fail-count-member2b#monitor_10000" value="1"/>
+ <nvpair id="status-1-last-failure-member2b.monitor_10000" name="last-failure-member2b#monitor_10000" value="1666727868"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="9:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;9:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="9" rc-code="0" op-status="0" interval="0" last-rc-change="1666726336" exec-time="0" queue-time="0" op-digest="52e34745a77d95a636428d3b550eb867"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="10:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;10:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="10" rc-code="0" op-status="0" interval="120000" last-rc-change="1666726336" exec-time="0" queue-time="0" op-digest="acc6dd2c58c637db4d12a6fe35626617"/>
+ </lrm_resource>
+ <lrm_resource id="member1a" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member1a_last_0" operation_key="member1a_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="member1b" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member1b_last_0" operation_key="member1b_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="member2a" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member2a_last_0" operation_key="member2a_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="member2a_monitor_10000" operation_key="member2a_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="member2b" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member2b_last_0" operation_key="member2b_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="member2b_monitor_10000" operation_key="member2b_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="1" op-status="0" interval="10000" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.16.1"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.16.1" transition-key="2:0:7:72f87069-7296-4421-b2b0-ab12fa9068ef" transition-magic="0:7;2:0:7:72f87069-7296-4421-b2b0-ab12fa9068ef" exit-reason="" on_node="node2" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1666726234" exec-time="2" queue-time="0" op-digest="ac94f147daea19463126aacea2f8cd39"/>
+ </lrm_resource>
+ <lrm_resource id="member1a" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member1a_last_0" operation_key="member1a_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="member1a_monitor_10000" operation_key="member1a_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="member1b" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member1b_last_0" operation_key="member1b_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="member1b_monitor_10000" operation_key="member1b_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="member2a" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member2a_last_0" operation_key="member2a_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="member2b" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member2b_last_0" operation_key="member2b_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/group-anticolocation-4.xml b/cts/scheduler/xml/group-anticolocation-4.xml
new file mode 100644
index 0000000..33ecb3f
--- /dev/null
+++ b/cts/scheduler/xml/group-anticolocation-4.xml
@@ -0,0 +1,167 @@
+<cib crm_feature_set="3.16.1" validate-with="pacemaker-3.7" epoch="265" num_updates="16" admin_epoch="0" cib-last-written="Tue Oct 25 14:29:45 2022" update-origin="node2" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="4">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * A group (group2) is optionally anti-colocated (at score -5000) with
+ another group (group1)
+ * There are two nodes, and the groups are active on different ones
+ * The dependent group's last member (member2b) is failed and has
+ reached its migration-threshold
+ * Stickiness is zero
+ * The anti-colocation does not have influence
+ * The dependent has nowhere to run other than the primary's node
+
+ In this situation, the goal of keeping as many resources active as
+ possible should override the anti-colocation, so the dependent should
+ move, but given the lack of influence, the primary should stay where
+ it is.
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <meta_attributes id="Fencing-meta">
+ <nvpair id="Fencing-migration-threshold" name="migration-threshold" value="5"/>
+ </meta_attributes>
+ <instance_attributes id="Fencing-params">
+ <nvpair id="Fencing-key_file" name="key_file" value="/etc/pacemaker/fence_xvm.key"/>
+ <nvpair id="Fencing-multicast_address" name="multicast_address" value="239.255.100.100"/>
+ <nvpair id="Fencing-pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3 node4 node5"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <group id="group1">
+ <primitive class="ocf" id="member1a" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="member1a-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="member1a-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="member1a-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="member1a-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="member1a-reload-agent-interval-0s" interval="0s" name="reload-agent" timeout="20s"/>
+ <op id="member1a-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="member1a-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="member1b" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="member1b-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="member1b-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="member1b-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="member1b-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="member1b-reload-agent-interval-0s" interval="0s" name="reload-agent" timeout="20s"/>
+ <op id="member1b-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="member1b-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </group>
+ <group id="group2">
+ <primitive class="ocf" id="member2a" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="member2a-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="member2a-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="member2a-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="member2a-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="member2a-reload-agent-interval-0s" interval="0s" name="reload-agent" timeout="20s"/>
+ <op id="member2a-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="member2a-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="member2b" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="member2b-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="member2b-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="member2b-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="member2b-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="member2b-reload-agent-interval-0s" interval="0s" name="reload-agent" timeout="20s"/>
+ <op id="member2b-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="member2b-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </group>
+ </resources>
+ <constraints>
+ <rsc_colocation id="colocation-group2-group1--5000" rsc="group2" with-rsc="group1"
+ score="-5000" influence="false"/>
+ </constraints>
+ <fencing-topology/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults>
+ <meta_attributes id="rsc_defaults-meta_attributes">
+ <nvpair id="rsc_defaults-meta_attributes-migration-threshold" name="migration-threshold" value="1"/>
+ </meta_attributes>
+ </rsc_defaults>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.16.1"/>
+ <nvpair id="status-1-fail-count-member2b.monitor_10000" name="fail-count-member2b#monitor_10000" value="1"/>
+ <nvpair id="status-1-last-failure-member2b.monitor_10000" name="last-failure-member2b#monitor_10000" value="1666727868"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="9:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;9:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="9" rc-code="0" op-status="0" interval="0" last-rc-change="1666726336" exec-time="0" queue-time="0" op-digest="52e34745a77d95a636428d3b550eb867"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="10:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;10:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="10" rc-code="0" op-status="0" interval="120000" last-rc-change="1666726336" exec-time="0" queue-time="0" op-digest="acc6dd2c58c637db4d12a6fe35626617"/>
+ </lrm_resource>
+ <lrm_resource id="member1a" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member1a_last_0" operation_key="member1a_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="member1b" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member1b_last_0" operation_key="member1b_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="member2a" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member2a_last_0" operation_key="member2a_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="member2a_monitor_10000" operation_key="member2a_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="member2b" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member2b_last_0" operation_key="member2b_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="member2b_monitor_10000" operation_key="member2b_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="1" op-status="0" interval="10000" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.16.1"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.16.1" transition-key="2:0:7:72f87069-7296-4421-b2b0-ab12fa9068ef" transition-magic="0:7;2:0:7:72f87069-7296-4421-b2b0-ab12fa9068ef" exit-reason="" on_node="node2" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1666726234" exec-time="2" queue-time="0" op-digest="ac94f147daea19463126aacea2f8cd39"/>
+ </lrm_resource>
+ <lrm_resource id="member1a" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member1a_last_0" operation_key="member1a_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="member1a_monitor_10000" operation_key="member1a_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="member1b" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member1b_last_0" operation_key="member1b_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="member1b_monitor_10000" operation_key="member1b_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="member2a" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member2a_last_0" operation_key="member2a_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="member2b" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member2b_last_0" operation_key="member2b_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
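
The scenario above hinges on two settings already present in the fixture: the optional anti-colocation is declared with influence disabled, and rsc_defaults sets migration-threshold to 1, so member2b's single monitor failure exhausts its allowed failures. As a minimal, illustrative sketch (the id is arbitrary), such a constraint looks like:

    <rsc_colocation id="group2-avoids-group1" rsc="group2" with-rsc="group1"
                    score="-5000" influence="false"/>

Because influence is false, the dependent's placement preferences do not push the primary group off its node, which is why only group2 is expected to move.
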
diff --git a/cts/scheduler/xml/group-anticolocation-5.xml b/cts/scheduler/xml/group-anticolocation-5.xml
new file mode 100644
index 0000000..b7eb9f3
--- /dev/null
+++ b/cts/scheduler/xml/group-anticolocation-5.xml
@@ -0,0 +1,188 @@
+<cib crm_feature_set="3.16.1" validate-with="pacemaker-3.7" epoch="265" num_updates="16" admin_epoch="0" cib-last-written="Tue Oct 25 14:29:45 2022" update-origin="node2" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="4">
+ <configuration>
+ <!-- The essential elements of this test are:
+ * A group (group2) is optionally anti-colocated (at score -5000) with
+ another group (group1)
+ * There are three nodes, and the groups are active on different ones
+ * The dependent group's last member (member2b) is failed and has
+ reached its migration-threshold
+ * Stickiness is zero
+ * The anti-colocation has influence
+ * The dependent is allowed on the third node
+
+ In this situation, the dependent should move to the third node, and
+ the primary should stay where it is.
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <meta_attributes id="Fencing-meta">
+ <nvpair id="Fencing-migration-threshold" name="migration-threshold" value="5"/>
+ </meta_attributes>
+ <instance_attributes id="Fencing-params">
+ <nvpair id="Fencing-key_file" name="key_file" value="/etc/pacemaker/fence_xvm.key"/>
+ <nvpair id="Fencing-multicast_address" name="multicast_address" value="239.255.100.100"/>
+ <nvpair id="Fencing-pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3 node4 node5"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <group id="group1">
+ <primitive class="ocf" id="member1a" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="member1a-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="member1a-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="member1a-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="member1a-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="member1a-reload-agent-interval-0s" interval="0s" name="reload-agent" timeout="20s"/>
+ <op id="member1a-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="member1a-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="member1b" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="member1b-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="member1b-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="member1b-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="member1b-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="member1b-reload-agent-interval-0s" interval="0s" name="reload-agent" timeout="20s"/>
+ <op id="member1b-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="member1b-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </group>
+ <group id="group2">
+ <primitive class="ocf" id="member2a" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="member2a-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="member2a-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="member2a-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="member2a-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="member2a-reload-agent-interval-0s" interval="0s" name="reload-agent" timeout="20s"/>
+ <op id="member2a-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="member2a-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ <primitive class="ocf" id="member2b" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="member2b-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="member2b-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="member2b-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="member2b-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="member2b-reload-agent-interval-0s" interval="0s" name="reload-agent" timeout="20s"/>
+ <op id="member2b-start-interval-0s" interval="0s" name="start" timeout="20s"/>
+ <op id="member2b-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </group>
+ </resources>
+ <constraints>
+ <rsc_colocation id="colocation-group2-group1--5000" rsc="group2" score="-5000" with-rsc="group1"/>
+ </constraints>
+ <fencing-topology/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults>
+ <meta_attributes id="rsc_defaults-meta_attributes">
+ <nvpair id="rsc_defaults-meta_attributes-migration-threshold" name="migration-threshold" value="1"/>
+ </meta_attributes>
+ </rsc_defaults>
+ </configuration>
+ <status>
+ <node_state id="3" uname="node3" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <transient_attributes id="3">
+ </transient_attributes>
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.17.4" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1686003179" exec-time="0" queue-time="0" op-digest="52e34745a77d95a636428d3b550eb867"/>
+ </lrm_resource>
+ <lrm_resource id="member1a" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member1a_last_0" operation_key="member1a_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.17.4" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1686003179" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="member1b" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member1b_last_0" operation_key="member1b_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.17.4" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1686003179" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="member2a" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member2a_last_0" operation_key="member2a_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.17.4" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1686003179" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="member2b" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member2b_last_0" operation_key="member2b_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.17.4" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1686003179" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.16.1"/>
+ <nvpair id="status-1-fail-count-member2b.monitor_10000" name="fail-count-member2b#monitor_10000" value="1"/>
+ <nvpair id="status-1-last-failure-member2b.monitor_10000" name="last-failure-member2b#monitor_10000" value="1666727868"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="9:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;9:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="9" rc-code="0" op-status="0" interval="0" last-rc-change="1666726336" exec-time="0" queue-time="0" op-digest="52e34745a77d95a636428d3b550eb867"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="10:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;10:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="10" rc-code="0" op-status="0" interval="120000" last-rc-change="1666726336" exec-time="0" queue-time="0" op-digest="acc6dd2c58c637db4d12a6fe35626617"/>
+ </lrm_resource>
+ <lrm_resource id="member1a" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member1a_last_0" operation_key="member1a_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="member1b" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member1b_last_0" operation_key="member1b_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="member2a" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member2a_last_0" operation_key="member2a_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="member2a_monitor_10000" operation_key="member2a_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="member2b" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member2b_last_0" operation_key="member2b_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="member2b_monitor_10000" operation_key="member2b_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="1" op-status="0" interval="10000" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.16.1"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.16.1" transition-key="2:0:7:72f87069-7296-4421-b2b0-ab12fa9068ef" transition-magic="0:7;2:0:7:72f87069-7296-4421-b2b0-ab12fa9068ef" exit-reason="" on_node="node2" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1666726234" exec-time="2" queue-time="0" op-digest="ac94f147daea19463126aacea2f8cd39"/>
+ </lrm_resource>
+ <lrm_resource id="member1a" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member1a_last_0" operation_key="member1a_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="member1a_monitor_10000" operation_key="member1a_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="member1b" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member1b_last_0" operation_key="member1b_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ <lrm_rsc_op id="member1b_monitor_10000" operation_key="member1b_monitor_10000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="10000" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+ </lrm_resource>
+ <lrm_resource id="member2a" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member2a_last_0" operation_key="member2a_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ <lrm_resource id="member2b" class="ocf" provider="pacemaker" type="Dummy">
+ <lrm_rsc_op id="member2b_last_0" operation_key="member2b_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" last-rc-change="1666727835" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/group-anticolocation.xml b/cts/scheduler/xml/group-anticolocation.xml
index 1f895ec..1c6c8c9 100644
--- a/cts/scheduler/xml/group-anticolocation.xml
+++ b/cts/scheduler/xml/group-anticolocation.xml
@@ -1,15 +1,17 @@
<cib crm_feature_set="3.16.1" validate-with="pacemaker-3.7" epoch="265" num_updates="16" admin_epoch="0" cib-last-written="Tue Oct 25 14:29:45 2022" update-origin="node2" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="4">
<configuration>
<!-- The essential elements of this test are:
- * A group (group2) is optionally anti-colocated (at score -5000) with
+ * A group (group2) is optionally anti-colocated (at score -5000) with
another group (group1)
- * There are two nodes, and the groups are active on different ones
- * The dependent group's last member (member2b) is failed and has
+ * There are two nodes, and the groups are active on different ones
+ * The dependent group's last member (member2b) is failed and has
reached its migration-threshold
+ * Stickiness is zero
+ * The anti-colocation has influence
+ * The dependent has nowhere to run other than the primary's node
- In this situation, the goal of keeping as many resources active as
- possible should override the optional anti-colocation, and the
- dependent group should move to the primary group's node.
+ In this situation, the goal of keeping as many resources active as
+ possible and the lack of stickiness mean the groups should swap nodes.
-->
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
diff --git a/cts/scheduler/xml/node-pending-timeout.xml b/cts/scheduler/xml/node-pending-timeout.xml
new file mode 100644
index 0000000..b4c3614
--- /dev/null
+++ b/cts/scheduler/xml/node-pending-timeout.xml
@@ -0,0 +1,27 @@
+<cib crm_feature_set="3.18.0" validate-with="pacemaker-2.10" epoch="5" num_updates="0" admin_epoch="0" cib-last-written="Tue Feb 21 13:19:57 2023" update-origin="node-1" update-client="cibadmin" update-user="root" have-quorum="true" dc-uuid="1" execution-date="1676981997">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="true"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-node-pending-timeout" name="node-pending-timeout" value="300s"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node-1"/>
+ <node id="2" uname="node-2"/>
+ </nodes>
+ <resources>
+ <primitive id="st-sbd" class="stonith" type="external/sbd"/>
+ </resources>
+ <constraints/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node-1" in_ccm="true" crmd="online" crm-debug-origin="post_cache_update" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources/>
+ </lrm>
+ </node_state>
+ <node_state id="2" in_ccm="1676981697" crmd="0" crm-debug-origin="post_cache_update"/>
+ </status>
+</cib>
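
This fixture exercises the node-pending-timeout cluster property: node-2 has joined the membership layer (its in_ccm value carries the join timestamp, 1676981697) but its controller never joined (crmd="0"), and execution-date is 1676981997, exactly 300 seconds later, so the configured pending interval has elapsed. As a rough sketch, the property is enabled with a single cluster-options nvpair, as in the file itself:

    <nvpair id="cib-bootstrap-options-node-pending-timeout"
            name="node-pending-timeout" value="300s"/>
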
diff --git a/cts/scheduler/xml/pending-node-no-uname.xml b/cts/scheduler/xml/pending-node-no-uname.xml
new file mode 100644
index 0000000..d1b3664
--- /dev/null
+++ b/cts/scheduler/xml/pending-node-no-uname.xml
@@ -0,0 +1,26 @@
+<cib crm_feature_set="3.0.4" validate-with="pacemaker-2.10" epoch="5" num_updates="0" admin_epoch="0" cib-last-written="Tue Feb 21 13:19:57 2023" update-origin="node-1" update-client="cibadmin" update-user="root" have-quorum="true" dc-uuid="1" execution-date="1676981997">
+ <configuration>
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="true"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node-1"/>
+ <node id="2" uname="node-2"/>
+ </nodes>
+ <resources>
+ <primitive id="st-sbd" class="stonith" type="external/sbd"/>
+ </resources>
+ <constraints/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node-1" in_ccm="true" crmd="online" crm-debug-origin="post_cache_update" join="member" expected="member">
+ <lrm id="1">
+ <lrm_resources/>
+ </lrm>
+ </node_state>
+ <node_state id="2" in_ccm="true" crmd="offline" crm-debug-origin="post_cache_update"/>
+ </status>
+</cib>
diff --git a/cts/scheduler/xml/promoted-ordering.xml b/cts/scheduler/xml/promoted-ordering.xml
index bcf018e..7dd2415 100644
--- a/cts/scheduler/xml/promoted-ordering.xml
+++ b/cts/scheduler/xml/promoted-ordering.xml
@@ -61,7 +61,7 @@
</primitive>
<meta_attributes id="group-group_main.meta"/>
</group>
- <primitive id="intip_1_master" class="ocf" type="IPaddr2" provider="heartbeat">
+ <primitive id="intip_1_active" class="ocf" type="IPaddr2" provider="heartbeat">
<operations>
<op name="monitor" interval="30s" id="intip_1_mon" timeout="30s"/>
</operations>
@@ -75,7 +75,7 @@
<nvpair id="nvpair.meta.auto-131" name="target-role" value="started"/>
</meta_attributes>
</primitive>
- <primitive id="intip_2_slave" class="ocf" type="IPaddr2" provider="heartbeat">
+ <primitive id="intip_2_passive" class="ocf" type="IPaddr2" provider="heartbeat">
<operations>
<op name="monitor" interval="30s" id="intip_2_mon" timeout="30s"/>
</operations>
@@ -85,7 +85,7 @@
<nvpair id="nvpair.id22294" name="nic" value="eth1"/>
<nvpair id="nvpair.id22303" name="target_role" value="started"/>
</instance_attributes>
- <meta_attributes id="primitive-intip_2_unpromoted.meta">
+ <meta_attributes id="primitive-intip_2_passive.meta">
<nvpair id="nvpair.meta.auto-153" name="target-role" value="started"/>
</meta_attributes>
</primitive>
@@ -242,24 +242,24 @@
<expression attribute="#uname" operation="eq" value="webcluster01" id="expression.id23076"/>
</rule>
</rsc_location>
- <rsc_location id="run_intip_1_promoted" rsc="intip_1_master">
+ <rsc_location id="run_intip_1_promoted" rsc="intip_1_active">
<rule id="pref_run_intip_1" score="200">
<expression attribute="#uname" operation="eq" value="webcluster01" id="expression.id23101"/>
</rule>
</rsc_location>
- <rsc_location id="run_intip_2_unpromoted" rsc="intip_2_slave">
+ <rsc_location id="run_intip_2_passive" rsc="intip_2_passive">
<rule id="pref_run_intip_2_wc2" score="100">
<expression attribute="#uname" operation="eq" value="webcluster02" id="expression.id23125"/>
</rule>
</rsc_location>
- <rsc_order id="order_drbd_mysql_ip0" first-action="start" first="intip_1_master" then="ms_drbd_mysql"/>
- <rsc_order id="order_drbd_mysql_ip1" first-action="start" first="intip_2_slave" then="ms_drbd_mysql"/>
- <rsc_order id="order_drbd_www_ip0" first-action="start" first="intip_1_master" then="ms_drbd_www"/>
- <rsc_order id="order_drbd_www_ip1" first-action="start" first="intip_2_slave" then="ms_drbd_www"/>
- <rsc_colocation id="colo_drbd_mysql_ip0" rsc="drbd_mysql" with-rsc="intip_1_master" rsc-role="Promoted"/>
- <rsc_colocation id="colo_drbd_mysql_ip1" rsc="drbd_mysql" with-rsc="intip_2_slave" rsc-role="Unpromoted" score="100"/>
- <rsc_colocation id="colo_drbd_www_ip0" rsc="drbd_www" with-rsc="intip_1_master" rsc-role="Promoted" score="100"/>
- <rsc_colocation id="colo_drbd_www_ip1" rsc="drbd_www" with-rsc="intip_2_slave" rsc-role="Unpromoted" score="100"/>
+ <rsc_order id="order_drbd_mysql_ip0" first-action="start" first="intip_1_active" then="ms_drbd_mysql"/>
+ <rsc_order id="order_drbd_mysql_ip1" first-action="start" first="intip_2_passive" then="ms_drbd_mysql"/>
+ <rsc_order id="order_drbd_www_ip0" first-action="start" first="intip_1_active" then="ms_drbd_www"/>
+ <rsc_order id="order_drbd_www_ip1" first-action="start" first="intip_2_passive" then="ms_drbd_www"/>
+ <rsc_colocation id="colo_drbd_mysql_ip0" rsc="drbd_mysql" with-rsc="intip_1_active" rsc-role="Promoted"/>
+ <rsc_colocation id="colo_drbd_mysql_ip1" rsc="drbd_mysql" with-rsc="intip_2_passive" rsc-role="Unpromoted" score="100"/>
+ <rsc_colocation id="colo_drbd_www_ip0" rsc="drbd_www" with-rsc="intip_1_active" rsc-role="Promoted" score="100"/>
+ <rsc_colocation id="colo_drbd_www_ip1" rsc="drbd_www" with-rsc="intip_2_passive" rsc-role="Unpromoted" score="100"/>
<rsc_order id="drbd_before_fs_mysql" first="ms_drbd_mysql" then="fs_mysql" then-action="start" first-action="promote"/>
<rsc_colocation id="colo_drbd_fs_mysql" rsc="fs_mysql" with-rsc="ms_drbd_mysql" with-rsc-role="Promoted" score="INFINITY"/>
<rsc_order id="drbd_before_ocfs2_www" first="ms_drbd_www" then="clone_ocfs2_www" then-action="start" first-action="promote"/>
diff --git a/cts/scheduler/xml/promoted-probed-score.xml b/cts/scheduler/xml/promoted-probed-score.xml
index cedc909..bc42aa4 100644
--- a/cts/scheduler/xml/promoted-probed-score.xml
+++ b/cts/scheduler/xml/promoted-probed-score.xml
@@ -623,7 +623,7 @@
</lrm>
<transient_attributes id="hypatia-corosync.nevis.columbia.edu">
<instance_attributes id="status-hypatia-corosync.nevis.columbia.edu">
- <nvpair id="status-hypatia-corosync.nevis.columbia.edu-promoted-AdminDrbd.0" name="master-AdminDrbd:0" value="5"/>
+ <nvpair id="status-hypatia-corosync.nevis.columbia.edu-promoted-AdminDrbd" name="master-AdminDrbd" value="5"/>
</instance_attributes>
</transient_attributes>
</node_state>
@@ -691,7 +691,7 @@
</lrm>
<transient_attributes id="orestes-corosync.nevis.columbia.edu">
<instance_attributes id="status-orestes-corosync.nevis.columbia.edu">
- <nvpair id="status-orestes-corosync.nevis.columbia.edu-promoted-AdminDrbd.1" name="master-AdminDrbd:1" value="5"/>
+ <nvpair id="status-orestes-corosync.nevis.columbia.edu-promoted-AdminDrbd" name="master-AdminDrbd" value="5"/>
</instance_attributes>
</transient_attributes>
</node_state>
diff --git a/cts/scheduler/xml/timeout-by-node.xml b/cts/scheduler/xml/timeout-by-node.xml
new file mode 100644
index 0000000..221885b
--- /dev/null
+++ b/cts/scheduler/xml/timeout-by-node.xml
@@ -0,0 +1,139 @@
+<cib crm_feature_set="3.16.1" validate-with="pacemaker-3.9" epoch="2" num_updates="0" admin_epoch="1" cib-last-written="Tue Oct 25 14:29:45 2022" update-origin="node2" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="4">
+ <configuration>
+ <!-- This is a simple test of an operation timeout that varies by node.
+ rsc1-clone should use a start timeout of 25s on node1, and
+ 23s everywhere else.
+ -->
+ <crm_config>
+ <cluster_property_set id="cib-bootstrap-options">
+ <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5"/>
+ <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+ <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
+ </cluster_property_set>
+ </crm_config>
+ <nodes>
+ <node id="1" uname="node1"/>
+ <node id="2" uname="node2"/>
+ <node id="3" uname="node3"/>
+ <node id="4" uname="node4"/>
+ <node id="5" uname="node5"/>
+ </nodes>
+ <resources>
+ <primitive class="stonith" id="Fencing" type="fence_xvm">
+ <meta_attributes id="Fencing-meta">
+ <nvpair id="Fencing-migration-threshold" name="migration-threshold" value="5"/>
+ </meta_attributes>
+ <instance_attributes id="Fencing-params">
+ <nvpair id="Fencing-key_file" name="key_file" value="/etc/pacemaker/fence_xvm.key"/>
+ <nvpair id="Fencing-multicast_address" name="multicast_address" value="239.255.100.100"/>
+ <nvpair id="Fencing-pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3 node4 node5"/>
+ </instance_attributes>
+ <operations>
+ <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+ </operations>
+ </primitive>
+ <clone id="rsc1-clone">
+ <primitive class="ocf" id="rsc1" provider="pacemaker" type="Dummy">
+ <operations>
+ <op id="rsc1-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="20s"/>
+ <op id="rsc1-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="20s"/>
+ <op id="rsc1-monitor-interval-10s" interval="10s" name="monitor" timeout="20s"/>
+ <op id="rsc1-reload-interval-0s" interval="0s" name="reload" timeout="20s"/>
+ <op id="rsc1-reload-agent-interval-0s" interval="0s" name="reload-agent" timeout="20s"/>
+ <op id="rsc1-start-interval-0s" interval="0s" name="start">
+ <meta_attributes id="start-meta" score="10">
+ <nvpair id="start-meta-timeout" name="timeout" value="23s"/>
+ </meta_attributes>
+ <meta_attributes id="start-meta-node1" score="20">
+ <rule id="node1-rule" score="INFINITY">
+ <expression id="node1-expr" attribute="#uname" operation="eq" value="node1"/>
+ </rule>
+ <nvpair id="start-meta-timeout-node1" name="timeout" value="25s"/>
+ </meta_attributes>
+ </op>
+ <op id="rsc1-stop-interval-0s" interval="0s" name="stop" timeout="20s"/>
+ </operations>
+ </primitive>
+ </clone>
+ </resources>
+ <constraints/>
+ <fencing-topology/>
+ <op_defaults/>
+ <alerts/>
+ <rsc_defaults/>
+ </configuration>
+ <status>
+ <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <transient_attributes id="1">
+ <instance_attributes id="status-1">
+ <nvpair id="status-1-.feature-set" name="#feature-set" value="3.16.1"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="1">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="9:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;9:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="9" rc-code="0" op-status="0" interval="0" last-rc-change="1666726336" exec-time="0" queue-time="0" op-digest="52e34745a77d95a636428d3b550eb867"/>
+ <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.16.1" transition-key="10:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;10:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" on_node="node1" call-id="10" rc-code="0" op-status="0" interval="120000" last-rc-change="1666726336" exec-time="0" queue-time="0" op-digest="acc6dd2c58c637db4d12a6fe35626617"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <transient_attributes id="2">
+ <instance_attributes id="status-2">
+ <nvpair id="status-2-.feature-set" name="#feature-set" value="3.16.1"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="2">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.16.1" transition-key="2:0:7:72f87069-7296-4421-b2b0-ab12fa9068ef" transition-magic="0:7;2:0:7:72f87069-7296-4421-b2b0-ab12fa9068ef" exit-reason="" on_node="node2" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1666726234" exec-time="2" queue-time="0" op-digest="ac94f147daea19463126aacea2f8cd39"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="3" uname="node3" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <transient_attributes id="3">
+ <instance_attributes id="status-3">
+ <nvpair id="status-3-.feature-set" name="#feature-set" value="3.16.1"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="3">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.16.1" transition-key="3:0:7:72f87069-7296-4421-b2b0-ab12fa9068ef" transition-magic="0:7;3:0:7:72f87069-7296-4421-b2b0-ab12fa9068ef" exit-reason="" on_node="node3" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1666726234" exec-time="1" queue-time="0" op-digest="ac94f147daea19463126aacea2f8cd39"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="4" uname="node4" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <transient_attributes id="4">
+ <instance_attributes id="status-4">
+ <nvpair id="status-4-.feature-set" name="#feature-set" value="3.16.1"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="4">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.16.1" transition-key="4:0:7:72f87069-7296-4421-b2b0-ab12fa9068ef" transition-magic="0:7;4:0:7:72f87069-7296-4421-b2b0-ab12fa9068ef" exit-reason="" on_node="node4" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1666726234" exec-time="1" queue-time="0" op-digest="ac94f147daea19463126aacea2f8cd39"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ <node_state id="5" uname="node5" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+ <transient_attributes id="5">
+ <instance_attributes id="status-5">
+ <nvpair id="status-5-.feature-set" name="#feature-set" value="3.16.1"/>
+ </instance_attributes>
+ </transient_attributes>
+ <lrm id="5">
+ <lrm_resources>
+ <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+ <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.16.1" transition-key="5:0:7:72f87069-7296-4421-b2b0-ab12fa9068ef" transition-magic="0:7;5:0:7:72f87069-7296-4421-b2b0-ab12fa9068ef" exit-reason="" on_node="node5" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1666726234" exec-time="1" queue-time="0" op-digest="ac94f147daea19463126aacea2f8cd39"/>
+ </lrm_resource>
+ </lrm_resources>
+ </lrm>
+ </node_state>
+ </status>
+</cib>
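
The interesting part of this fixture is rsc1's start operation, which carries two rule-based meta_attributes blocks: a default block (score 10) setting timeout=23s, and a node1-specific block (score 20) whose rule matches #uname eq node1 and sets timeout=25s, so the higher-scored block wins on node1 and the default applies elsewhere. A condensed sketch of the pattern (ids shortened for illustration):

    <op id="rsc1-start" interval="0s" name="start">
      <meta_attributes id="start-default" score="10">
        <nvpair id="start-default-timeout" name="timeout" value="23s"/>
      </meta_attributes>
      <meta_attributes id="start-node1" score="20">
        <rule id="start-node1-rule" score="INFINITY">
          <expression id="start-node1-expr" attribute="#uname" operation="eq" value="node1"/>
        </rule>
        <nvpair id="start-node1-timeout" name="timeout" value="25s"/>
      </meta_attributes>
    </op>
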
diff --git a/cts/support/Makefile.am b/cts/support/Makefile.am
index 33cfa6f..d591633 100644
--- a/cts/support/Makefile.am
+++ b/cts/support/Makefile.am
@@ -19,6 +19,6 @@ dist_cts_DATA = cts.conf
if BUILD_UPSTART
dist_cts_DATA += pacemaker-cts-dummyd.conf
endif
-cts_SCRIPTS = fence_dummy \
- LSBDummy \
- pacemaker-cts-dummyd
+cts_SCRIPTS = fence_dummy \
+ LSBDummy \
+ pacemaker-cts-dummyd
diff --git a/daemons/Makefile.am b/daemons/Makefile.am
index 743320b..30dd17e 100644
--- a/daemons/Makefile.am
+++ b/daemons/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2018 the Pacemaker project contributors
+# Copyright 2018-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -8,4 +8,10 @@
#
MAINTAINERCLEANFILES = Makefile.in
-SUBDIRS = based schedulerd attrd controld execd fenced pacemakerd
+SUBDIRS = based \
+ schedulerd \
+ attrd \
+ controld \
+ execd \
+ fenced \
+ pacemakerd
diff --git a/daemons/attrd/Makefile.am b/daemons/attrd/Makefile.am
index 6bb81c4..f8d8bc9 100644
--- a/daemons/attrd/Makefile.am
+++ b/daemons/attrd/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2004-2022 the Pacemaker project contributors
+# Copyright 2004-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -18,31 +18,32 @@ noinst_HEADERS = pacemaker-attrd.h
pacemaker_attrd_CFLAGS = $(CFLAGS_HARDENED_EXE)
pacemaker_attrd_LDFLAGS = $(LDFLAGS_HARDENED_EXE)
-pacemaker_attrd_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la \
- $(top_builddir)/lib/pengine/libpe_rules.la \
- $(top_builddir)/lib/common/libcrmcommon.la \
- $(top_builddir)/lib/cib/libcib.la \
- $(top_builddir)/lib/lrmd/liblrmd.la \
- $(CLUSTERLIBS)
+pacemaker_attrd_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la
+pacemaker_attrd_LDADD += $(top_builddir)/lib/cib/libcib.la
+pacemaker_attrd_LDADD += $(top_builddir)/lib/pengine/libpe_rules.la
+pacemaker_attrd_LDADD += $(top_builddir)/lib/lrmd/liblrmd.la
+pacemaker_attrd_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
+pacemaker_attrd_LDADD += $(CLUSTERLIBS)
pacemaker_attrd_SOURCES = attrd_alerts.c \
- attrd_attributes.c \
- attrd_cib.c \
- attrd_corosync.c \
- attrd_elections.c \
- attrd_ipc.c \
- attrd_messages.c \
- attrd_sync.c \
- attrd_utils.c \
- pacemaker-attrd.c
-
-clean-generic:
- rm -f *.log *.debug *.xml *~
-
-if BUILD_LEGACY_LINKS
+ attrd_attributes.c \
+ attrd_cib.c \
+ attrd_corosync.c \
+ attrd_elections.c \
+ attrd_ipc.c \
+ attrd_messages.c \
+ attrd_sync.c \
+ attrd_utils.c \
+ pacemaker-attrd.c
+
+.PHONY: install-exec-hook
install-exec-hook:
+if BUILD_LEGACY_LINKS
cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f attrd && $(LN_S) pacemaker-attrd attrd
+endif
+.PHONY: uninstall-hook
uninstall-hook:
+if BUILD_LEGACY_LINKS
cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f attrd
endif
diff --git a/daemons/attrd/attrd_alerts.c b/daemons/attrd/attrd_alerts.c
index b694891..495e18f 100644
--- a/daemons/attrd/attrd_alerts.c
+++ b/daemons/attrd/attrd_alerts.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2015-2021 the Pacemaker project contributors
+ * Copyright 2015-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -14,6 +14,7 @@
#include <crm/cluster/internal.h>
#include <crm/cluster/election_internal.h>
#include <crm/common/alerts_internal.h>
+#include <crm/common/cib_internal.h>
#include <crm/pengine/rules_internal.h>
#include <crm/lrmd_internal.h>
#include "pacemaker-attrd.h"
@@ -92,7 +93,7 @@ config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void
}
crmalerts = output;
- if (crmalerts && !pcmk__str_eq(crm_element_name(crmalerts), XML_CIB_TAG_ALERTS, pcmk__str_none)) {
+ if ((crmalerts != NULL) && !pcmk__xe_is(crmalerts, XML_CIB_TAG_ALERTS)) {
crmalerts = first_named_child(crmalerts, XML_CIB_TAG_ALERTS);
}
if (!crmalerts) {
@@ -104,9 +105,6 @@ config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void
attrd_alert_list = pe_unpack_alerts(crmalerts);
}
-#define XPATH_ALERTS \
- "/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION "/" XML_CIB_TAG_ALERTS
-
gboolean
attrd_read_options(gpointer user_data)
{
@@ -114,8 +112,9 @@ attrd_read_options(gpointer user_data)
CRM_CHECK(the_cib != NULL, return TRUE);
- call_id = the_cib->cmds->query(the_cib, XPATH_ALERTS, NULL,
- cib_xpath | cib_scope_local);
+ call_id = the_cib->cmds->query(the_cib,
+ pcmk__cib_abs_xpath_for(XML_CIB_TAG_ALERTS),
+ NULL, cib_xpath|cib_scope_local);
the_cib->cmds->register_callback_full(the_cib, call_id, 120, FALSE, NULL,
"config_query_callback",
@@ -125,14 +124,6 @@ attrd_read_options(gpointer user_data)
return TRUE;
}
-void
-attrd_cib_updated_cb(const char *event, xmlNode * msg)
-{
- if (!attrd_shutting_down() && pcmk__alert_in_patchset(msg, false)) {
- mainloop_set_trigger(attrd_config_read);
- }
-}
-
int
attrd_send_attribute_alert(const char *node, int nodeid,
const char *attr, const char *value)
diff --git a/daemons/attrd/attrd_attributes.c b/daemons/attrd/attrd_attributes.c
index 516ced7..388c181 100644
--- a/daemons/attrd/attrd_attributes.c
+++ b/daemons/attrd/attrd_attributes.c
@@ -25,25 +25,45 @@
static attribute_t *
attrd_create_attribute(xmlNode *xml)
{
+ int is_private = 0;
int dampen = 0;
- const char *value = crm_element_value(xml, PCMK__XA_ATTR_DAMPENING);
- attribute_t *a = calloc(1, sizeof(attribute_t));
+ const char *name = crm_element_value(xml, PCMK__XA_ATTR_NAME);
+ const char *set_type = crm_element_value(xml, PCMK__XA_ATTR_SET_TYPE);
+ const char *dampen_s = crm_element_value(xml, PCMK__XA_ATTR_DAMPENING);
+ attribute_t *a = NULL;
+
+ if (set_type == NULL) {
+ set_type = XML_TAG_ATTR_SETS;
+ }
+
+ /* Set type is meaningful only when writing to the CIB. Private
+ * attributes are not written.
+ */
+ crm_element_value_int(xml, PCMK__XA_ATTR_IS_PRIVATE, &is_private);
+ if ((is_private == 0)
+ && !pcmk__str_any_of(set_type, XML_TAG_ATTR_SETS, XML_TAG_UTILIZATION,
+ NULL)) {
+ crm_warn("Ignoring attribute %s with invalid set type %s",
+ pcmk__s(name, "(unidentified)"), set_type);
+ return NULL;
+ }
+ a = calloc(1, sizeof(attribute_t));
CRM_ASSERT(a != NULL);
- a->id = crm_element_value_copy(xml, PCMK__XA_ATTR_NAME);
- a->set_id = crm_element_value_copy(xml, PCMK__XA_ATTR_SET);
- a->set_type = crm_element_value_copy(xml, PCMK__XA_ATTR_SET_TYPE);
- a->uuid = crm_element_value_copy(xml, PCMK__XA_ATTR_UUID);
- a->values = pcmk__strikey_table(NULL, attrd_free_attribute_value);
+ a->is_private = is_private;
+ pcmk__str_update(&a->id, name);
+ pcmk__str_update(&a->set_type, set_type);
- crm_element_value_int(xml, PCMK__XA_ATTR_IS_PRIVATE, &a->is_private);
+ a->set_id = crm_element_value_copy(xml, PCMK__XA_ATTR_SET);
+ a->uuid = crm_element_value_copy(xml, PCMK__XA_ATTR_UUID);
+ a->values = pcmk__strikey_table(NULL, attrd_free_attribute_value);
a->user = crm_element_value_copy(xml, PCMK__XA_ATTR_USER);
crm_trace("Performing all %s operations as user '%s'", a->id, a->user);
- if (value != NULL) {
- dampen = crm_get_msec(value);
+ if (dampen_s != NULL) {
+ dampen = crm_get_msec(dampen_s);
}
crm_trace("Created attribute %s with %s write delay", a->id,
(a->timeout_ms == 0)? "no" : pcmk__readable_interval(a->timeout_ms));
@@ -52,7 +72,7 @@ attrd_create_attribute(xmlNode *xml)
a->timeout_ms = dampen;
a->timer = attrd_add_timer(a->id, a->timeout_ms, a);
} else if (dampen < 0) {
- crm_warn("Ignoring invalid delay %s for attribute %s", value, a->id);
+ crm_warn("Ignoring invalid delay %s for attribute %s", dampen_s, a->id);
}
g_hash_table_replace(attributes, a->id, a);
@@ -169,6 +189,10 @@ attrd_populate_attribute(xmlNode *xml, const char *attr)
if (a == NULL) {
if (update_both || pcmk__str_eq(op, PCMK__ATTRD_CMD_UPDATE, pcmk__str_none)) {
a = attrd_create_attribute(xml);
+ if (a == NULL) {
+ return NULL;
+ }
+
} else {
crm_warn("Could not update %s: attribute not found", attr);
return NULL;
diff --git a/daemons/attrd/attrd_cib.c b/daemons/attrd/attrd_cib.c
index 928c013..80e5580 100644
--- a/daemons/attrd/attrd_cib.c
+++ b/daemons/attrd/attrd_cib.c
@@ -10,6 +10,7 @@
#include <crm_internal.h>
#include <errno.h>
+#include <inttypes.h> // PRIu32
#include <stdbool.h>
#include <stdlib.h>
#include <glib.h>
@@ -24,6 +25,188 @@
static int last_cib_op_done = 0;
+static void write_attribute(attribute_t *a, bool ignore_delay);
+
+static void
+attrd_cib_destroy_cb(gpointer user_data)
+{
+ cib_t *cib = user_data;
+
+ cib->cmds->signoff(cib);
+
+ if (attrd_shutting_down(false)) {
+ crm_info("Disconnected from the CIB manager");
+
+ } else {
+ // @TODO This should trigger a reconnect, not a shutdown
+ crm_crit("Lost connection to the CIB manager, shutting down");
+ attrd_exit_status = CRM_EX_DISCONNECT;
+ attrd_shutdown(0);
+ }
+}
+
+static void
+attrd_cib_updated_cb(const char *event, xmlNode *msg)
+{
+ const xmlNode *patchset = NULL;
+ const char *client_name = NULL;
+
+ if (attrd_shutting_down(true)) {
+ return;
+ }
+
+ if (cib__get_notify_patchset(msg, &patchset) != pcmk_rc_ok) {
+ return;
+ }
+
+ if (cib__element_in_patchset(patchset, XML_CIB_TAG_ALERTS)) {
+ mainloop_set_trigger(attrd_config_read);
+ }
+
+ if (!attrd_election_won()) {
+ // Don't write attributes if we're not the writer
+ return;
+ }
+
+ client_name = crm_element_value(msg, F_CIB_CLIENTNAME);
+ if (!cib__client_triggers_refresh(client_name)) {
+ // The CIB is still accurate
+ return;
+ }
+
+ if (cib__element_in_patchset(patchset, XML_CIB_TAG_NODES)
+ || cib__element_in_patchset(patchset, XML_CIB_TAG_STATUS)) {
+
+ /* An unsafe client modified the nodes or status section. Write
+ * transient attributes to ensure they're up-to-date in the CIB.
+ */
+ if (client_name == NULL) {
+ client_name = crm_element_value(msg, F_CIB_CLIENTID);
+ }
+ crm_notice("Updating all attributes after %s event triggered by %s",
+ event, pcmk__s(client_name, "(unidentified client)"));
+
+ attrd_write_attributes(attrd_write_all);
+ }
+}
+
+int
+attrd_cib_connect(int max_retry)
+{
+ static int attempts = 0;
+
+ int rc = -ENOTCONN;
+
+ the_cib = cib_new();
+ if (the_cib == NULL) {
+ return -ENOTCONN;
+ }
+
+ do {
+ if (attempts > 0) {
+ sleep(attempts);
+ }
+ attempts++;
+ crm_debug("Connection attempt %d to the CIB manager", attempts);
+ rc = the_cib->cmds->signon(the_cib, T_ATTRD, cib_command);
+
+ } while ((rc != pcmk_ok) && (attempts < max_retry));
+
+ if (rc != pcmk_ok) {
+ crm_err("Connection to the CIB manager failed: %s " CRM_XS " rc=%d",
+ pcmk_strerror(rc), rc);
+ goto cleanup;
+ }
+
+ crm_debug("Connected to the CIB manager after %d attempts", attempts);
+
+ rc = the_cib->cmds->set_connection_dnotify(the_cib, attrd_cib_destroy_cb);
+ if (rc != pcmk_ok) {
+ crm_err("Could not set disconnection callback");
+ goto cleanup;
+ }
+
+ rc = the_cib->cmds->add_notify_callback(the_cib, T_CIB_DIFF_NOTIFY,
+ attrd_cib_updated_cb);
+ if (rc != pcmk_ok) {
+ crm_err("Could not set CIB notification callback");
+ goto cleanup;
+ }
+
+ return pcmk_ok;
+
+cleanup:
+ cib__clean_up_connection(&the_cib);
+ return -ENOTCONN;
+}
+
+void
+attrd_cib_disconnect(void)
+{
+ CRM_CHECK(the_cib != NULL, return);
+ the_cib->cmds->del_notify_callback(the_cib, T_CIB_DIFF_NOTIFY,
+ attrd_cib_updated_cb);
+ cib__clean_up_connection(&the_cib);
+}
+
+static void
+attrd_erase_cb(xmlNode *msg, int call_id, int rc, xmlNode *output,
+ void *user_data)
+{
+ do_crm_log_unlikely(((rc != pcmk_ok)? LOG_NOTICE : LOG_DEBUG),
+ "Cleared transient attributes: %s "
+ CRM_XS " xpath=%s rc=%d",
+ pcmk_strerror(rc), (char *) user_data, rc);
+}
+
+#define XPATH_TRANSIENT "//node_state[@uname='%s']/" XML_TAG_TRANSIENT_NODEATTRS
+
+/*!
+ * \internal
+ * \brief Wipe all transient attributes for this node from the CIB
+ *
+ * Clear any previous transient node attributes from the CIB. This is
+ * normally done by the DC's controller when this node leaves the cluster, but
+ * this handles the case where the node restarted so quickly that the
+ * cluster layer didn't notice.
+ *
+ * \todo If pacemaker-attrd respawns after crashing (see PCMK_ENV_RESPAWNED),
+ * ideally we'd skip this and sync our attributes from the writer.
+ * However, currently we reject any values for us that the writer has, in
+ * attrd_peer_update().
+ */
+static void
+attrd_erase_attrs(void)
+{
+ int call_id = 0;
+ char *xpath = crm_strdup_printf(XPATH_TRANSIENT, attrd_cluster->uname);
+
+ crm_info("Clearing transient attributes from CIB " CRM_XS " xpath=%s",
+ xpath);
+
+ call_id = the_cib->cmds->remove(the_cib, xpath, NULL, cib_xpath);
+ the_cib->cmds->register_callback_full(the_cib, call_id, 120, FALSE, xpath,
+ "attrd_erase_cb", attrd_erase_cb,
+ free);
+}
+
+/*!
+ * \internal
+ * \brief Prepare the CIB after cluster is connected
+ */
+void
+attrd_cib_init(void)
+{
+ // We have no attribute values in memory, wipe the CIB to match
+ attrd_erase_attrs();
+
+ // Set a trigger for reading the CIB (for the alerts section)
+ attrd_config_read = mainloop_add_trigger(G_PRIORITY_HIGH, attrd_read_options, NULL);
+
+ // Always read the CIB at start-up
+ mainloop_set_trigger(attrd_config_read);
+}
+
static gboolean
attribute_timer_cb(gpointer data)
{
@@ -92,7 +275,7 @@ attrd_cib_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, void *use
/* We deferred a write of a new update because this update was in
* progress. Write out the new value without additional delay.
*/
- attrd_write_attribute(a, false);
+ write_attribute(a, false);
/* We're re-attempting a write because the original failed; delay
* the next attempt so we don't potentially flood the CIB manager
@@ -121,48 +304,134 @@ attrd_cib_callback(xmlNode *msg, int call_id, int rc, xmlNode *output, void *use
}
}
-static void
-build_update_element(xmlNode *parent, attribute_t *a, const char *nodeid, const char *value)
+/*!
+ * \internal
+ * \brief Add a set-attribute update request to the current CIB transaction
+ *
+ * \param[in] attr Attribute to update
+ * \param[in] attr_id ID of attribute to update
+ * \param[in] node_id ID of node for which to update attribute value
+ * \param[in] set_id ID of attribute set
+ * \param[in] value New value for attribute
+ *
+ * \return Standard Pacemaker return code
+ */
+static int
+add_set_attr_update(const attribute_t *attr, const char *attr_id,
+ const char *node_id, const char *set_id, const char *value)
{
- const char *set = NULL;
- xmlNode *xml_obj = NULL;
+ xmlNode *update = create_xml_node(NULL, XML_CIB_TAG_STATE);
+ xmlNode *child = update;
+ int rc = ENOMEM;
- xml_obj = create_xml_node(parent, XML_CIB_TAG_STATE);
- crm_xml_add(xml_obj, XML_ATTR_ID, nodeid);
+ if (child == NULL) {
+ goto done;
+ }
+ crm_xml_add(child, XML_ATTR_ID, node_id);
- xml_obj = create_xml_node(xml_obj, XML_TAG_TRANSIENT_NODEATTRS);
- crm_xml_add(xml_obj, XML_ATTR_ID, nodeid);
+ child = create_xml_node(child, XML_TAG_TRANSIENT_NODEATTRS);
+ if (child == NULL) {
+ goto done;
+ }
+ crm_xml_add(child, XML_ATTR_ID, node_id);
- if (pcmk__str_eq(a->set_type, XML_TAG_ATTR_SETS, pcmk__str_null_matches)) {
- xml_obj = create_xml_node(xml_obj, XML_TAG_ATTR_SETS);
- } else if (pcmk__str_eq(a->set_type, XML_TAG_UTILIZATION, pcmk__str_none)) {
- xml_obj = create_xml_node(xml_obj, XML_TAG_UTILIZATION);
- } else {
- crm_err("Unknown set type attribute: %s", a->set_type);
+ child = create_xml_node(child, attr->set_type);
+ if (child == NULL) {
+ goto done;
}
+ crm_xml_add(child, XML_ATTR_ID, set_id);
- if (a->set_id) {
- crm_xml_set_id(xml_obj, "%s", a->set_id);
- } else {
- crm_xml_set_id(xml_obj, "%s-%s", XML_CIB_TAG_STATUS, nodeid);
+ child = create_xml_node(child, XML_CIB_TAG_NVPAIR);
+ if (child == NULL) {
+ goto done;
}
- set = ID(xml_obj);
+ crm_xml_add(child, XML_ATTR_ID, attr_id);
+ crm_xml_add(child, XML_NVPAIR_ATTR_NAME, attr->id);
+ crm_xml_add(child, XML_NVPAIR_ATTR_VALUE, value);
+
+ rc = the_cib->cmds->modify(the_cib, XML_CIB_TAG_STATUS, update,
+ cib_can_create|cib_transaction);
+ rc = pcmk_legacy2rc(rc);
+
+done:
+ free_xml(update);
+ return rc;
+}
+
+/*!
+ * \internal
+ * \brief Add an unset-attribute update request to the current CIB transaction
+ *
+ * \param[in] attr Attribute to update
+ * \param[in] attr_id ID of attribute to update
+ * \param[in] node_id ID of node for which to update attribute value
+ * \param[in] set_id ID of attribute set
+ *
+ * \return Standard Pacemaker return code
+ */
+static int
+add_unset_attr_update(const attribute_t *attr, const char *attr_id,
+ const char *node_id, const char *set_id)
+{
+ char *xpath = crm_strdup_printf("/" XML_TAG_CIB
+ "/" XML_CIB_TAG_STATUS
+ "/" XML_CIB_TAG_STATE
+ "[@" XML_ATTR_ID "='%s']"
+ "/" XML_TAG_TRANSIENT_NODEATTRS
+ "[@" XML_ATTR_ID "='%s']"
+ "/%s[@" XML_ATTR_ID "='%s']"
+ "/" XML_CIB_TAG_NVPAIR
+ "[@" XML_ATTR_ID "='%s' "
+ "and @" XML_NVPAIR_ATTR_NAME "='%s']",
+ node_id, node_id, attr->set_type, set_id,
+ attr_id, attr->id);
+
+ int rc = the_cib->cmds->remove(the_cib, xpath, NULL,
+ cib_xpath|cib_transaction);
+
+ free(xpath);
+ return pcmk_legacy2rc(rc);
+}
+
+/*!
+ * \internal
+ * \brief Add an attribute update request to the current CIB transaction
+ *
+ * \param[in] attr Attribute to update
+ * \param[in] value New value for attribute
+ * \param[in] node_id ID of node for which to update attribute value
+ *
+ * \return Standard Pacemaker return code
+ */
+static int
+add_attr_update(const attribute_t *attr, const char *value, const char *node_id)
+{
+ char *set_id = NULL;
+ char *attr_id = NULL;
+ int rc = pcmk_rc_ok;
- xml_obj = create_xml_node(xml_obj, XML_CIB_TAG_NVPAIR);
- if (a->uuid) {
- crm_xml_set_id(xml_obj, "%s", a->uuid);
+ if (attr->set_id != NULL) {
+ pcmk__str_update(&set_id, attr->set_id);
} else {
- crm_xml_set_id(xml_obj, "%s-%s", set, a->id);
+ set_id = crm_strdup_printf("%s-%s", XML_CIB_TAG_STATUS, node_id);
}
- crm_xml_add(xml_obj, XML_NVPAIR_ATTR_NAME, a->id);
+ crm_xml_sanitize_id(set_id);
- if(value) {
- crm_xml_add(xml_obj, XML_NVPAIR_ATTR_VALUE, value);
+ if (attr->uuid != NULL) {
+ pcmk__str_update(&attr_id, attr->uuid);
+ } else {
+ attr_id = crm_strdup_printf("%s-%s", set_id, attr->id);
+ }
+ crm_xml_sanitize_id(attr_id);
+ if (value != NULL) {
+ rc = add_set_attr_update(attr, attr_id, node_id, set_id, value);
} else {
- crm_xml_add(xml_obj, XML_NVPAIR_ATTR_VALUE, "");
- crm_xml_add(xml_obj, "__delete__", XML_NVPAIR_ATTR_VALUE);
+ rc = add_unset_attr_update(attr, attr_id, node_id, set_id);
}
+ free(set_id);
+ free(attr_id);
+ return rc;
}
static void
@@ -202,15 +471,22 @@ attrd_add_timer(const char *id, int timeout_ms, attribute_t *attr)
return mainloop_timer_add(id, timeout_ms, FALSE, attribute_timer_cb, attr);
}
-void
-attrd_write_attribute(attribute_t *a, bool ignore_delay)
+/*!
+ * \internal
+ * \brief Write an attribute's values to the CIB if appropriate
+ *
+ * \param[in,out] a Attribute to write
+ * \param[in] ignore_delay If true, write attribute now regardless of any
+ * configured delay
+ */
+static void
+write_attribute(attribute_t *a, bool ignore_delay)
{
int private_updates = 0, cib_updates = 0;
- xmlNode *xml_top = NULL;
attribute_value_t *v = NULL;
GHashTableIter iter;
- enum cib_call_options flags = cib_none;
GHashTable *alert_attribute_value = NULL;
+ int rc = pcmk_ok;
if (a == NULL) {
return;
@@ -218,32 +494,37 @@ attrd_write_attribute(attribute_t *a, bool ignore_delay)
/* If this attribute will be written to the CIB ... */
if (!stand_alone && !a->is_private) {
-
/* Defer the write if now's not a good time */
- CRM_CHECK(the_cib != NULL, return);
if (a->update && (a->update < last_cib_op_done)) {
- crm_info("Write out of '%s' continuing: update %d considered lost", a->id, a->update);
+ crm_info("Write out of '%s' continuing: update %d considered lost",
+ a->id, a->update);
a->update = 0; // Don't log this message again
} else if (a->update) {
- crm_info("Write out of '%s' delayed: update %d in progress", a->id, a->update);
- return;
+ crm_info("Write out of '%s' delayed: update %d in progress",
+ a->id, a->update);
+ goto done;
} else if (mainloop_timer_running(a->timer)) {
if (ignore_delay) {
- /* 'refresh' forces a write of the current value of all attributes
- * Cancel any existing timers, we're writing it NOW
- */
mainloop_timer_stop(a->timer);
- crm_debug("Write out of '%s': timer is running but ignore delay", a->id);
+ crm_debug("Overriding '%s' write delay", a->id);
} else {
- crm_info("Write out of '%s' delayed: timer is running", a->id);
- return;
+ crm_info("Delaying write of '%s'", a->id);
+ goto done;
}
}
- /* Initialize the status update XML */
- xml_top = create_xml_node(NULL, XML_CIB_TAG_STATUS);
+ // Initiate a transaction for all the peer value updates
+ CRM_CHECK(the_cib != NULL, goto done);
+ the_cib->cmds->set_user(the_cib, a->user);
+ rc = the_cib->cmds->init_transaction(the_cib);
+ if (rc != pcmk_ok) {
+ crm_err("Failed to write %s (id %s, set %s): Could not initiate "
+ "CIB transaction",
+ a->id, pcmk__s(a->uuid, "n/a"), pcmk__s(a->set_id, "n/a"));
+ goto done;
+ }
}
/* Attribute will be written shortly, so clear changed flag */
@@ -256,12 +537,14 @@ attrd_write_attribute(attribute_t *a, bool ignore_delay)
a->force_write = FALSE;
/* Make the table for the attribute trap */
- alert_attribute_value = pcmk__strikey_table(NULL, attrd_free_attribute_value);
+ alert_attribute_value = pcmk__strikey_table(NULL,
+ attrd_free_attribute_value);
/* Iterate over each peer value of this attribute */
g_hash_table_iter_init(&iter, a->values);
- while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & v)) {
- crm_node_t *peer = crm_get_peer_full(v->nodeid, v->nodename, CRM_GET_PEER_ANY);
+ while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &v)) {
+ crm_node_t *peer = crm_get_peer_full(v->nodeid, v->nodename,
+ CRM_GET_PEER_ANY);
/* If the value's peer info does not correspond to a peer, ignore it */
if (peer == NULL) {
@@ -291,11 +574,20 @@ attrd_write_attribute(attribute_t *a, bool ignore_delay)
continue;
}
- /* Add this value to status update XML */
- crm_debug("Updating %s[%s]=%s (peer known as %s, UUID %s, ID %u/%u)",
+ // Update this value as part of the CIB transaction we're building
+ rc = add_attr_update(a, v->current, peer->uuid);
+ if (rc != pcmk_rc_ok) {
+ crm_err("Failed to update %s[%s]=%s (peer known as %s, UUID %s, "
+ "ID %" PRIu32 "/%" PRIu32 "): %s",
+ a->id, v->nodename, v->current, peer->uname, peer->uuid,
+ peer->id, v->nodeid, pcmk_rc_str(rc));
+ continue;
+ }
+
+ crm_debug("Updating %s[%s]=%s (peer known as %s, UUID %s, ID "
+ "%" PRIu32 "/%" PRIu32 ")",
a->id, v->nodename, v->current,
peer->uname, peer->uuid, peer->id, v->nodeid);
- build_update_element(xml_top, a, peer->uuid, v->current);
cib_updates++;
/* Preservation of the attribute to transmit alert */
@@ -305,12 +597,6 @@ attrd_write_attribute(attribute_t *a, bool ignore_delay)
v->requested = NULL;
if (v->current) {
v->requested = strdup(v->current);
- } else {
- /* Older attrd versions don't know about the cib_mixed_update
- * flag so make sure it goes to the local cib which does
- */
- cib__set_call_options(flags, crm_system_name,
- cib_mixed_update|cib_scope_local);
}
}
@@ -319,40 +605,55 @@ attrd_write_attribute(attribute_t *a, bool ignore_delay)
private_updates, pcmk__plural_s(private_updates),
a->id, pcmk__s(a->uuid, "n/a"), pcmk__s(a->set_id, "n/a"));
}
- if (cib_updates) {
- crm_log_xml_trace(xml_top, __func__);
+ if (cib_updates > 0) {
+ char *id = NULL;
- a->update = cib_internal_op(the_cib, PCMK__CIB_REQUEST_MODIFY, NULL,
- XML_CIB_TAG_STATUS, xml_top, NULL, flags,
- a->user);
+ // Commit transaction
+ a->update = the_cib->cmds->end_transaction(the_cib, true, cib_none);
crm_info("Sent CIB request %d with %d change%s for %s (id %s, set %s)",
a->update, cib_updates, pcmk__plural_s(cib_updates),
a->id, pcmk__s(a->uuid, "n/a"), pcmk__s(a->set_id, "n/a"));
- the_cib->cmds->register_callback_full(the_cib, a->update,
- CIB_OP_TIMEOUT_S, FALSE,
- strdup(a->id),
- "attrd_cib_callback",
- attrd_cib_callback, free);
- /* Transmit alert of the attribute */
- send_alert_attributes_value(a, alert_attribute_value);
+ pcmk__str_update(&id, a->id);
+ if (the_cib->cmds->register_callback_full(the_cib, a->update,
+ CIB_OP_TIMEOUT_S, FALSE, id,
+ "attrd_cib_callback",
+ attrd_cib_callback, free)) {
+ // Transmit alert of the attribute
+ send_alert_attributes_value(a, alert_attribute_value);
+ }
}
- g_hash_table_destroy(alert_attribute_value);
- free_xml(xml_top);
+done:
+ // Discard transaction (if any)
+ if (the_cib != NULL) {
+ the_cib->cmds->end_transaction(the_cib, false, cib_none);
+ the_cib->cmds->set_user(the_cib, NULL);
+ }
+
+ if (alert_attribute_value != NULL) {
+ g_hash_table_destroy(alert_attribute_value);
+ }
}
+/*!
+ * \internal
+ * \brief Write out attributes
+ *
+ * \param[in] options Group of enum attrd_write_options
+ */
void
-attrd_write_attributes(bool all, bool ignore_delay)
+attrd_write_attributes(uint32_t options)
{
GHashTableIter iter;
attribute_t *a = NULL;
- crm_debug("Writing out %s attributes", all? "all" : "changed");
+ crm_debug("Writing out %s attributes",
+ pcmk_is_set(options, attrd_write_all)? "all" : "changed");
g_hash_table_iter_init(&iter, attributes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & a)) {
- if (!all && a->unknown_peer_uuids) {
+ if (!pcmk_is_set(options, attrd_write_all) && a->unknown_peer_uuids) {
// Try writing this attribute again, in case peer ID was learned
a->changed = true;
} else if (a->force_write) {
@@ -360,9 +661,14 @@ attrd_write_attributes(bool all, bool ignore_delay)
a->changed = true;
}
- if(all || a->changed) {
- /* When forced write flag is set, ignore delay. */
- attrd_write_attribute(a, (a->force_write ? true : ignore_delay));
+ if (pcmk_is_set(options, attrd_write_all) || a->changed) {
+ bool ignore_delay = pcmk_is_set(options, attrd_write_no_delay);
+
+ if (a->force_write) {
+ // Always ignore delay when forced write flag is set
+ ignore_delay = true;
+ }
+ write_attribute(a, ignore_delay);
} else {
crm_trace("Skipping unchanged attribute %s", a->id);
}
@@ -373,7 +679,7 @@ void
attrd_write_or_elect_attribute(attribute_t *a)
{
if (attrd_election_won()) {
- attrd_write_attribute(a, false);
+ write_attribute(a, false);
} else {
attrd_start_election_if_needed();
}
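
write_attribute() now wraps all per-peer value updates in a single CIB transaction: initiate it, queue one modify or remove request per value with the cib_transaction flag, then commit, or discard on any early exit. A condensed sketch of that lifecycle, with update standing in for the XML built by add_set_attr_update() and most error handling omitted:

    // Sketch only: queue requests in a CIB transaction and commit them as one.
    // Returns a legacy Pacemaker code, or (on an asynchronous connection) the
    // call ID of the commit, as kept in a->update above.
    static int
    commit_status_updates(cib_t *cib, xmlNode *update)
    {
        int rc = cib->cmds->init_transaction(cib);

        if (rc != pcmk_ok) {
            return rc;
        }

        // cib_transaction defers the request instead of sending it immediately
        rc = cib->cmds->modify(cib, XML_CIB_TAG_STATUS, update,
                               cib_can_create|cib_transaction);
        if (rc != pcmk_ok) {
            cib->cmds->end_transaction(cib, false, cib_none);   // discard
            return rc;
        }

        // Commit everything queued so far as one CIB request
        return cib->cmds->end_transaction(cib, true, cib_none);
    }
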
diff --git a/daemons/attrd/attrd_corosync.c b/daemons/attrd/attrd_corosync.c
index ef205e6..86dc67b 100644
--- a/daemons/attrd/attrd_corosync.c
+++ b/daemons/attrd/attrd_corosync.c
@@ -23,8 +23,6 @@
#include "pacemaker-attrd.h"
-extern crm_exit_t attrd_exit_status;
-
static xmlNode *
attrd_confirmation(int callid)
{
@@ -48,7 +46,7 @@ attrd_peer_message(crm_node_t *peer, xmlNode *xml)
return;
}
- if (attrd_shutting_down()) {
+ if (attrd_shutting_down(false)) {
/* If we're shutting down, we want to continue responding to election
* ops as long as we're a cluster member (because our vote may be
* needed). Ignore all other messages.
@@ -133,11 +131,11 @@ attrd_cpg_dispatch(cpg_handle_t handle,
static void
attrd_cpg_destroy(gpointer unused)
{
- if (attrd_shutting_down()) {
- crm_info("Corosync disconnection complete");
+ if (attrd_shutting_down(false)) {
+ crm_info("Disconnected from Corosync process group");
} else {
- crm_crit("Lost connection to cluster layer, shutting down");
+ crm_crit("Lost connection to Corosync process group, shutting down");
attrd_exit_status = CRM_EX_DISCONNECT;
attrd_shutdown(0);
}
@@ -180,7 +178,7 @@ cache_remote_node(const char *node_name)
/* If we previously assumed this node was an unseen cluster node,
* remove its entry from the cluster peer cache.
*/
- crm_node_t *dup = pcmk__search_cluster_node_cache(0, node_name);
+ crm_node_t *dup = pcmk__search_cluster_node_cache(0, node_name, NULL);
if (dup && (dup->uuid == NULL)) {
reap_crm_member(0, node_name);
@@ -285,7 +283,7 @@ record_peer_nodeid(attribute_value_t *v, const char *host)
crm_trace("Learned %s has node id %s", known_peer->uname, known_peer->uuid);
if (attrd_election_won()) {
- attrd_write_attributes(false, false);
+ attrd_write_attributes(attrd_write_changed);
}
}
@@ -476,9 +474,7 @@ attrd_peer_clear_failure(pcmk__request_t *request)
crm_xml_add(xml, PCMK__XA_TASK, PCMK__ATTRD_CMD_UPDATE);
/* Make sure value is not set, so we delete */
- if (crm_element_value(xml, PCMK__XA_ATTR_VALUE)) {
- crm_xml_replace(xml, PCMK__XA_ATTR_VALUE, NULL);
- }
+ xml_remove_prop(xml, PCMK__XA_ATTR_VALUE);
g_hash_table_iter_init(&iter, attributes);
while (g_hash_table_iter_next(&iter, (gpointer *) &attr, NULL)) {
@@ -591,7 +587,8 @@ attrd_peer_update(const crm_node_t *peer, xmlNode *xml, const char *host,
{
bool handle_sync_point = false;
- if (xml_has_children(xml)) {
+ CRM_CHECK((peer != NULL) && (xml != NULL), return);
+ if (xml->children != NULL) {
for (xmlNode *child = first_named_child(xml, XML_ATTR_OP); child != NULL;
child = crm_next_same_xml(child)) {
attrd_copy_xml_attributes(xml, child);
diff --git a/daemons/attrd/attrd_elections.c b/daemons/attrd/attrd_elections.c
index 3b6b55a..82fbe8a 100644
--- a/daemons/attrd/attrd_elections.c
+++ b/daemons/attrd/attrd_elections.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2013-2022 the Pacemaker project contributors
+ * Copyright 2013-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -25,9 +25,11 @@ attrd_election_cb(gpointer user_data)
/* Update the peers after an election */
attrd_peer_sync(NULL, NULL);
- /* Update the CIB after an election */
- attrd_write_attributes(true, false);
- return FALSE;
+ /* After winning an election, update the CIB with the values of all
+ * attributes as the winner knows them.
+ */
+ attrd_write_attributes(attrd_write_all);
+ return G_SOURCE_REMOVE;
}
void
@@ -48,7 +50,7 @@ attrd_start_election_if_needed(void)
{
if ((peer_writer == NULL)
&& (election_state(writer) != election_in_progress)
- && !attrd_shutting_down()) {
+ && !attrd_shutting_down(false)) {
crm_info("Starting an election to determine the writer");
election_vote(writer);
@@ -70,7 +72,7 @@ attrd_handle_election_op(const crm_node_t *peer, xmlNode *xml)
crm_xml_add(xml, F_CRM_HOST_FROM, peer->uname);
// Don't become writer if we're shutting down
- rc = election_count_vote(writer, xml, !attrd_shutting_down());
+ rc = election_count_vote(writer, xml, !attrd_shutting_down(false));
switch(rc) {
case election_start:
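
In attrd_election_cb() above, the return value changes from FALSE to G_SOURCE_REMOVE. For GLib event sources the two are equivalent; the named constants simply make the one-shot versus repeating intent explicit. A generic GLib illustration, not Pacemaker-specific:

    #include <glib.h>

    // Returning G_SOURCE_REMOVE (FALSE) detaches the source after one run
    static gboolean
    run_once(gpointer user_data)
    {
        g_message("one-shot callback fired");
        return G_SOURCE_REMOVE;
    }

    // Returning G_SOURCE_CONTINUE (TRUE) keeps the source scheduled
    static gboolean
    keep_running(gpointer user_data)
    {
        return G_SOURCE_CONTINUE;
    }
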
diff --git a/daemons/attrd/attrd_ipc.c b/daemons/attrd/attrd_ipc.c
index 9d3dfff..05c4a69 100644
--- a/daemons/attrd/attrd_ipc.c
+++ b/daemons/attrd/attrd_ipc.c
@@ -140,12 +140,8 @@ attrd_client_clear_failure(pcmk__request_t *request)
}
/* Make sure attribute and value are not set, so we delete via regex */
- if (crm_element_value(xml, PCMK__XA_ATTR_NAME)) {
- crm_xml_replace(xml, PCMK__XA_ATTR_NAME, NULL);
- }
- if (crm_element_value(xml, PCMK__XA_ATTR_VALUE)) {
- crm_xml_replace(xml, PCMK__XA_ATTR_VALUE, NULL);
- }
+ xml_remove_prop(xml, PCMK__XA_ATTR_NAME);
+ xml_remove_prop(xml, PCMK__XA_ATTR_VALUE);
return attrd_client_update(request);
}
@@ -166,7 +162,8 @@ attrd_client_peer_remove(pcmk__request_t *request)
crm_element_value_int(xml, PCMK__XA_ATTR_NODE_ID, &nodeid);
if (nodeid > 0) {
- crm_node_t *node = pcmk__search_cluster_node_cache(nodeid, NULL);
+ crm_node_t *node = pcmk__search_cluster_node_cache(nodeid, NULL,
+ NULL);
char *host_alloc = NULL;
if (node && node->uname) {
@@ -235,7 +232,7 @@ attrd_client_refresh(pcmk__request_t *request)
crm_info("Updating all attributes");
attrd_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags);
- attrd_write_attributes(true, true);
+ attrd_write_attributes(attrd_write_all|attrd_write_no_delay);
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
return NULL;
@@ -282,7 +279,7 @@ expand_regexes(xmlNode *xml, const char *attr, const char *value, const char *re
* regex and replace it with the name.
*/
attrd_copy_xml_attributes(xml, child);
- crm_xml_replace(child, PCMK__XA_ATTR_PATTERN, NULL);
+ xml_remove_prop(child, PCMK__XA_ATTR_PATTERN);
crm_xml_add(child, PCMK__XA_ATTR_NAME, attr);
}
}
@@ -401,14 +398,18 @@ send_child_update(xmlNode *child, void *data)
xmlNode *
attrd_client_update(pcmk__request_t *request)
{
- xmlNode *xml = request->xml;
+ xmlNode *xml = NULL;
const char *attr, *value, *regex;
+ CRM_CHECK((request != NULL) && (request->xml != NULL), return NULL);
+
+ xml = request->xml;
+
/* If the message has children, that means it is a message from a newer
* client that supports sending multiple operations at a time. There are
* two ways we can handle that.
*/
- if (xml_has_children(xml)) {
+ if (xml->children != NULL) {
if (ATTRD_SUPPORTS_MULTI_MESSAGE(minimum_protocol_version)) {
/* First, if all peers support a certain protocol version, we can
* just broadcast the big message and they'll handle it. However,
@@ -494,7 +495,7 @@ static int32_t
attrd_ipc_accept(qb_ipcs_connection_t *c, uid_t uid, gid_t gid)
{
crm_trace("New client connection %p", c);
- if (attrd_shutting_down()) {
+ if (attrd_shutting_down(false)) {
crm_info("Ignoring new connection from pid %d during shutdown",
pcmk__client_pid(c));
return -EPERM;
diff --git a/daemons/attrd/attrd_messages.c b/daemons/attrd/attrd_messages.c
index 184176a..89da6d8 100644
--- a/daemons/attrd/attrd_messages.c
+++ b/daemons/attrd/attrd_messages.c
@@ -20,6 +20,36 @@ int minimum_protocol_version = -1;
static GHashTable *attrd_handlers = NULL;
+static bool
+is_sync_point_attr(xmlAttrPtr attr, void *data)
+{
+ return pcmk__str_eq((const char *) attr->name, PCMK__XA_ATTR_SYNC_POINT, pcmk__str_none);
+}
+
+static int
+remove_sync_point_attribute(xmlNode *xml, void *data)
+{
+ pcmk__xe_remove_matching_attrs(xml, is_sync_point_attr, NULL);
+ pcmk__xe_foreach_child(xml, XML_ATTR_OP, remove_sync_point_attribute, NULL);
+ return pcmk_rc_ok;
+}
+
+/* Sync points on a multi-update IPC message to an attrd too old to support
+ * multi-update messages won't work. Strip the sync point attribute off here
+ * so we don't pretend to support this situation and instead ACK the client
+ * immediately.
+ */
+static void
+remove_unsupported_sync_points(pcmk__request_t *request)
+{
+ if (request->xml->children != NULL && !ATTRD_SUPPORTS_MULTI_MESSAGE(minimum_protocol_version) &&
+ attrd_request_has_sync_point(request->xml)) {
+ crm_warn("Ignoring sync point in request from %s because not all nodes support it",
+ pcmk__request_origin(request));
+ remove_sync_point_attribute(request->xml, NULL);
+ }
+}
+
static xmlNode *
handle_unknown_request(pcmk__request_t *request)
{
@@ -42,6 +72,8 @@ handle_clear_failure_request(pcmk__request_t *request)
pcmk__set_result(&request->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
return NULL;
} else {
+ remove_unsupported_sync_points(request);
+
if (attrd_request_has_sync_point(request->xml)) {
/* If this client supplied a sync point it wants to wait for, add it to
* the wait list. Clients on this list will not receive an ACK until
@@ -180,6 +212,8 @@ handle_update_request(pcmk__request_t *request)
return NULL;
} else {
+ remove_unsupported_sync_points(request);
+
if (attrd_request_has_sync_point(request->xml)) {
/* If this client supplied a sync point it wants to wait for, add it to
* the wait list. Clients on this list will not receive an ACK until
diff --git a/daemons/attrd/attrd_sync.c b/daemons/attrd/attrd_sync.c
index d59ddd5..1a6c24c 100644
--- a/daemons/attrd/attrd_sync.c
+++ b/daemons/attrd/attrd_sync.c
@@ -313,7 +313,9 @@ attrd_cluster_sync_point_update(xmlNode *xml)
const char *
attrd_request_sync_point(xmlNode *xml)
{
- if (xml_has_children(xml)) {
+ CRM_CHECK(xml != NULL, return NULL);
+
+ if (xml->children != NULL) {
xmlNode *child = pcmk__xe_match(xml, XML_ATTR_OP, PCMK__XA_ATTR_SYNC_POINT, NULL);
if (child) {
diff --git a/daemons/attrd/attrd_utils.c b/daemons/attrd/attrd_utils.c
index 7de8dd9..341ee1a 100644
--- a/daemons/attrd/attrd_utils.c
+++ b/daemons/attrd/attrd_utils.c
@@ -56,26 +56,22 @@ attrd_clear_requesting_shutdown(void)
/*!
* \internal
- * \brief Check whether we're currently requesting shutdown
+ * \brief Check whether local attribute manager is shutting down
*
- * \return true if requesting shutdown, false otherwise
- */
-bool
-attrd_requesting_shutdown(void)
-{
- return requesting_shutdown;
-}
-
-/*!
- * \internal
- * \brief Check whether we're currently shutting down
+ * \param[in] if_requested Also consider presence of "shutdown" attribute
*
- * \return true if shutting down, false otherwise
+ * \return \c true if local attribute manager has begun shutdown sequence
+ * or (if \p if_requested is \c true) whether local node has a nonzero
+ * "shutdown" attribute set, otherwise \c false
+ * \note Most callers should pass \c false for \p if_requested, because the
+ * attribute manager needs to continue performing while the controller is
+ * shutting down, and even needs to be eligible for election in case all
+ * nodes are shutting down.
*/
bool
-attrd_shutting_down(void)
+attrd_shutting_down(bool if_requested)
{
- return shutting_down;
+ return shutting_down || (if_requested && requesting_shutdown);
}
/*!
@@ -137,39 +133,6 @@ attrd_run_mainloop(void)
g_main_loop_run(mloop);
}
-void
-attrd_cib_disconnect(void)
-{
- CRM_CHECK(the_cib != NULL, return);
- the_cib->cmds->del_notify_callback(the_cib, T_CIB_REPLACE_NOTIFY, attrd_cib_replaced_cb);
- the_cib->cmds->del_notify_callback(the_cib, T_CIB_DIFF_NOTIFY, attrd_cib_updated_cb);
- cib__clean_up_connection(&the_cib);
-}
-
-void
-attrd_cib_replaced_cb(const char *event, xmlNode * msg)
-{
- int change_section = cib_change_section_nodes | cib_change_section_status | cib_change_section_alerts;
-
- if (attrd_requesting_shutdown() || attrd_shutting_down()) {
- return;
- }
-
- crm_element_value_int(msg, F_CIB_CHANGE_SECTION, &change_section);
-
- if (attrd_election_won()) {
- if (change_section & (cib_change_section_nodes | cib_change_section_status)) {
- crm_notice("Updating all attributes after %s event", event);
- attrd_write_attributes(true, false);
- }
- }
-
- if (change_section & cib_change_section_alerts) {
- // Check for changes in alerts
- mainloop_set_trigger(attrd_config_read);
- }
-}
-
/* strlen("value") */
#define plus_plus_len (5)
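
The reworked attrd_shutting_down() folds the old attrd_requesting_shutdown() check into an optional argument: callers that must keep working while the controller shuts down (elections, peer messages, CPG and CIB disconnects) pass false, and only paths that should also back off once a shutdown has merely been requested pass true. Usage as seen in the callers updated by this patch:

    // Keep counting votes while shutdown has only been requested
    rc = election_count_vote(writer, xml, !attrd_shutting_down(false));

    // But stop reacting to CIB diff notifications in that same state
    if (attrd_shutting_down(true)) {
        return;
    }
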
diff --git a/daemons/attrd/pacemaker-attrd.c b/daemons/attrd/pacemaker-attrd.c
index 037825b..8091c5b 100644
--- a/daemons/attrd/pacemaker-attrd.c
+++ b/daemons/attrd/pacemaker-attrd.c
@@ -63,140 +63,6 @@ crm_cluster_t *attrd_cluster = NULL;
crm_trigger_t *attrd_config_read = NULL;
crm_exit_t attrd_exit_status = CRM_EX_OK;
-static void
-attrd_cib_destroy_cb(gpointer user_data)
-{
- cib_t *conn = user_data;
-
- conn->cmds->signoff(conn); /* Ensure IPC is cleaned up */
-
- if (attrd_shutting_down()) {
- crm_info("Connection disconnection complete");
-
- } else {
- /* eventually this should trigger a reconnect, not a shutdown */
- crm_crit("Lost connection to the CIB manager, shutting down");
- attrd_exit_status = CRM_EX_DISCONNECT;
- attrd_shutdown(0);
- }
-
- return;
-}
-
-static void
-attrd_erase_cb(xmlNode *msg, int call_id, int rc, xmlNode *output,
- void *user_data)
-{
- do_crm_log_unlikely((rc? LOG_NOTICE : LOG_DEBUG),
- "Cleared transient attributes: %s "
- CRM_XS " xpath=%s rc=%d",
- pcmk_strerror(rc), (char *) user_data, rc);
-}
-
-#define XPATH_TRANSIENT "//node_state[@uname='%s']/" XML_TAG_TRANSIENT_NODEATTRS
-
-/*!
- * \internal
- * \brief Wipe all transient attributes for this node from the CIB
- *
- * Clear any previous transient node attributes from the CIB. This is
- * normally done by the DC's controller when this node leaves the cluster, but
- * this handles the case where the node restarted so quickly that the
- * cluster layer didn't notice.
- *
- * \todo If pacemaker-attrd respawns after crashing (see PCMK_respawned),
- * ideally we'd skip this and sync our attributes from the writer.
- * However, currently we reject any values for us that the writer has, in
- * attrd_peer_update().
- */
-static void
-attrd_erase_attrs(void)
-{
- int call_id;
- char *xpath = crm_strdup_printf(XPATH_TRANSIENT, attrd_cluster->uname);
-
- crm_info("Clearing transient attributes from CIB " CRM_XS " xpath=%s",
- xpath);
-
- call_id = the_cib->cmds->remove(the_cib, xpath, NULL, cib_xpath);
- the_cib->cmds->register_callback_full(the_cib, call_id, 120, FALSE, xpath,
- "attrd_erase_cb", attrd_erase_cb,
- free);
-}
-
-static int
-attrd_cib_connect(int max_retry)
-{
- static int attempts = 0;
-
- int rc = -ENOTCONN;
-
- the_cib = cib_new();
- if (the_cib == NULL) {
- return -ENOTCONN;
- }
-
- do {
- if(attempts > 0) {
- sleep(attempts);
- }
-
- attempts++;
- crm_debug("Connection attempt %d to the CIB manager", attempts);
- rc = the_cib->cmds->signon(the_cib, T_ATTRD, cib_command);
-
- } while(rc != pcmk_ok && attempts < max_retry);
-
- if (rc != pcmk_ok) {
- crm_err("Connection to the CIB manager failed: %s " CRM_XS " rc=%d",
- pcmk_strerror(rc), rc);
- goto cleanup;
- }
-
- crm_debug("Connected to the CIB manager after %d attempts", attempts);
-
- rc = the_cib->cmds->set_connection_dnotify(the_cib, attrd_cib_destroy_cb);
- if (rc != pcmk_ok) {
- crm_err("Could not set disconnection callback");
- goto cleanup;
- }
-
- rc = the_cib->cmds->add_notify_callback(the_cib, T_CIB_REPLACE_NOTIFY, attrd_cib_replaced_cb);
- if(rc != pcmk_ok) {
- crm_err("Could not set CIB notification callback");
- goto cleanup;
- }
-
- rc = the_cib->cmds->add_notify_callback(the_cib, T_CIB_DIFF_NOTIFY, attrd_cib_updated_cb);
- if (rc != pcmk_ok) {
- crm_err("Could not set CIB notification callback (update)");
- goto cleanup;
- }
-
- return pcmk_ok;
-
- cleanup:
- cib__clean_up_connection(&the_cib);
- return -ENOTCONN;
-}
-
-/*!
- * \internal
- * \brief Prepare the CIB after cluster is connected
- */
-static void
-attrd_cib_init(void)
-{
- // We have no attribute values in memory, wipe the CIB to match
- attrd_erase_attrs();
-
- // Set a trigger for reading the CIB (for the alerts section)
- attrd_config_read = mainloop_add_trigger(G_PRIORITY_HIGH, attrd_read_options, NULL);
-
- // Always read the CIB at start-up
- mainloop_set_trigger(attrd_config_read);
-}
-
static bool
ipc_already_running(void)
{
@@ -208,8 +74,10 @@ ipc_already_running(void)
return false;
}
- rc = pcmk_connect_ipc(old_instance, pcmk_ipc_dispatch_sync);
+ rc = pcmk__connect_ipc(old_instance, pcmk_ipc_dispatch_sync, 2);
if (rc != pcmk_rc_ok) {
+ crm_debug("No existing %s manager instance found: %s",
+ pcmk_ipc_name(old_instance, true), pcmk_rc_str(rc));
pcmk_free_ipc_api(old_instance);
return false;
}
@@ -277,7 +145,7 @@ main(int argc, char **argv)
attrd_exit_status = CRM_EX_OK;
g_set_error(&error, PCMK__EXITC_ERROR, attrd_exit_status, "%s", msg);
- crm_err(msg);
+ crm_err("%s", msg);
goto done;
}
diff --git a/daemons/attrd/pacemaker-attrd.h b/daemons/attrd/pacemaker-attrd.h
index 329fb5a..b8929a7 100644
--- a/daemons/attrd/pacemaker-attrd.h
+++ b/daemons/attrd/pacemaker-attrd.h
@@ -57,13 +57,14 @@ void attrd_run_mainloop(void);
void attrd_set_requesting_shutdown(void);
void attrd_clear_requesting_shutdown(void);
void attrd_free_waitlist(void);
-bool attrd_requesting_shutdown(void);
-bool attrd_shutting_down(void);
+bool attrd_shutting_down(bool if_requested);
void attrd_shutdown(int nsig);
void attrd_init_ipc(void);
void attrd_ipc_fini(void);
+int attrd_cib_connect(int max_retry);
void attrd_cib_disconnect(void);
+void attrd_cib_init(void);
bool attrd_value_needs_expansion(const char *value);
int attrd_expand_value(const char *value, const char *old_value);
@@ -92,6 +93,7 @@ int attrd_failure_regex(regex_t *regex, const char *rsc, const char *op,
guint interval_ms);
extern cib_t *the_cib;
+extern crm_exit_t attrd_exit_status;
/* Alerts */
@@ -100,8 +102,6 @@ extern crm_trigger_t *attrd_config_read;
void attrd_lrmd_disconnect(void);
gboolean attrd_read_options(gpointer user_data);
-void attrd_cib_replaced_cb(const char *event, xmlNode * msg);
-void attrd_cib_updated_cb(const char *event, xmlNode *msg);
int attrd_send_attribute_alert(const char *node, int nodeid,
const char *attr, const char *value);
@@ -177,8 +177,13 @@ void attrd_free_attribute(gpointer data);
void attrd_free_attribute_value(gpointer data);
attribute_t *attrd_populate_attribute(xmlNode *xml, const char *attr);
-void attrd_write_attribute(attribute_t *a, bool ignore_delay);
-void attrd_write_attributes(bool all, bool ignore_delay);
+enum attrd_write_options {
+ attrd_write_changed = 0,
+ attrd_write_all = (1 << 0),
+ attrd_write_no_delay = (1 << 1),
+};
+
+void attrd_write_attributes(uint32_t options);
void attrd_write_or_elect_attribute(attribute_t *a);
extern int minimum_protocol_version;
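
The new attrd_write_options flags replace the two booleans that attrd_write_attributes() used to take, so callers can combine behaviors explicitly. The combinations used elsewhere in this patch:

    // Write only attributes marked as changed, honoring configured delays
    attrd_write_attributes(attrd_write_changed);

    // After winning an election, write out everything the new writer knows
    attrd_write_attributes(attrd_write_all);

    // Client-requested refresh: write everything and ignore write delays
    attrd_write_attributes(attrd_write_all|attrd_write_no_delay);
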
diff --git a/daemons/based/Makefile.am b/daemons/based/Makefile.am
index 053d93c..022fc47 100644
--- a/daemons/based/Makefile.am
+++ b/daemons/based/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2004-2021 the Pacemaker project contributors
+# Copyright 2004-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -13,35 +13,37 @@ EXTRA_DIST = cib.pam
halibdir = $(CRM_DAEMON_DIR)
-COMMONLIBS = $(top_builddir)/lib/common/libcrmcommon.la \
- $(top_builddir)/lib/cib/libcib.la
-
halib_PROGRAMS = pacemaker-based
-noinst_HEADERS = pacemaker-based.h
+noinst_HEADERS = based_transaction.h \
+ pacemaker-based.h
pacemaker_based_CFLAGS = $(CFLAGS_HARDENED_EXE)
pacemaker_based_LDFLAGS = $(LDFLAGS_HARDENED_EXE)
-pacemaker_based_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la \
- $(COMMONLIBS) $(CLUSTERLIBS)
-
-pacemaker_based_SOURCES = pacemaker-based.c \
- based_callbacks.c \
- based_common.c \
- based_io.c \
- based_messages.c \
- based_notify.c \
- based_remote.c
-
-clean-generic:
- rm -f *.log *.debug *.xml *~
-
-if BUILD_LEGACY_LINKS
+pacemaker_based_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la
+pacemaker_based_LDADD += $(top_builddir)/lib/cib/libcib.la
+pacemaker_based_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
+pacemaker_based_LDADD += $(CLUSTERLIBS)
+
+pacemaker_based_SOURCES = pacemaker-based.c \
+ based_callbacks.c \
+ based_io.c \
+ based_messages.c \
+ based_notify.c \
+ based_operation.c \
+ based_remote.c \
+ based_transaction.c
+
+.PHONY: install-exec-hook
install-exec-hook:
+if BUILD_LEGACY_LINKS
$(MKDIR_P) -- $(DESTDIR)$(CRM_DAEMON_DIR)
cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f cib && $(LN_S) pacemaker-based cib
+endif
+.PHONY: uninstall-hook
uninstall-hook:
+if BUILD_LEGACY_LINKS
cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f cib
endif
diff --git a/daemons/based/based_callbacks.c b/daemons/based/based_callbacks.c
index 3726caa..4fac222 100644
--- a/daemons/based/based_callbacks.c
+++ b/daemons/based/based_callbacks.c
@@ -20,6 +20,9 @@
#include <fcntl.h>
#include <inttypes.h> // PRIu64
+#include <glib.h>
+#include <libxml/tree.h>
+
#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/msg_xml.h>
@@ -31,7 +34,6 @@
#include <pacemaker-based.h>
#define EXIT_ESCALATION_MS 10000
-#define OUR_NODENAME (stand_alone? "localhost" : crm_cluster->uname)
static unsigned long cib_local_bcast_num = 0;
@@ -50,11 +52,10 @@ qb_ipcs_service_t *ipcs_ro = NULL;
qb_ipcs_service_t *ipcs_rw = NULL;
qb_ipcs_service_t *ipcs_shm = NULL;
-static void cib_process_request(xmlNode *request, gboolean privileged,
- const pcmk__client_t *cib_client);
-
-static int cib_process_command(xmlNode *request, xmlNode **reply,
- xmlNode **cib_diff, gboolean privileged);
+static int cib_process_command(xmlNode *request,
+ const cib__operation_t *operation,
+ cib__op_fn_t op_function, xmlNode **reply,
+ xmlNode **cib_diff, bool privileged);
static gboolean cib_common_callback(qb_ipcs_connection_t *c, void *data,
size_t size, gboolean privileged);
@@ -138,11 +139,130 @@ struct qb_ipcs_service_handlers ipc_rw_callbacks = {
.connection_destroyed = cib_ipc_destroy
};
+/*!
+ * \internal
+ * \brief Create reply XML for a CIB request
+ *
+ * \param[in] op CIB operation type
+ * \param[in] call_id CIB call ID
+ * \param[in] client_id CIB client ID
+ * \param[in] call_options Group of <tt>enum cib_call_options</tt> flags
+ * \param[in] rc Request return code
+ * \param[in] call_data Request output data
+ *
+ * \return Reply XML
+ *
+ * \note The caller is responsible for freeing the return value using
+ * \p free_xml().
+ */
+static xmlNode *
+create_cib_reply(const char *op, const char *call_id, const char *client_id,
+ int call_options, int rc, xmlNode *call_data)
+{
+ xmlNode *reply = create_xml_node(NULL, "cib-reply");
+
+ CRM_ASSERT(reply != NULL);
+
+ crm_xml_add(reply, F_TYPE, T_CIB);
+ crm_xml_add(reply, F_CIB_OPERATION, op);
+ crm_xml_add(reply, F_CIB_CALLID, call_id);
+ crm_xml_add(reply, F_CIB_CLIENTID, client_id);
+ crm_xml_add_int(reply, F_CIB_CALLOPTS, call_options);
+ crm_xml_add_int(reply, F_CIB_RC, rc);
+
+ if (call_data != NULL) {
+ crm_trace("Attaching reply output");
+ add_message_xml(reply, F_CIB_CALLDATA, call_data);
+ }
+
+ crm_log_xml_explicit(reply, "cib:reply");
+ return reply;
+}
+
+static void
+do_local_notify(const xmlNode *notify_src, const char *client_id,
+ bool sync_reply, bool from_peer)
+{
+ int rid = 0;
+ int call_id = 0;
+ pcmk__client_t *client_obj = NULL;
+
+ CRM_ASSERT(notify_src && client_id);
+
+ crm_element_value_int(notify_src, F_CIB_CALLID, &call_id);
+
+ client_obj = pcmk__find_client_by_id(client_id);
+ if (client_obj == NULL) {
+ crm_debug("Could not send response %d: client %s not found",
+ call_id, client_id);
+ return;
+ }
+
+ if (sync_reply) {
+ if (client_obj->ipcs) {
+ CRM_LOG_ASSERT(client_obj->request_id);
+
+ rid = client_obj->request_id;
+ client_obj->request_id = 0;
+
+ crm_trace("Sending response %d to client %s%s",
+ rid, pcmk__client_name(client_obj),
+ (from_peer? " (originator of delegated request)" : ""));
+ } else {
+ crm_trace("Sending response (call %d) to client %s%s",
+ call_id, pcmk__client_name(client_obj),
+ (from_peer? " (originator of delegated request)" : ""));
+ }
+
+ } else {
+ crm_trace("Sending event %d to client %s%s",
+ call_id, pcmk__client_name(client_obj),
+ (from_peer? " (originator of delegated request)" : ""));
+ }
+
+ switch (PCMK__CLIENT_TYPE(client_obj)) {
+ case pcmk__client_ipc:
+ {
+ int rc = pcmk__ipc_send_xml(client_obj, rid, notify_src,
+ (sync_reply? crm_ipc_flags_none
+ : crm_ipc_server_event));
+
+ if (rc != pcmk_rc_ok) {
+ crm_warn("%s reply to client %s failed: %s " CRM_XS " rc=%d",
+ (sync_reply? "Synchronous" : "Asynchronous"),
+ pcmk__client_name(client_obj), pcmk_rc_str(rc),
+ rc);
+ }
+ }
+ break;
+#ifdef HAVE_GNUTLS_GNUTLS_H
+ case pcmk__client_tls:
+#endif
+ case pcmk__client_tcp:
+ pcmk__remote_send_xml(client_obj->remote, notify_src);
+ break;
+ default:
+ crm_err("Unknown transport for client %s "
+ CRM_XS " flags=%#016" PRIx64,
+ pcmk__client_name(client_obj), client_obj->flags);
+ }
+}
+
void
cib_common_callback_worker(uint32_t id, uint32_t flags, xmlNode * op_request,
pcmk__client_t *cib_client, gboolean privileged)
{
const char *op = crm_element_value(op_request, F_CIB_OPERATION);
+ int call_options = cib_none;
+
+ crm_element_value_int(op_request, F_CIB_CALLOPTS, &call_options);
+
+ /* Requests with cib_transaction set should not be sent to based directly
+ * (outside of a commit-transaction request)
+ */
+ if (pcmk_is_set(call_options, cib_transaction)) {
+ return;
+ }
if (pcmk__str_eq(op, CRM_OP_REGISTER, pcmk__str_none)) {
if (flags & crm_ipc_client_response) {
@@ -180,9 +300,6 @@ cib_common_callback_worker(uint32_t id, uint32_t flags, xmlNode * op_request,
} else if (pcmk__str_eq(type, T_CIB_DIFF_NOTIFY, pcmk__str_casei)) {
bit = cib_notify_diff;
- } else if (pcmk__str_eq(type, T_CIB_REPLACE_NOTIFY, pcmk__str_casei)) {
- bit = cib_notify_replace;
-
} else {
status = CRM_EX_INVALID_PARAM;
}
@@ -354,9 +471,7 @@ process_ping_reply(xmlNode *reply)
if(remote_cib && remote_cib->children) {
// Additional debug
xml_calculate_changes(the_cib, remote_cib);
-
- pcmk__output_set_log_level(logger_out, LOG_INFO);
- pcmk__xml_show_changes(logger_out, remote_cib);
+ pcmk__log_xml_changes(LOG_INFO, remote_cib);
crm_trace("End of differences");
}
@@ -367,75 +482,6 @@ process_ping_reply(xmlNode *reply)
}
static void
-do_local_notify(xmlNode * notify_src, const char *client_id,
- gboolean sync_reply, gboolean from_peer)
-{
- int rid = 0;
- int call_id = 0;
- pcmk__client_t *client_obj = NULL;
-
- CRM_ASSERT(notify_src && client_id);
-
- crm_element_value_int(notify_src, F_CIB_CALLID, &call_id);
-
- client_obj = pcmk__find_client_by_id(client_id);
- if (client_obj == NULL) {
- crm_debug("Could not send response %d: client %s not found",
- call_id, client_id);
- return;
- }
-
- if (sync_reply) {
- if (client_obj->ipcs) {
- CRM_LOG_ASSERT(client_obj->request_id);
-
- rid = client_obj->request_id;
- client_obj->request_id = 0;
-
- crm_trace("Sending response %d to client %s%s",
- rid, pcmk__client_name(client_obj),
- (from_peer? " (originator of delegated request)" : ""));
- } else {
- crm_trace("Sending response (call %d) to client %s%s",
- call_id, pcmk__client_name(client_obj),
- (from_peer? " (originator of delegated request)" : ""));
- }
-
- } else {
- crm_trace("Sending event %d to client %s%s",
- call_id, pcmk__client_name(client_obj),
- (from_peer? " (originator of delegated request)" : ""));
- }
-
- switch (PCMK__CLIENT_TYPE(client_obj)) {
- case pcmk__client_ipc:
- {
- int rc = pcmk__ipc_send_xml(client_obj, rid, notify_src,
- (sync_reply? crm_ipc_flags_none
- : crm_ipc_server_event));
-
- if (rc != pcmk_rc_ok) {
- crm_warn("%s reply to client %s failed: %s " CRM_XS " rc=%d",
- (sync_reply? "Synchronous" : "Asynchronous"),
- pcmk__client_name(client_obj), pcmk_rc_str(rc),
- rc);
- }
- }
- break;
-#ifdef HAVE_GNUTLS_GNUTLS_H
- case pcmk__client_tls:
-#endif
- case pcmk__client_tcp:
- pcmk__remote_send_xml(client_obj->remote, notify_src);
- break;
- default:
- crm_err("Unknown transport for client %s "
- CRM_XS " flags=%#016" PRIx64,
- pcmk__client_name(client_obj), client_obj->flags);
- }
-}
-
-static void
local_notify_destroy_callback(gpointer data)
{
cib_local_notify_t *notify = data;
@@ -448,7 +494,7 @@ local_notify_destroy_callback(gpointer data)
static void
check_local_notify(int bcast_id)
{
- cib_local_notify_t *notify = NULL;
+ const cib_local_notify_t *notify = NULL;
if (!local_notify_queue) {
return;
@@ -483,13 +529,14 @@ queue_local_notify(xmlNode * notify_src, const char *client_id, gboolean sync_re
}
static void
-parse_local_options_v1(const pcmk__client_t *cib_client, int call_type,
- int call_options, const char *host, const char *op,
- gboolean *local_notify, gboolean *needs_reply,
- gboolean *process, gboolean *needs_forward)
+parse_local_options_v1(const pcmk__client_t *cib_client,
+ const cib__operation_t *operation, int call_options,
+ const char *host, const char *op, gboolean *local_notify,
+ gboolean *needs_reply, gboolean *process,
+ gboolean *needs_forward)
{
- if (cib_op_modifies(call_type)
- && !(call_options & cib_inhibit_bcast)) {
+ if (pcmk_is_set(operation->flags, cib__op_attr_modifies)
+ && !pcmk_is_set(call_options, cib_inhibit_bcast)) {
/* we need to send an update anyway */
*needs_reply = TRUE;
} else {
@@ -526,78 +573,87 @@ parse_local_options_v1(const pcmk__client_t *cib_client, int call_type,
}
static void
-parse_local_options_v2(const pcmk__client_t *cib_client, int call_type,
- int call_options, const char *host, const char *op,
- gboolean *local_notify, gboolean *needs_reply,
- gboolean *process, gboolean *needs_forward)
+parse_local_options_v2(const pcmk__client_t *cib_client,
+ const cib__operation_t *operation, int call_options,
+ const char *host, const char *op, gboolean *local_notify,
+ gboolean *needs_reply, gboolean *process,
+ gboolean *needs_forward)
{
- if (cib_op_modifies(call_type)) {
- if (pcmk__str_any_of(op, PCMK__CIB_REQUEST_PRIMARY,
- PCMK__CIB_REQUEST_SECONDARY, NULL)) {
- /* Always handle these locally */
- *process = TRUE;
- *needs_reply = FALSE;
- *local_notify = TRUE;
- *needs_forward = FALSE;
- return;
-
- } else {
- /* Redirect all other updates via CPG */
- *needs_reply = TRUE;
- *needs_forward = TRUE;
- *process = FALSE;
- crm_trace("%s op from %s needs to be forwarded to client %s",
- op, pcmk__client_name(cib_client),
- pcmk__s(host, "the primary instance"));
- return;
- }
- }
-
-
+ // Process locally and notify local client
*process = TRUE;
*needs_reply = FALSE;
*local_notify = TRUE;
*needs_forward = FALSE;
- if (stand_alone) {
- crm_trace("Processing %s op from client %s (stand-alone)",
+ if (pcmk_is_set(operation->flags, cib__op_attr_local)) {
+ /* Always process locally if cib__op_attr_local is set.
+ *
+ * @COMPAT: Currently host is ignored. At a compatibility break, throw
+ * an error (from cib_process_request() or earlier) if host is not NULL or
+ * OUR_NODENAME.
+ */
+ crm_trace("Processing always-local %s op from client %s",
op, pcmk__client_name(cib_client));
- } else if (host == NULL) {
- crm_trace("Processing unaddressed %s op from client %s",
- op, pcmk__client_name(cib_client));
+ if (!pcmk__str_eq(host, OUR_NODENAME,
+ pcmk__str_casei|pcmk__str_null_matches)) {
- } else if (pcmk__str_eq(host, OUR_NODENAME, pcmk__str_casei)) {
- crm_trace("Processing locally addressed %s op from client %s",
+ crm_warn("Operation '%s' is always local but its target host is "
+ "set to '%s'",
+ op, host);
+ }
+ return;
+ }
+
+ if (pcmk_is_set(operation->flags, cib__op_attr_modifies)
+ || !pcmk__str_eq(host, OUR_NODENAME,
+ pcmk__str_casei|pcmk__str_null_matches)) {
+
+ // Forward modifying and non-local requests via cluster
+ *process = FALSE;
+ *needs_reply = FALSE;
+ *local_notify = FALSE;
+ *needs_forward = TRUE;
+
+ crm_trace("%s op from %s needs to be forwarded to %s",
+ op, pcmk__client_name(cib_client),
+ pcmk__s(host, "all nodes"));
+ return;
+ }
+
+ if (stand_alone) {
+ crm_trace("Processing %s op from client %s (stand-alone)",
op, pcmk__client_name(cib_client));
} else {
- crm_trace("%s op from %s needs to be forwarded to client %s",
- op, pcmk__client_name(cib_client), host);
- *needs_forward = TRUE;
- *process = FALSE;
+ crm_trace("Processing %saddressed %s op from client %s",
+ ((host != NULL)? "locally " : "un"),
+ op, pcmk__client_name(cib_client));
}
}
static void
-parse_local_options(const pcmk__client_t *cib_client, int call_type,
- int call_options, const char *host, const char *op,
- gboolean *local_notify, gboolean *needs_reply,
- gboolean *process, gboolean *needs_forward)
+parse_local_options(const pcmk__client_t *cib_client,
+ const cib__operation_t *operation, int call_options,
+ const char *host, const char *op, gboolean *local_notify,
+ gboolean *needs_reply, gboolean *process,
+ gboolean *needs_forward)
{
if(cib_legacy_mode()) {
- parse_local_options_v1(cib_client, call_type, call_options, host,
- op, local_notify, needs_reply, process, needs_forward);
+ parse_local_options_v1(cib_client, operation, call_options, host,
+ op, local_notify, needs_reply, process,
+ needs_forward);
} else {
- parse_local_options_v2(cib_client, call_type, call_options, host,
- op, local_notify, needs_reply, process, needs_forward);
+ parse_local_options_v2(cib_client, operation, call_options, host,
+ op, local_notify, needs_reply, process,
+ needs_forward);
}
}
static gboolean
-parse_peer_options_v1(int call_type, xmlNode * request,
- gboolean * local_notify, gboolean * needs_reply, gboolean * process,
- gboolean * needs_forward)
+parse_peer_options_v1(const cib__operation_t *operation, xmlNode *request,
+ gboolean *local_notify, gboolean *needs_reply,
+ gboolean *process)
{
const char *op = NULL;
const char *host = NULL;
@@ -620,7 +676,8 @@ parse_peer_options_v1(int call_type, xmlNode * request,
}
op = crm_element_value(request, F_CIB_OPERATION);
- crm_trace("Processing %s request sent by %s", op, originator);
+ crm_trace("Processing legacy %s request sent by %s", op, originator);
+
if (pcmk__str_eq(op, PCMK__CIB_REQUEST_SHUTDOWN, pcmk__str_none)) {
/* Always process these */
*local_notify = FALSE;
@@ -693,9 +750,9 @@ parse_peer_options_v1(int call_type, xmlNode * request,
}
static gboolean
-parse_peer_options_v2(int call_type, xmlNode * request,
- gboolean * local_notify, gboolean * needs_reply, gboolean * process,
- gboolean * needs_forward)
+parse_peer_options_v2(const cib__operation_t *operation, xmlNode *request,
+ gboolean *local_notify, gboolean *needs_reply,
+ gboolean *process)
{
const char *host = NULL;
const char *delegated = crm_element_value(request, F_CIB_DELEGATED);
@@ -705,6 +762,10 @@ parse_peer_options_v2(int call_type, xmlNode * request,
gboolean is_reply = pcmk__str_eq(reply_to, OUR_NODENAME, pcmk__str_casei);
+ if (originator == NULL) { // Shouldn't be possible
+ originator = "peer";
+ }
+
if (pcmk__str_eq(op, PCMK__CIB_REQUEST_REPLACE, pcmk__str_none)) {
/* sync_our_cib() sets F_CIB_ISREPLY */
if (reply_to) {
@@ -734,10 +795,10 @@ parse_peer_options_v2(int call_type, xmlNode * request,
const char *max = crm_element_value(request, F_CIB_SCHEMA_MAX);
const char *upgrade_rc = crm_element_value(request, F_CIB_UPGRADE_RC);
- crm_trace("Parsing %s operation%s for %s with max=%s and upgrade_rc=%s",
- op, (is_reply? " reply" : ""),
+ crm_trace("Parsing upgrade %s for %s with max=%s and upgrade_rc=%s",
+ (is_reply? "reply" : "request"),
(based_is_primary? "primary" : "secondary"),
- (max? max : "none"), (upgrade_rc? upgrade_rc : "none"));
+ pcmk__s(max, "none"), pcmk__s(upgrade_rc, "none"));
if (upgrade_rc != NULL) {
// Our upgrade request was rejected by DC, notify clients of result
@@ -752,7 +813,7 @@ parse_peer_options_v2(int call_type, xmlNode * request,
goto skip_is_reply;
} else {
- // Ignore broadcast client requests when we're not DC
+ // Ignore broadcast client requests when we're not primary
return FALSE;
}
@@ -762,22 +823,25 @@ parse_peer_options_v2(int call_type, xmlNode * request,
legacy_mode = TRUE;
return FALSE;
- } else if (is_reply && cib_op_modifies(call_type)) {
+ } else if (is_reply
+ && pcmk_is_set(operation->flags, cib__op_attr_modifies)) {
crm_trace("Ignoring legacy %s reply sent from %s to local clients", op, originator);
return FALSE;
} else if (pcmk__str_eq(op, PCMK__CIB_REQUEST_SHUTDOWN, pcmk__str_none)) {
- /* Legacy handling */
- crm_debug("Legacy handling of %s message from %s", op, originator);
*local_notify = FALSE;
if (reply_to == NULL) {
*process = TRUE;
+ } else { // Not possible?
+ crm_debug("Ignoring shutdown request from %s because reply_to=%s",
+ originator, reply_to);
}
return *process;
}
- if(is_reply) {
- crm_trace("Handling %s reply sent from %s to local clients", op, originator);
+ if (is_reply) {
+ crm_trace("Will notify local clients for %s reply from %s",
+ op, originator);
*process = FALSE;
*needs_reply = FALSE;
*local_notify = TRUE;
@@ -797,62 +861,78 @@ parse_peer_options_v2(int call_type, xmlNode * request,
return TRUE;
} else if (host != NULL) {
- /* this is for a specific instance and we're not it */
- crm_trace("Ignoring %s operation for instance on %s", op, host);
+ crm_trace("Ignoring %s request intended for CIB manager on %s",
+ op, host);
return FALSE;
} else if(is_reply == FALSE && pcmk__str_eq(op, CRM_OP_PING, pcmk__str_casei)) {
*needs_reply = TRUE;
}
- crm_trace("Processing %s request sent to everyone by %s/%s on %s %s", op,
- crm_element_value(request, F_CIB_CLIENTNAME),
- crm_element_value(request, F_CIB_CALLID),
- originator, (*local_notify)?"(notify)":"");
+ crm_trace("Processing %s request broadcast by %s call %s on %s "
+ "(local clients will%s be notified)", op,
+ pcmk__s(crm_element_value(request, F_CIB_CLIENTNAME), "client"),
+ pcmk__s(crm_element_value(request, F_CIB_CALLID), "without ID"),
originator, (*local_notify? "" : " not"));
return TRUE;
}
static gboolean
-parse_peer_options(int call_type, xmlNode * request,
- gboolean * local_notify, gboolean * needs_reply, gboolean * process,
- gboolean * needs_forward)
+parse_peer_options(const cib__operation_t *operation, xmlNode *request,
+ gboolean *local_notify, gboolean *needs_reply,
+ gboolean *process)
{
/* TODO: What happens when an update comes in after node A
* requests the CIB from node B, but before it gets the reply (and
* sends out the replace operation)?
*/
if(cib_legacy_mode()) {
- return parse_peer_options_v1(
- call_type, request, local_notify, needs_reply, process, needs_forward);
+ return parse_peer_options_v1(operation, request, local_notify,
+ needs_reply, process);
} else {
- return parse_peer_options_v2(
- call_type, request, local_notify, needs_reply, process, needs_forward);
+ return parse_peer_options_v2(operation, request, local_notify,
+ needs_reply, process);
}
}
+/*!
+ * \internal
+ * \brief Forward a CIB request to the appropriate target host(s)
+ *
+ * \param[in] request CIB request to forward
+ */
static void
-forward_request(xmlNode *request, int call_options)
+forward_request(xmlNode *request)
{
const char *op = crm_element_value(request, F_CIB_OPERATION);
+ const char *section = crm_element_value(request, F_CIB_SECTION);
const char *host = crm_element_value(request, F_CIB_HOST);
+ const char *originator = crm_element_value(request, F_ORIG);
+ const char *client_name = crm_element_value(request, F_CIB_CLIENTNAME);
+ const char *call_id = crm_element_value(request, F_CIB_CALLID);
- crm_xml_add(request, F_CIB_DELEGATED, OUR_NODENAME);
-
- if (host != NULL) {
- crm_trace("Forwarding %s op to %s", op, host);
- send_cluster_message(crm_get_peer(0, host), crm_msg_cib, request, FALSE);
+ int log_level = LOG_INFO;
- } else {
- crm_trace("Forwarding %s op to primary instance", op);
- send_cluster_message(NULL, crm_msg_cib, request, FALSE);
+ if (pcmk__str_eq(op, PCMK__CIB_REQUEST_NOOP, pcmk__str_none)) {
+ log_level = LOG_DEBUG;
}
- /* Return the request to its original state */
- xml_remove_prop(request, F_CIB_DELEGATED);
+ do_crm_log(log_level,
+ "Forwarding %s operation for section %s to %s (origin=%s/%s/%s)",
+ pcmk__s(op, "invalid"),
+ pcmk__s(section, "all"),
+ pcmk__s(host, (cib_legacy_mode()? "primary" : "all")),
+ pcmk__s(originator, "local"),
+ pcmk__s(client_name, "unspecified"),
+ pcmk__s(call_id, "unspecified"));
- if (call_options & cib_discard_reply) {
- crm_trace("Client not interested in reply");
- }
+ crm_xml_add(request, F_CIB_DELEGATED, OUR_NODENAME);
+
+ send_cluster_message(((host != NULL)? crm_get_peer(0, host) : NULL),
+ crm_msg_cib, request, FALSE);
+
+ // Return the request to its original state
+ xml_remove_prop(request, F_CIB_DELEGATED);
}
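For reference, a receiving peer can recognize a forwarded request by the F_CIB_DELEGATED attribute that forward_request() adds before sending. A minimal sketch, assuming the surrounding daemon headers (the helper name is illustrative, not part of this patch):

static bool
request_was_delegated(const xmlNode *request)
{
    // F_CIB_DELEGATED carries the forwarding node's name when present
    return crm_element_value(request, F_CIB_DELEGATED) != NULL;
}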
static gboolean
@@ -861,9 +941,10 @@ send_peer_reply(xmlNode * msg, xmlNode * result_diff, const char *originator, gb
CRM_ASSERT(msg != NULL);
if (broadcast) {
- /* this (successful) call modified the CIB _and_ the
- * change needs to be broadcast...
- * send via HA to other nodes
+ /* @COMPAT: Legacy code
+ *
+ * This successful call modified the CIB, and the change needs to be
+ * broadcast (sent via cluster to all nodes).
*/
int diff_add_updates = 0;
int diff_add_epoch = 0;
@@ -878,7 +959,7 @@ send_peer_reply(xmlNode * msg, xmlNode * result_diff, const char *originator, gb
CRM_LOG_ASSERT(result_diff != NULL);
digest = crm_element_value(result_diff, XML_ATTR_DIGEST);
- crm_element_value_int(result_diff, "format", &format);
+ crm_element_value_int(result_diff, PCMK_XA_FORMAT, &format);
cib_diff_version_details(result_diff,
&diff_add_admin_epoch, &diff_add_epoch, &diff_add_updates,
@@ -919,12 +1000,14 @@ send_peer_reply(xmlNode * msg, xmlNode * result_diff, const char *originator, gb
* \param[in] privileged Whether privileged commands may be run
* (see cib_server_ops[] definition)
* \param[in] cib_client IPC client that sent request (or NULL if CPG)
+ *
+ * \return Legacy Pacemaker return code
*/
-static void
+int
cib_process_request(xmlNode *request, gboolean privileged,
const pcmk__client_t *cib_client)
{
- int call_type = 0;
+ // @TODO: Break into multiple smaller functions
int call_options = 0;
gboolean process = TRUE; // Whether to process request locally now
@@ -946,12 +1029,16 @@ cib_process_request(xmlNode *request, gboolean privileged,
const char *client_name = crm_element_value(request, F_CIB_CLIENTNAME);
const char *reply_to = crm_element_value(request, F_CIB_ISREPLY);
+ const cib__operation_t *operation = NULL;
+ cib__op_fn_t op_function = NULL;
+
crm_element_value_int(request, F_CIB_CALLOPTS, &call_options);
if ((host != NULL) && (*host == '\0')) {
host = NULL;
}
+ // @TODO: Improve trace messages. Target is accurate only for legacy mode.
if (host) {
target = host;
@@ -970,72 +1057,68 @@ cib_process_request(xmlNode *request, gboolean privileged,
crm_trace("Processing local %s operation from %s/%s intended for %s", op, client_name, call_id, target);
}
- rc = cib_get_operation_id(op, &call_type);
+ rc = cib__get_operation(op, &operation);
+ rc = pcmk_rc2legacy(rc);
if (rc != pcmk_ok) {
/* TODO: construct error reply? */
crm_err("Pre-processing of command failed: %s", pcmk_strerror(rc));
- return;
+ return rc;
+ }
+
+ op_function = based_get_op_function(operation);
+ if (op_function == NULL) {
+ crm_err("Operation %s not supported by CIB manager", op);
+ return -EOPNOTSUPP;
}
if (cib_client != NULL) {
- parse_local_options(cib_client, call_type, call_options, host, op,
- &local_notify, &needs_reply, &process, &needs_forward);
+ parse_local_options(cib_client, operation, call_options, host, op,
+ &local_notify, &needs_reply, &process,
+ &needs_forward);
- } else if (parse_peer_options(call_type, request, &local_notify,
- &needs_reply, &process, &needs_forward) == FALSE) {
- return;
+ } else if (!parse_peer_options(operation, request, &local_notify,
+ &needs_reply, &process)) {
+ return rc;
+ }
+
+ if (pcmk_is_set(call_options, cib_transaction)) {
+ /* All requests in a transaction are processed locally against a working
+ * CIB copy, and we don't notify for individual requests because the
+ * entire transaction is atomic.
+ *
+ * We still call the option parser functions above, for the sake of log
+ * messages and checking whether we're the target for peer requests.
+ */
+ process = TRUE;
+ needs_reply = FALSE;
+ local_notify = FALSE;
+ needs_forward = FALSE;
}
- is_update = cib_op_modifies(call_type);
+ is_update = pcmk_is_set(operation->flags, cib__op_attr_modifies);
- if (call_options & cib_discard_reply) {
+ if (pcmk_is_set(call_options, cib_discard_reply)) {
/* If the request will modify the CIB, and we are in legacy mode, we
* need to build a reply so we can broadcast a diff, even if the
* requester doesn't want one.
*/
needs_reply = is_update && cib_legacy_mode();
local_notify = FALSE;
+ crm_trace("Client is not interested in the reply");
}
if (needs_forward) {
- const char *section = crm_element_value(request, F_CIB_SECTION);
- int log_level = LOG_INFO;
-
- if (pcmk__str_eq(op, PCMK__CIB_REQUEST_NOOP, pcmk__str_none)) {
- log_level = LOG_DEBUG;
- }
-
- do_crm_log(log_level,
- "Forwarding %s operation for section %s to %s (origin=%s/%s/%s)",
- op,
- section ? section : "'all'",
- pcmk__s(host, (cib_legacy_mode() ? "primary" : "all")),
- originator ? originator : "local",
- client_name, call_id);
-
- forward_request(request, call_options);
- return;
+ forward_request(request);
+ return rc;
}
if (cib_status != pcmk_ok) {
- const char *call = crm_element_value(request, F_CIB_CALLID);
-
rc = cib_status;
crm_err("Operation ignored, cluster configuration is invalid."
" Please repair and restart: %s", pcmk_strerror(cib_status));
- op_reply = create_xml_node(NULL, "cib-reply");
- crm_xml_add(op_reply, F_TYPE, T_CIB);
- crm_xml_add(op_reply, F_CIB_OPERATION, op);
- crm_xml_add(op_reply, F_CIB_CALLID, call);
- crm_xml_add(op_reply, F_CIB_CLIENTID, client_id);
- crm_xml_add_int(op_reply, F_CIB_CALLOPTS, call_options);
- crm_xml_add_int(op_reply, F_CIB_RC, rc);
-
- crm_trace("Attaching reply output");
- add_message_xml(op_reply, F_CIB_CALLDATA, the_cib);
-
- crm_log_xml_explicit(op_reply, "cib:reply");
+ op_reply = create_cib_reply(op, call_id, client_id, call_options, rc,
+ the_cib);
} else if (process) {
time_t finished = 0;
@@ -1043,7 +1126,8 @@ cib_process_request(xmlNode *request, gboolean privileged,
int level = LOG_INFO;
const char *section = crm_element_value(request, F_CIB_SECTION);
- rc = cib_process_command(request, &op_reply, &result_diff, privileged);
+ rc = cib_process_command(request, operation, op_function, &op_reply,
+ &result_diff, privileged);
if (!is_update) {
level = LOG_TRACE;
@@ -1120,10 +1204,9 @@ cib_process_request(xmlNode *request, gboolean privileged,
op_reply = NULL; /* the reply is queued, so don't free here */
}
- } else if (call_options & cib_discard_reply) {
- crm_trace("Caller isn't interested in reply");
+ } else if ((cib_client == NULL)
+ && !pcmk_is_set(call_options, cib_discard_reply)) {
- } else if (cib_client == NULL) {
if (is_update == FALSE || result_diff == NULL) {
crm_trace("Request not broadcast: R/O call");
@@ -1158,24 +1241,51 @@ cib_process_request(xmlNode *request, gboolean privileged,
free_xml(op_reply);
free_xml(result_diff);
- return;
+ return rc;
}
-static char *
-calculate_section_digest(const char *xpath, xmlNode * xml_obj)
+/*!
+ * \internal
+ * \brief Get a CIB operation's input from the request XML
+ *
+ * \param[in] request CIB request XML
+ * \param[in] type CIB operation type
+ * \param[out] section Where to store CIB section name
+ *
+ * \return Input XML for CIB operation
+ *
+ * \note If not \c NULL, the return value is a non-const pointer to part of
+ * \p request. The caller should not free it directly.
+ */
+static xmlNode *
+prepare_input(const xmlNode *request, enum cib__op_type type,
+ const char **section)
{
- xmlNode *xml_section = NULL;
+ xmlNode *input = NULL;
+
+ *section = NULL;
+
+ switch (type) {
+ case cib__op_apply_patch:
+ if (pcmk__xe_attr_is_true(request, F_CIB_GLOBAL_UPDATE)) {
+ input = get_message_xml(request, F_CIB_UPDATE_DIFF);
+ } else {
+ input = get_message_xml(request, F_CIB_CALLDATA);
+ }
+ break;
- if (xml_obj == NULL) {
- return NULL;
+ default:
+ input = get_message_xml(request, F_CIB_CALLDATA);
+ *section = crm_element_value(request, F_CIB_SECTION);
+ break;
}
- xml_section = get_xpath_object(xpath, xml_obj, LOG_TRACE);
- if (xml_section == NULL) {
- return NULL;
+ // Grab the specified section
+ if ((*section != NULL) && pcmk__xe_is(input, XML_TAG_CIB)) {
+ input = pcmk_find_cib_element(input, *section);
}
- return calculate_xml_versioned_digest(xml_section, FALSE, TRUE, CRM_FEATURE_SET);
+ return input;
}
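Because the returned node points into the request (per the note above), callers release the request rather than the extracted input. A short sketch under that assumption, with request standing in for the already-parsed incoming message:

const char *section = NULL;
xmlNode *input = prepare_input(request, cib__op_modify, &section);

/* ... run the operation against input ... */

free_xml(request);  // also releases input; input is never freed directly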
// v1 and v2 patch formats
@@ -1201,14 +1311,14 @@ contains_config_change(xmlNode *diff)
}
static int
-cib_process_command(xmlNode * request, xmlNode ** reply, xmlNode ** cib_diff, gboolean privileged)
+cib_process_command(xmlNode *request, const cib__operation_t *operation,
+ cib__op_fn_t op_function, xmlNode **reply,
+ xmlNode **cib_diff, bool privileged)
{
xmlNode *input = NULL;
xmlNode *output = NULL;
xmlNode *result_cib = NULL;
- xmlNode *current_cib = NULL;
- int call_type = 0;
int call_options = 0;
const char *op = NULL;
@@ -1216,24 +1326,15 @@ cib_process_command(xmlNode * request, xmlNode ** reply, xmlNode ** cib_diff, gb
const char *call_id = crm_element_value(request, F_CIB_CALLID);
const char *client_id = crm_element_value(request, F_CIB_CLIENTID);
const char *client_name = crm_element_value(request, F_CIB_CLIENTNAME);
- const char *origin = crm_element_value(request, F_ORIG);
+ const char *originator = crm_element_value(request, F_ORIG);
int rc = pcmk_ok;
- int rc2 = pcmk_ok;
- gboolean send_r_notify = FALSE;
- gboolean config_changed = FALSE;
- gboolean manage_counters = TRUE;
+ bool config_changed = false;
+ bool manage_counters = true;
static mainloop_timer_t *digest_timer = NULL;
- char *current_nodes_digest = NULL;
- char *current_alerts_digest = NULL;
- char *current_status_digest = NULL;
- uint32_t change_section = cib_change_section_nodes
- |cib_change_section_alerts
- |cib_change_section_status;
-
CRM_ASSERT(cib_status == pcmk_ok);
if(digest_timer == NULL) {
@@ -1242,91 +1343,64 @@ cib_process_command(xmlNode * request, xmlNode ** reply, xmlNode ** cib_diff, gb
*reply = NULL;
*cib_diff = NULL;
- current_cib = the_cib;
/* Start processing the request... */
op = crm_element_value(request, F_CIB_OPERATION);
crm_element_value_int(request, F_CIB_CALLOPTS, &call_options);
- rc = cib_get_operation_id(op, &call_type);
- if (rc == pcmk_ok && privileged == FALSE) {
- rc = cib_op_can_run(call_type, call_options, privileged);
+ if (!privileged && pcmk_is_set(operation->flags, cib__op_attr_privileged)) {
+ rc = -EACCES;
+ crm_trace("Failed due to lack of privileges: %s", pcmk_strerror(rc));
+ goto done;
}
- rc2 = cib_op_prepare(call_type, request, &input, &section);
- if (rc == pcmk_ok) {
- rc = rc2;
- }
+ input = prepare_input(request, operation->type, &section);
- if (rc != pcmk_ok) {
- crm_trace("Call setup failed: %s", pcmk_strerror(rc));
- goto done;
-
- } else if (cib_op_modifies(call_type) == FALSE) {
- rc = cib_perform_op(op, call_options, cib_op_func(call_type), TRUE,
- section, request, input, FALSE, &config_changed,
- current_cib, &result_cib, NULL, &output);
+ if (!pcmk_is_set(operation->flags, cib__op_attr_modifies)) {
+ rc = cib_perform_op(op, call_options, op_function, true, section,
+ request, input, false, &config_changed, &the_cib,
+ &result_cib, NULL, &output);
CRM_CHECK(result_cib == NULL, free_xml(result_cib));
goto done;
}
- /* Handle a valid write action */
+ /* @COMPAT: Handle a valid write action (legacy)
+ *
+ * @TODO: Re-evaluate whether this is all truly legacy. The cib_force_diff
+ * portion is. However, F_CIB_GLOBAL_UPDATE may be set by a sync operation
+ * even in non-legacy mode, and manage_counters tells xml_create_patchset()
+ * whether to update version/epoch info.
+ */
if (pcmk__xe_attr_is_true(request, F_CIB_GLOBAL_UPDATE)) {
- /* legacy code */
- manage_counters = FALSE;
+ manage_counters = false;
cib__set_call_options(call_options, "call", cib_force_diff);
crm_trace("Global update detected");
- CRM_CHECK(call_type == 3 || call_type == 4, crm_err("Call type: %d", call_type);
- crm_log_xml_err(request, "bad op"));
+ CRM_LOG_ASSERT(pcmk__str_any_of(op,
+ PCMK__CIB_REQUEST_APPLY_PATCH,
+ PCMK__CIB_REQUEST_REPLACE,
+ NULL));
}
ping_modified_since = TRUE;
if (pcmk_is_set(call_options, cib_inhibit_bcast)) {
crm_trace("Skipping update: inhibit broadcast");
- manage_counters = FALSE;
- }
-
- if (!pcmk_is_set(call_options, cib_dryrun)
- && pcmk__str_eq(section, XML_CIB_TAG_STATUS, pcmk__str_casei)) {
- // Copying large CIBs accounts for a huge percentage of our CIB usage
- cib__set_call_options(call_options, "call", cib_zero_copy);
- } else {
- cib__clear_call_options(call_options, "call", cib_zero_copy);
- }
-
-#define XPATH_CONFIG "//" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION
-#define XPATH_NODES XPATH_CONFIG "/" XML_CIB_TAG_NODES
-#define XPATH_ALERTS XPATH_CONFIG "/" XML_CIB_TAG_ALERTS
-#define XPATH_STATUS "//" XML_TAG_CIB "/" XML_CIB_TAG_STATUS
-
- // Calculate the hash value of the section before the change
- if (pcmk__str_eq(PCMK__CIB_REQUEST_REPLACE, op, pcmk__str_none)) {
- current_nodes_digest = calculate_section_digest(XPATH_NODES,
- current_cib);
- current_alerts_digest = calculate_section_digest(XPATH_ALERTS,
- current_cib);
- current_status_digest = calculate_section_digest(XPATH_STATUS,
- current_cib);
- crm_trace("current-digest %s:%s:%s", current_nodes_digest,
- current_alerts_digest, current_status_digest);
+ manage_counters = false;
}
// result_cib must not be modified after cib_perform_op() returns
- rc = cib_perform_op(op, call_options, cib_op_func(call_type), FALSE,
- section, request, input, manage_counters,
- &config_changed, current_cib, &result_cib, cib_diff,
- &output);
+ rc = cib_perform_op(op, call_options, op_function, false, section,
+ request, input, manage_counters, &config_changed,
+ &the_cib, &result_cib, cib_diff, &output);
+ // @COMPAT: Legacy code
if (!manage_counters) {
int format = 1;
- /* Legacy code
- * If the diff is NULL at this point, it's because nothing changed
- */
+ // If the diff is NULL at this point, it's because nothing changed
if (*cib_diff != NULL) {
- crm_element_value_int(*cib_diff, "format", &format);
+ crm_element_value_int(*cib_diff, PCMK_XA_FORMAT, &format);
}
if (format == 1) {
@@ -1334,92 +1408,60 @@ cib_process_command(xmlNode * request, xmlNode ** reply, xmlNode ** cib_diff, gb
}
}
- /* Always write to disk for successful replace and upgrade ops. This also
+ /* Always write to disk for successful ops with the flag set. This also
* negates the need to detect ordering changes.
*/
if ((rc == pcmk_ok)
- && pcmk__str_any_of(op,
- PCMK__CIB_REQUEST_REPLACE,
- PCMK__CIB_REQUEST_UPGRADE,
- NULL)) {
- config_changed = TRUE;
- }
-
- if (rc == pcmk_ok && !pcmk_is_set(call_options, cib_dryrun)) {
- crm_trace("Activating %s->%s%s%s",
- crm_element_value(current_cib, XML_ATTR_NUMUPDATES),
- crm_element_value(result_cib, XML_ATTR_NUMUPDATES),
- (pcmk_is_set(call_options, cib_zero_copy)? " zero-copy" : ""),
- (config_changed? " changed" : ""));
- if (!pcmk_is_set(call_options, cib_zero_copy)) {
- rc = activateCibXml(result_cib, config_changed, op);
- crm_trace("Activated %s (%d)",
- crm_element_value(current_cib, XML_ATTR_NUMUPDATES), rc);
- }
+ && pcmk_is_set(operation->flags, cib__op_attr_writes_through)) {
- if ((rc == pcmk_ok) && contains_config_change(*cib_diff)) {
- cib_read_config(config_hash, result_cib);
- }
+ config_changed = true;
+ }
- if (pcmk__str_eq(PCMK__CIB_REQUEST_REPLACE, op, pcmk__str_none)) {
- char *result_nodes_digest = NULL;
- char *result_alerts_digest = NULL;
- char *result_status_digest = NULL;
-
- /* Calculate the hash value of the changed section. */
- result_nodes_digest = calculate_section_digest(XPATH_NODES,
- result_cib);
- result_alerts_digest = calculate_section_digest(XPATH_ALERTS,
- result_cib);
- result_status_digest = calculate_section_digest(XPATH_STATUS,
- result_cib);
- crm_trace("result-digest %s:%s:%s", result_nodes_digest,
- result_alerts_digest, result_status_digest);
-
- if (pcmk__str_eq(current_nodes_digest, result_nodes_digest,
- pcmk__str_none)) {
- change_section =
- pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE,
- "CIB change section",
- "change_section", change_section,
- cib_change_section_nodes, "nodes");
- }
+ if ((rc == pcmk_ok)
+ && !pcmk_any_flags_set(call_options, cib_dryrun|cib_transaction)) {
- if (pcmk__str_eq(current_alerts_digest, result_alerts_digest,
- pcmk__str_none)) {
- change_section =
- pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE,
- "CIB change section",
- "change_section", change_section,
- cib_change_section_alerts, "alerts");
+ if (result_cib != the_cib) {
+ if (pcmk_is_set(operation->flags, cib__op_attr_writes_through)) {
+ config_changed = true;
}
- if (pcmk__str_eq(current_status_digest, result_status_digest,
- pcmk__str_none)) {
- change_section =
- pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE,
- "CIB change section",
- "change_section", change_section,
- cib_change_section_status, "status");
- }
+ crm_trace("Activating %s->%s%s",
+ crm_element_value(the_cib, XML_ATTR_NUMUPDATES),
+ crm_element_value(result_cib, XML_ATTR_NUMUPDATES),
+ (config_changed? " changed" : ""));
- if (change_section != cib_change_section_none) {
- send_r_notify = TRUE;
+ rc = activateCibXml(result_cib, config_changed, op);
+ if (rc != pcmk_ok) {
+ crm_err("Failed to activate new CIB: %s", pcmk_strerror(rc));
}
-
- free(result_nodes_digest);
- free(result_alerts_digest);
- free(result_status_digest);
+ }
+
+ if ((rc == pcmk_ok) && contains_config_change(*cib_diff)) {
+ cib_read_config(config_hash, result_cib);
+ }
- } else if (pcmk__str_eq(PCMK__CIB_REQUEST_ERASE, op, pcmk__str_none)) {
- send_r_notify = TRUE;
+ /* @COMPAT Nodes older than feature set 3.19.0 don't support
+ * transactions. In a mixed-version cluster with nodes <3.19.0, we must
+ * sync the updated CIB, so that the older nodes receive the changes.
+ * Any node that has already applied the transaction will ignore the
+ * synced CIB.
+ *
+ * To ensure the updated CIB is synced from only one node, we sync it
+ * from the originator.
+ */
+ if ((operation->type == cib__op_commit_transact)
+ && pcmk__str_eq(originator, OUR_NODENAME, pcmk__str_casei)
+ && compare_version(crm_element_value(the_cib, XML_ATTR_CRM_VERSION),
+ "3.19.0") < 0) {
+
+ sync_our_cib(request, TRUE);
}
mainloop_timer_stop(digest_timer);
mainloop_timer_start(digest_timer);
} else if (rc == -pcmk_err_schema_validation) {
- CRM_ASSERT(!pcmk_is_set(call_options, cib_zero_copy));
+ CRM_ASSERT(result_cib != the_cib);
if (output != NULL) {
crm_log_xml_info(output, "cib:output");
@@ -1432,61 +1474,31 @@ cib_process_command(xmlNode * request, xmlNode ** reply, xmlNode ** cib_diff, gb
crm_trace("Not activating %d %d %s", rc,
pcmk_is_set(call_options, cib_dryrun),
crm_element_value(result_cib, XML_ATTR_NUMUPDATES));
- if (!pcmk_is_set(call_options, cib_zero_copy)) {
+
+ if (result_cib != the_cib) {
free_xml(result_cib);
}
}
- if ((call_options & (cib_inhibit_notify|cib_dryrun)) == 0) {
+ if (!pcmk_any_flags_set(call_options,
+ cib_dryrun|cib_inhibit_notify|cib_transaction)) {
crm_trace("Sending notifications %d",
pcmk_is_set(call_options, cib_dryrun));
- cib_diff_notify(op, rc, call_id, client_id, client_name, origin, input,
- *cib_diff);
+ cib_diff_notify(op, rc, call_id, client_id, client_name, originator,
+ input, *cib_diff);
}
- if (send_r_notify) {
- cib_replace_notify(op, rc, call_id, client_id, client_name, origin,
- the_cib, *cib_diff, change_section);
- }
-
- pcmk__output_set_log_level(logger_out, LOG_TRACE);
- logger_out->message(logger_out, "xml-patchset", *cib_diff);
+ pcmk__log_xml_patchset(LOG_TRACE, *cib_diff);
done:
if (!pcmk_is_set(call_options, cib_discard_reply) || cib_legacy_mode()) {
- const char *caller = crm_element_value(request, F_CIB_CLIENTID);
-
- *reply = create_xml_node(NULL, "cib-reply");
- crm_xml_add(*reply, F_TYPE, T_CIB);
- crm_xml_add(*reply, F_CIB_OPERATION, op);
- crm_xml_add(*reply, F_CIB_CALLID, call_id);
- crm_xml_add(*reply, F_CIB_CLIENTID, caller);
- crm_xml_add_int(*reply, F_CIB_CALLOPTS, call_options);
- crm_xml_add_int(*reply, F_CIB_RC, rc);
-
- if (output != NULL) {
- crm_trace("Attaching reply output");
- add_message_xml(*reply, F_CIB_CALLDATA, output);
- }
-
- crm_log_xml_explicit(*reply, "cib:reply");
+ *reply = create_cib_reply(op, call_id, client_id, call_options, rc,
+ output);
}
- crm_trace("cleanup");
-
- if (cib_op_modifies(call_type) == FALSE && output != current_cib) {
+ if (output != the_cib) {
free_xml(output);
- output = NULL;
- }
-
- if (call_type >= 0) {
- cib_op_cleanup(call_type, call_options, &input, &output);
}
-
- free(current_nodes_digest);
- free(current_alerts_digest);
- free(current_status_digest);
-
crm_trace("done");
return rc;
}
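The inline reply construction removed above is consolidated into create_cib_reply(), defined elsewhere in this patch. Assuming it mirrors the removed code, the reply it builds looks roughly like:

xmlNode *reply = create_xml_node(NULL, "cib-reply");

crm_xml_add(reply, F_TYPE, T_CIB);
crm_xml_add(reply, F_CIB_OPERATION, op);
crm_xml_add(reply, F_CIB_CALLID, call_id);
crm_xml_add(reply, F_CIB_CLIENTID, client_id);
crm_xml_add_int(reply, F_CIB_CALLOPTS, call_options);
crm_xml_add_int(reply, F_CIB_RC, rc);
if (call_data != NULL) {
    add_message_xml(reply, F_CIB_CALLDATA, call_data);
}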
@@ -1554,12 +1566,12 @@ initiate_exit(void)
xmlNode *leaving = NULL;
active = crm_active_peers();
- if (active < 2) {
+ if (active < 2) { // This is the last active node
terminate_cib(__func__, 0);
return;
}
- crm_info("Sending disconnect notification to %d peers...", active);
+ crm_info("Sending shutdown request to %d peers", active);
leaving = create_xml_node(NULL, "exit-notification");
crm_xml_add(leaving, F_TYPE, "cib");
@@ -1664,12 +1676,6 @@ terminate_cib(const char *caller, int fast)
uninitializeCib();
- if (logger_out != NULL) {
- logger_out->finish(logger_out, CRM_EX_OK, true, NULL);
- pcmk__output_free(logger_out);
- logger_out = NULL;
- }
-
if (fast > 0) {
/* Quit fast on error */
pcmk__stop_based_ipc(ipcs_ro, ipcs_rw, ipcs_shm);
diff --git a/daemons/based/based_common.c b/daemons/based/based_common.c
deleted file mode 100644
index 7e68cf0..0000000
--- a/daemons/based/based_common.c
+++ /dev/null
@@ -1,352 +0,0 @@
-/*
- * Copyright 2008-2023 the Pacemaker project contributors
- *
- * The version control history for this file may have further details.
- *
- * This source code is licensed under the GNU General Public License version 2
- * or later (GPLv2+) WITHOUT ANY WARRANTY.
- */
-
-#include <crm_internal.h>
-
-#include <sys/param.h>
-#include <stdio.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <stdlib.h>
-#include <errno.h>
-#include <fcntl.h>
-
-#include <crm/crm.h>
-#include <crm/cib.h>
-#include <crm/msg_xml.h>
-#include <crm/common/ipc.h>
-#include <crm/cluster.h>
-
-#include <crm/common/xml.h>
-
-#include <pacemaker-based.h>
-
-gboolean stand_alone = FALSE;
-
-extern int cib_perform_command(xmlNode * request, xmlNode ** reply, xmlNode ** cib_diff,
- gboolean privileged);
-
-static xmlNode *
-cib_prepare_common(xmlNode * root, const char *section)
-{
- xmlNode *data = NULL;
-
- /* extract the CIB from the fragment */
- if (root == NULL) {
- return NULL;
-
- } else if (pcmk__strcase_any_of(crm_element_name(root), XML_TAG_FRAGMENT,
- F_CRM_DATA, F_CIB_CALLDATA, NULL)) {
- data = first_named_child(root, XML_TAG_CIB);
-
- } else {
- data = root;
- }
-
- /* grab the section specified for the command */
- if (section != NULL && data != NULL && pcmk__str_eq(crm_element_name(data), XML_TAG_CIB, pcmk__str_none)) {
- data = pcmk_find_cib_element(data, section);
- }
-
- /* crm_log_xml_trace(root, "cib:input"); */
- return data;
-}
-
-static int
-cib_prepare_none(xmlNode * request, xmlNode ** data, const char **section)
-{
- *data = NULL;
- *section = crm_element_value(request, F_CIB_SECTION);
- return pcmk_ok;
-}
-
-static int
-cib_prepare_data(xmlNode * request, xmlNode ** data, const char **section)
-{
- xmlNode *input_fragment = get_message_xml(request, F_CIB_CALLDATA);
-
- *section = crm_element_value(request, F_CIB_SECTION);
- *data = cib_prepare_common(input_fragment, *section);
- /* crm_log_xml_debug(*data, "data"); */
- return pcmk_ok;
-}
-
-static int
-cib_prepare_sync(xmlNode * request, xmlNode ** data, const char **section)
-{
- *data = NULL;
- *section = crm_element_value(request, F_CIB_SECTION);
- return pcmk_ok;
-}
-
-static int
-cib_prepare_diff(xmlNode * request, xmlNode ** data, const char **section)
-{
- xmlNode *input_fragment = NULL;
-
- *data = NULL;
- *section = NULL;
-
- if (pcmk__xe_attr_is_true(request, F_CIB_GLOBAL_UPDATE)) {
- input_fragment = get_message_xml(request, F_CIB_UPDATE_DIFF);
- } else {
- input_fragment = get_message_xml(request, F_CIB_CALLDATA);
- }
-
- CRM_CHECK(input_fragment != NULL, crm_log_xml_warn(request, "no input"));
- *data = cib_prepare_common(input_fragment, NULL);
- return pcmk_ok;
-}
-
-static int
-cib_cleanup_query(int options, xmlNode ** data, xmlNode ** output)
-{
- CRM_LOG_ASSERT(*data == NULL);
- if ((options & cib_no_children)
- || pcmk__str_eq(crm_element_name(*output), "xpath-query", pcmk__str_casei)) {
- free_xml(*output);
- }
- return pcmk_ok;
-}
-
-static int
-cib_cleanup_data(int options, xmlNode ** data, xmlNode ** output)
-{
- free_xml(*output);
- *data = NULL;
- return pcmk_ok;
-}
-
-static int
-cib_cleanup_output(int options, xmlNode ** data, xmlNode ** output)
-{
- free_xml(*output);
- return pcmk_ok;
-}
-
-static int
-cib_cleanup_none(int options, xmlNode ** data, xmlNode ** output)
-{
- CRM_LOG_ASSERT(*data == NULL);
- CRM_LOG_ASSERT(*output == NULL);
- return pcmk_ok;
-}
-
-static cib_operation_t cib_server_ops[] = {
- // Booleans are modifies_cib, needs_privileges
- {
- NULL, FALSE, FALSE,
- cib_prepare_none, cib_cleanup_none, cib_process_default
- },
- {
- PCMK__CIB_REQUEST_QUERY, FALSE, FALSE,
- cib_prepare_none, cib_cleanup_query, cib_process_query
- },
- {
- PCMK__CIB_REQUEST_MODIFY, TRUE, TRUE,
- cib_prepare_data, cib_cleanup_data, cib_process_modify
- },
- {
- PCMK__CIB_REQUEST_APPLY_PATCH, TRUE, TRUE,
- cib_prepare_diff, cib_cleanup_data, cib_server_process_diff
- },
- {
- PCMK__CIB_REQUEST_REPLACE, TRUE, TRUE,
- cib_prepare_data, cib_cleanup_data, cib_process_replace_svr
- },
- {
- PCMK__CIB_REQUEST_CREATE, TRUE, TRUE,
- cib_prepare_data, cib_cleanup_data, cib_process_create
- },
- {
- PCMK__CIB_REQUEST_DELETE, TRUE, TRUE,
- cib_prepare_data, cib_cleanup_data, cib_process_delete
- },
- {
- PCMK__CIB_REQUEST_SYNC_TO_ALL, FALSE, TRUE,
- cib_prepare_sync, cib_cleanup_none, cib_process_sync
- },
- {
- PCMK__CIB_REQUEST_BUMP, TRUE, TRUE,
- cib_prepare_none, cib_cleanup_output, cib_process_bump
- },
- {
- PCMK__CIB_REQUEST_ERASE, TRUE, TRUE,
- cib_prepare_none, cib_cleanup_output, cib_process_erase
- },
- {
- PCMK__CIB_REQUEST_NOOP, FALSE, FALSE,
- cib_prepare_none, cib_cleanup_none, cib_process_default
- },
- {
- PCMK__CIB_REQUEST_ABS_DELETE, TRUE, TRUE,
- cib_prepare_data, cib_cleanup_data, cib_process_delete_absolute
- },
- {
- PCMK__CIB_REQUEST_UPGRADE, TRUE, TRUE,
- cib_prepare_none, cib_cleanup_output, cib_process_upgrade_server
- },
- {
- PCMK__CIB_REQUEST_SECONDARY, FALSE, TRUE,
- cib_prepare_none, cib_cleanup_none, cib_process_readwrite
- },
- {
- PCMK__CIB_REQUEST_ALL_SECONDARY, FALSE, TRUE,
- cib_prepare_none, cib_cleanup_none, cib_process_readwrite
- },
- {
- PCMK__CIB_REQUEST_SYNC_TO_ONE, FALSE, TRUE,
- cib_prepare_sync, cib_cleanup_none, cib_process_sync_one
- },
- {
- PCMK__CIB_REQUEST_PRIMARY, TRUE, TRUE,
- cib_prepare_data, cib_cleanup_data, cib_process_readwrite
- },
- {
- PCMK__CIB_REQUEST_IS_PRIMARY, FALSE, TRUE,
- cib_prepare_none, cib_cleanup_none, cib_process_readwrite
- },
- {
- PCMK__CIB_REQUEST_SHUTDOWN, FALSE, TRUE,
- cib_prepare_sync, cib_cleanup_none, cib_process_shutdown_req
- },
- {
- CRM_OP_PING, FALSE, FALSE,
- cib_prepare_none, cib_cleanup_output, cib_process_ping
- },
-};
-
-int
-cib_get_operation_id(const char *op, int *operation)
-{
- static GHashTable *operation_hash = NULL;
-
- if (operation_hash == NULL) {
- int lpc = 0;
- int max_msg_types = PCMK__NELEM(cib_server_ops);
-
- operation_hash = pcmk__strkey_table(NULL, free);
- for (lpc = 1; lpc < max_msg_types; lpc++) {
- int *value = malloc(sizeof(int));
-
- if(value) {
- *value = lpc;
- g_hash_table_insert(operation_hash, (gpointer) cib_server_ops[lpc].operation, value);
- }
- }
- }
-
- if (op != NULL) {
- int *value = g_hash_table_lookup(operation_hash, op);
-
- if (value) {
- *operation = *value;
- return pcmk_ok;
- }
- }
- crm_err("Operation %s is not valid", op);
- *operation = -1;
- return -EINVAL;
-}
-
-xmlNode *
-cib_msg_copy(xmlNode * msg, gboolean with_data)
-{
- int lpc = 0;
- const char *field = NULL;
- const char *value = NULL;
- xmlNode *value_struct = NULL;
-
- static const char *field_list[] = {
- F_XML_TAGNAME,
- F_TYPE,
- F_CIB_CLIENTID,
- F_CIB_CALLOPTS,
- F_CIB_CALLID,
- F_CIB_OPERATION,
- F_CIB_ISREPLY,
- F_CIB_SECTION,
- F_CIB_HOST,
- F_CIB_RC,
- F_CIB_DELEGATED,
- F_CIB_OBJID,
- F_CIB_OBJTYPE,
- F_CIB_EXISTING,
- F_CIB_SEENCOUNT,
- F_CIB_TIMEOUT,
- F_CIB_GLOBAL_UPDATE,
- F_CIB_CLIENTNAME,
- F_CIB_USER,
- F_CIB_NOTIFY_TYPE,
- F_CIB_NOTIFY_ACTIVATE
- };
-
- static const char *data_list[] = {
- F_CIB_CALLDATA,
- F_CIB_UPDATE,
- F_CIB_UPDATE_RESULT
- };
-
- xmlNode *copy = create_xml_node(NULL, "copy");
-
- CRM_ASSERT(copy != NULL);
-
- for (lpc = 0; lpc < PCMK__NELEM(field_list); lpc++) {
- field = field_list[lpc];
- value = crm_element_value(msg, field);
- if (value != NULL) {
- crm_xml_add(copy, field, value);
- }
- }
- for (lpc = 0; with_data && lpc < PCMK__NELEM(data_list); lpc++) {
- field = data_list[lpc];
- value_struct = get_message_xml(msg, field);
- if (value_struct != NULL) {
- add_message_xml(copy, field, value_struct);
- }
- }
-
- return copy;
-}
-
-cib_op_t *
-cib_op_func(int call_type)
-{
- return &(cib_server_ops[call_type].fn);
-}
-
-gboolean
-cib_op_modifies(int call_type)
-{
- return cib_server_ops[call_type].modifies_cib;
-}
-
-int
-cib_op_can_run(int call_type, int call_options, bool privileged)
-{
- if (!privileged && cib_server_ops[call_type].needs_privileges) {
- return -EACCES;
- }
- return pcmk_ok;
-}
-
-int
-cib_op_prepare(int call_type, xmlNode * request, xmlNode ** input, const char **section)
-{
- crm_trace("Prepare %d", call_type);
- return cib_server_ops[call_type].prepare(request, input, section);
-}
-
-int
-cib_op_cleanup(int call_type, int options, xmlNode ** input, xmlNode ** output)
-{
- crm_trace("Cleanup %d", call_type);
- return cib_server_ops[call_type].cleanup(options, input, output);
-}
diff --git a/daemons/based/based_io.c b/daemons/based/based_io.c
index fc34f39..f252ac1 100644
--- a/daemons/based/based_io.c
+++ b/daemons/based/based_io.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -22,6 +22,9 @@
#include <sys/wait.h>
#include <sys/stat.h>
+#include <glib.h>
+#include <libxml/tree.h>
+
#include <crm/crm.h>
#include <crm/cib.h>
@@ -45,12 +48,15 @@ cib_rename(const char *old)
umask(S_IWGRP | S_IWOTH | S_IROTH);
new_fd = mkstemp(new);
- crm_err("Archiving unusable file %s as %s", old, new);
+
if ((new_fd < 0) || (rename(old, new) < 0)) {
- crm_perror(LOG_ERR, "Couldn't rename %s as %s", old, new);
- crm_err("Disabling disk writes and continuing");
+ crm_err("Couldn't archive unusable file %s (disabling disk writes and continuing)",
+ old);
cib_writes_enabled = FALSE;
+ } else {
+ crm_err("Archived unusable file %s as %s", old, new);
}
+
if (new_fd > 0) {
close(new_fd);
}
@@ -107,7 +113,7 @@ static int cib_archive_filter(const struct dirent * a)
if(stat(a_path, &s) != 0) {
rc = errno;
- crm_trace("%s - stat failed: %s (%d)", a->d_name, pcmk_strerror(rc), rc);
+ crm_trace("%s - stat failed: %s (%d)", a->d_name, pcmk_rc_str(rc), rc);
rc = 0;
} else if ((s.st_mode & S_IFREG) != S_IFREG) {
@@ -189,7 +195,7 @@ readCibXmlFile(const char *dir, const char *file, gboolean discard_status)
const char *name = NULL;
const char *value = NULL;
const char *validation = NULL;
- const char *use_valgrind = getenv("PCMK_valgrind_enabled");
+ const char *use_valgrind = pcmk__env_option(PCMK__ENV_VALGRIND_ENABLED);
xmlNode *root = NULL;
xmlNode *status = NULL;
@@ -214,7 +220,7 @@ readCibXmlFile(const char *dir, const char *file, gboolean discard_status)
crm_warn("Primary configuration corrupt or unusable, trying backups in %s", cib_root);
lpc = scandir(cib_root, &namelist, cib_archive_filter, cib_archive_sort);
if (lpc < 0) {
- crm_perror(LOG_NOTICE, "scandir(%s) failed", cib_root);
+ crm_err("scandir(%s) failed: %s", cib_root, pcmk_rc_str(errno));
}
}
@@ -418,7 +424,7 @@ write_cib_contents(gpointer p)
pid = fork();
if (pid < 0) {
- crm_perror(LOG_ERR, "Disabling disk writes after fork failure");
+ crm_err("Disabling disk writes after fork failure: %s", pcmk_rc_str(errno));
cib_writes_enabled = FALSE;
return FALSE;
}
diff --git a/daemons/based/based_messages.c b/daemons/based/based_messages.c
index d46456c..35d639a 100644
--- a/daemons/based/based_messages.c
+++ b/daemons/based/based_messages.c
@@ -19,6 +19,9 @@
#include <sys/param.h>
#include <sys/types.h>
+#include <glib.h>
+#include <libxml/tree.h>
+
#include <crm/crm.h>
#include <crm/cib/internal.h>
#include <crm/msg_xml.h>
@@ -61,25 +64,15 @@ cib_process_shutdown_req(const char *op, int options, const char *section, xmlNo
return pcmk_ok;
}
+// @COMPAT: Remove when PCMK__CIB_REQUEST_NOOP is removed
int
-cib_process_default(const char *op, int options, const char *section, xmlNode * req,
- xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib,
- xmlNode ** answer)
+cib_process_noop(const char *op, int options, const char *section, xmlNode *req,
+ xmlNode *input, xmlNode *existing_cib, xmlNode **result_cib,
+ xmlNode **answer)
{
- int result = pcmk_ok;
-
crm_trace("Processing \"%s\" event", op);
*answer = NULL;
-
- if (op == NULL) {
- result = -EINVAL;
- crm_err("No operation specified");
-
- } else if (strcmp(PCMK__CIB_REQUEST_NOOP, op) != 0) {
- result = -EPROTONOSUPPORT;
- crm_err("Action [%s] is not supported by the CIB manager", op);
- }
- return result;
+ return pcmk_ok;
}
int
@@ -158,10 +151,10 @@ cib_process_ping(const char *op, int options, const char *section, xmlNode * req
// Append additional detail so the receiver can log the differences
add_message_xml(*answer, F_CIB_CALLDATA, the_cib);
},
- {
+ if (the_cib != NULL) {
// Always include at least the version details
- const char *tag = TYPE(the_cib);
- xmlNode *shallow = create_xml_node(NULL, tag);
+ xmlNode *shallow = create_xml_node(NULL,
+ (const char *) the_cib->name);
copy_in_properties(shallow, the_cib);
add_message_xml(*answer, F_CIB_CALLDATA, shallow);
@@ -250,7 +243,7 @@ cib_process_upgrade_server(const char *op, int options, const char *section, xml
if (rc != pcmk_ok) {
// Notify originating peer so it can notify its local clients
- crm_node_t *origin = pcmk__search_cluster_node_cache(0, host);
+ crm_node_t *origin = pcmk__search_cluster_node_cache(0, host, NULL);
crm_info("Rejecting upgrade request from %s: %s "
CRM_XS " rc=%d peer=%s", host, pcmk_strerror(rc), rc,
@@ -341,8 +334,7 @@ cib_server_process_diff(const char *op, int options, const char *section, xmlNod
crm_warn("Requesting full CIB refresh because update failed: %s"
CRM_XS " rc=%d", pcmk_strerror(rc), rc);
- pcmk__output_set_log_level(logger_out, LOG_INFO);
- logger_out->message(logger_out, "xml-patchset", input);
+ pcmk__log_xml_patchset(LOG_INFO, input);
free_xml(*result_cib);
*result_cib = NULL;
send_sync_request(NULL);
@@ -356,15 +348,16 @@ cib_process_replace_svr(const char *op, int options, const char *section, xmlNod
xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib,
xmlNode ** answer)
{
- const char *tag = crm_element_name(input);
int rc =
cib_process_replace(op, options, section, req, input, existing_cib, result_cib, answer);
- if (rc == pcmk_ok && pcmk__str_eq(tag, XML_TAG_CIB, pcmk__str_casei)) {
+
+ if ((rc == pcmk_ok) && pcmk__xe_is(input, XML_TAG_CIB)) {
sync_in_progress = 0;
}
return rc;
}
+// @COMPAT: Remove when PCMK__CIB_REQUEST_ABS_DELETE is removed
int
cib_process_delete_absolute(const char *op, int options, const char *section, xmlNode * req,
xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib,
@@ -373,6 +366,49 @@ cib_process_delete_absolute(const char *op, int options, const char *section, xm
return -EINVAL;
}
+static xmlNode *
+cib_msg_copy(xmlNode *msg)
+{
+ static const char *field_list[] = {
+ F_XML_TAGNAME,
+ F_TYPE,
+ F_CIB_CLIENTID,
+ F_CIB_CALLOPTS,
+ F_CIB_CALLID,
+ F_CIB_OPERATION,
+ F_CIB_ISREPLY,
+ F_CIB_SECTION,
+ F_CIB_HOST,
+ F_CIB_RC,
+ F_CIB_DELEGATED,
+ F_CIB_OBJID,
+ F_CIB_OBJTYPE,
+ F_CIB_EXISTING,
+ F_CIB_SEENCOUNT,
+ F_CIB_TIMEOUT,
+ F_CIB_GLOBAL_UPDATE,
+ F_CIB_CLIENTNAME,
+ F_CIB_USER,
+ F_CIB_NOTIFY_TYPE,
+ F_CIB_NOTIFY_ACTIVATE
+ };
+
+ xmlNode *copy = create_xml_node(NULL, "copy");
+
+ CRM_ASSERT(copy != NULL);
+
+ for (int lpc = 0; lpc < PCMK__NELEM(field_list); lpc++) {
+ const char *field = field_list[lpc];
+ const char *value = crm_element_value(msg, field);
+
+ if (value != NULL) {
+ crm_xml_add(copy, field, value);
+ }
+ }
+
+ return copy;
+}
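cib_msg_copy() deliberately copies only routing metadata; sync_our_cib() below then attaches the current CIB as the payload. A sketch of that hand-off (not all of the attributes sync_our_cib() sets are shown in this hunk, so treat this as an approximation):

xmlNode *replace_request = cib_msg_copy(request);

crm_xml_add(replace_request, F_CIB_OPERATION, PCMK__CIB_REQUEST_REPLACE);
add_message_xml(replace_request, F_CIB_CALLDATA, the_cib);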
+
int
sync_our_cib(xmlNode * request, gboolean all)
{
@@ -384,22 +420,12 @@ sync_our_cib(xmlNode * request, gboolean all)
xmlNode *replace_request = NULL;
CRM_CHECK(the_cib != NULL, return -EINVAL);
-
- replace_request = cib_msg_copy(request, FALSE);
- CRM_CHECK(replace_request != NULL, return -EINVAL);
+ CRM_CHECK(all || (host != NULL), return -EINVAL);
crm_debug("Syncing CIB to %s", all ? "all peers" : host);
- if (all == FALSE && host == NULL) {
- crm_log_xml_err(request, "bad sync");
- }
- /* remove the "all == FALSE" condition
- *
- * sync_from was failing, the local client wasn't being notified
- * because it didn't know it was a reply
- * setting this does not prevent the other nodes from applying it
- * if all == TRUE
- */
+ replace_request = cib_msg_copy(request);
+
if (host != NULL) {
crm_xml_add(replace_request, F_CIB_ISREPLY, host);
}
@@ -425,3 +451,30 @@ sync_our_cib(xmlNode * request, gboolean all)
free(digest);
return result;
}
+
+int
+cib_process_commit_transaction(const char *op, int options, const char *section,
+ xmlNode *req, xmlNode *input,
+ xmlNode *existing_cib, xmlNode **result_cib,
+ xmlNode **answer)
+{
+ /* On success, our caller will activate *result_cib locally, trigger a
+ * replace notification if appropriate, and sync *result_cib to all nodes.
+ * On failure, our caller will free *result_cib.
+ */
+ int rc = pcmk_rc_ok;
+ const char *client_id = crm_element_value(req, F_CIB_CLIENTID);
+ const char *origin = crm_element_value(req, F_ORIG);
+ pcmk__client_t *client = pcmk__find_client_by_id(client_id);
+
+ rc = based_commit_transaction(input, client, origin, result_cib);
+
+ if (rc != pcmk_rc_ok) {
+ char *source = based_transaction_source_str(client, origin);
+
+ crm_err("Could not commit transaction for %s: %s",
+ source, pcmk_rc_str(rc));
+ free(source);
+ }
+ return pcmk_rc2legacy(rc);
+}
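The handler returns a legacy integer code, matching the other entries in the operation table; a caller working in standard return codes converts at the boundary with the existing helpers (argument names below follow the handler signature above):

int legacy_rc = cib_process_commit_transaction(op, options, section, req,
                                               input, existing_cib,
                                               &result_cib, &answer);
int rc = pcmk_legacy2rc(legacy_rc);  // back to a standard Pacemaker code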
diff --git a/daemons/based/based_notify.c b/daemons/based/based_notify.c
index 5881f6d..00a4c54 100644
--- a/daemons/based/based_notify.c
+++ b/daemons/based/based_notify.c
@@ -21,6 +21,9 @@
#include <time.h>
+#include <glib.h>
+#include <libxml/tree.h>
+
#include <crm/crm.h>
#include <crm/cib/internal.h>
#include <crm/msg_xml.h>
@@ -30,7 +33,7 @@
#include <pacemaker-based.h>
struct cib_notification_s {
- xmlNode *msg;
+ const xmlNode *msg;
struct iovec *iov;
int32_t iov_size;
};
@@ -58,10 +61,6 @@ cib_notify_send_one(gpointer key, gpointer value, gpointer user_data)
do_send = TRUE;
- } else if (pcmk_is_set(client->flags, cib_notify_replace)
- && pcmk__str_eq(type, T_CIB_REPLACE_NOTIFY, pcmk__str_casei)) {
- do_send = TRUE;
-
} else if (pcmk_is_set(client->flags, cib_notify_confirm)
&& pcmk__str_eq(type, T_CIB_UPDATE_CONFIRM, pcmk__str_casei)) {
do_send = TRUE;
@@ -104,7 +103,7 @@ cib_notify_send_one(gpointer key, gpointer value, gpointer user_data)
}
static void
-cib_notify_send(xmlNode * xml)
+cib_notify_send(const xmlNode *xml)
{
struct iovec *iov;
struct cib_notification_s update;
@@ -198,15 +197,16 @@ cib_diff_notify(const char *op, int result, const char *call_id,
crm_xml_add(update_msg, F_SUBTYPE, T_CIB_DIFF_NOTIFY);
crm_xml_add(update_msg, F_CIB_OPERATION, op);
crm_xml_add(update_msg, F_CIB_CLIENTID, client_id);
+ crm_xml_add(update_msg, F_CIB_CLIENTNAME, client_name);
crm_xml_add(update_msg, F_CIB_CALLID, call_id);
crm_xml_add(update_msg, F_ORIG, origin);
crm_xml_add_int(update_msg, F_CIB_RC, result);
if (update != NULL) {
- type = crm_element_name(update);
+ type = (const char *) update->name;
crm_trace("Setting type to update->name: %s", type);
} else {
- type = crm_element_name(diff);
+ type = (const char *) diff->name;
crm_trace("Setting type to new_obj->name: %s", type);
}
crm_xml_add(update_msg, F_CIB_OBJID, ID(diff));
@@ -218,88 +218,7 @@ cib_diff_notify(const char *op, int result, const char *call_id,
}
add_message_xml(update_msg, F_CIB_UPDATE_RESULT, diff);
+ crm_log_xml_trace(update_msg, "diff-notify");
cib_notify_send(update_msg);
free_xml(update_msg);
}
-
-void
-cib_replace_notify(const char *op, int result, const char *call_id,
- const char *client_id, const char *client_name,
- const char *origin, xmlNode *update, xmlNode *diff,
- uint32_t change_section)
-{
- xmlNode *replace_msg = NULL;
-
- int add_updates = 0;
- int add_epoch = 0;
- int add_admin_epoch = 0;
-
- int del_updates = 0;
- int del_epoch = 0;
- int del_admin_epoch = 0;
-
- uint8_t log_level = LOG_INFO;
-
- if (diff == NULL) {
- return;
- }
-
- if (result != pcmk_ok) {
- log_level = LOG_WARNING;
- }
-
- cib_diff_version_details(diff, &add_admin_epoch, &add_epoch, &add_updates,
- &del_admin_epoch, &del_epoch, &del_updates);
-
- if (del_updates < 0) {
- crm_log_xml_debug(diff, "Bad replace diff");
- }
-
- if ((add_admin_epoch != del_admin_epoch)
- || (add_epoch != del_epoch)
- || (add_updates != del_updates)) {
-
- do_crm_log(log_level,
- "Replaced CIB generation %d.%d.%d with %d.%d.%d from client "
- "%s%s%s (%s) (%s)",
- del_admin_epoch, del_epoch, del_updates,
- add_admin_epoch, add_epoch, add_updates,
- client_name,
- ((call_id != NULL)? " call " : ""), pcmk__s(call_id, ""),
- pcmk__s(origin, "unspecified peer"), pcmk_strerror(result));
-
- } else if ((add_admin_epoch != 0)
- || (add_epoch != 0)
- || (add_updates != 0)) {
-
- do_crm_log(log_level,
- "Local-only replace of CIB generation %d.%d.%d from client "
- "%s%s%s (%s) (%s)",
- add_admin_epoch, add_epoch, add_updates,
- client_name,
- ((call_id != NULL)? " call " : ""), pcmk__s(call_id, ""),
- pcmk__s(origin, "unspecified peer"), pcmk_strerror(result));
- }
-
- replace_msg = create_xml_node(NULL, "notify-replace");
-
- crm_xml_add(replace_msg, F_TYPE, T_CIB_NOTIFY);
- crm_xml_add(replace_msg, F_SUBTYPE, T_CIB_REPLACE_NOTIFY);
- crm_xml_add(replace_msg, F_CIB_OPERATION, op);
- crm_xml_add(replace_msg, F_CIB_CLIENTID, client_id);
- crm_xml_add(replace_msg, F_CIB_CALLID, call_id);
- crm_xml_add(replace_msg, F_ORIG, origin);
- crm_xml_add_int(replace_msg, F_CIB_RC, result);
- crm_xml_add_ll(replace_msg, F_CIB_CHANGE_SECTION,
- (long long) change_section);
- attach_cib_generation(replace_msg, "cib-replace-generation", update);
-
- /* We can include update and diff if a replace callback needs them. Until
- * then, avoid the overhead.
- */
-
- crm_log_xml_trace(replace_msg, "CIB replaced");
-
- cib_notify_send(replace_msg);
- free_xml(replace_msg);
-}
diff --git a/daemons/based/based_operation.c b/daemons/based/based_operation.c
new file mode 100644
index 0000000..736d425
--- /dev/null
+++ b/daemons/based/based_operation.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2008-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <glib.h>
+
+#include <crm/crm.h>
+#include <crm/cib.h>
+#include <pacemaker-based.h>
+
+static const cib__op_fn_t cib_op_functions[] = {
+ [cib__op_abs_delete] = cib_process_delete_absolute,
+ [cib__op_apply_patch] = cib_server_process_diff,
+ [cib__op_bump] = cib_process_bump,
+ [cib__op_commit_transact] = cib_process_commit_transaction,
+ [cib__op_create] = cib_process_create,
+ [cib__op_delete] = cib_process_delete,
+ [cib__op_erase] = cib_process_erase,
+ [cib__op_is_primary] = cib_process_readwrite,
+ [cib__op_modify] = cib_process_modify,
+ [cib__op_noop] = cib_process_noop,
+ [cib__op_ping] = cib_process_ping,
+ [cib__op_primary] = cib_process_readwrite,
+ [cib__op_query] = cib_process_query,
+ [cib__op_replace] = cib_process_replace_svr,
+ [cib__op_secondary] = cib_process_readwrite,
+ [cib__op_shutdown] = cib_process_shutdown_req,
+ [cib__op_sync_all] = cib_process_sync,
+ [cib__op_sync_one] = cib_process_sync_one,
+ [cib__op_upgrade] = cib_process_upgrade_server,
+};
+
+/*!
+ * \internal
+ * \brief Get the function that performs a given server-side CIB operation
+ *
+ * \param[in] operation Operation whose function to look up
+ *
+ * \return Function that performs \p operation within \c pacemaker-based
+ */
+cib__op_fn_t
+based_get_op_function(const cib__operation_t *operation)
+{
+ enum cib__op_type type = operation->type;
+
+ CRM_ASSERT(type >= 0);
+
+ if (type >= PCMK__NELEM(cib_op_functions)) {
+ return NULL;
+ }
+ return cib_op_functions[type];
+}
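Paired with cib__get_operation() (see cib_process_request() earlier in this patch), this table replaces the old integer call_type lookup. The two-step resolution, sketched:

const cib__operation_t *operation = NULL;
cib__op_fn_t op_function = NULL;

if (cib__get_operation(op, &operation) == pcmk_rc_ok) {
    op_function = based_get_op_function(operation);
    if (op_function == NULL) {
        // Known to the CIB library but not served by pacemaker-based
    }
}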
diff --git a/daemons/based/based_remote.c b/daemons/based/based_remote.c
index 38136d2..4aa41fa 100644
--- a/daemons/based/based_remote.c
+++ b/daemons/based/based_remote.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2021 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -23,7 +23,9 @@
#include <stdlib.h>
#include <errno.h>
+
#include <glib.h>
+#include <libxml/tree.h>
#include <crm/msg_xml.h>
#include <crm/common/ipc.h>
@@ -126,13 +128,13 @@ init_remote_listener(int port, gboolean encrypted)
/* create server socket */
ssock = malloc(sizeof(int));
if(ssock == NULL) {
- crm_perror(LOG_ERR, "Listener socket allocation failed");
+ crm_err("Listener socket allocation failed: %s", pcmk_rc_str(errno));
return -1;
}
*ssock = socket(AF_INET, SOCK_STREAM, 0);
if (*ssock == -1) {
- crm_perror(LOG_ERR, "Listener socket creation failed");
+ crm_err("Listener socket creation failed: %s", pcmk_rc_str(errno));
free(ssock);
return -1;
}
@@ -141,8 +143,8 @@ init_remote_listener(int port, gboolean encrypted)
optval = 1;
rc = setsockopt(*ssock, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));
if (rc < 0) {
- crm_perror(LOG_WARNING,
- "Local address reuse not allowed on listener socket");
+ crm_err("Local address reuse not allowed on listener socket: %s",
+ pcmk_rc_str(errno));
}
/* bind server socket */
@@ -151,13 +153,13 @@ init_remote_listener(int port, gboolean encrypted)
saddr.sin_addr.s_addr = INADDR_ANY;
saddr.sin_port = htons(port);
if (bind(*ssock, (struct sockaddr *)&saddr, sizeof(saddr)) == -1) {
- crm_perror(LOG_ERR, "Cannot bind to listener socket");
+ crm_err("Cannot bind to listener socket: %s", pcmk_rc_str(errno));
close(*ssock);
free(ssock);
return -2;
}
if (listen(*ssock, 10) == -1) {
- crm_perror(LOG_ERR, "Cannot listen on socket");
+ crm_err("Cannot listen on socket: %s", pcmk_rc_str(errno));
close(*ssock);
free(ssock);
return -3;
@@ -222,9 +224,9 @@ cib_remote_auth(xmlNode * login)
return FALSE;
}
- tmp = crm_element_name(login);
- if (!pcmk__str_eq(tmp, "cib_command", pcmk__str_casei)) {
- crm_err("Wrong tag: %s", tmp);
+ if (!pcmk__xe_is(login, T_CIB_COMMAND)) {
+ crm_err("Unrecognizable message from remote client");
+ crm_log_xml_info(login, "bad");
return FALSE;
}
@@ -296,7 +298,7 @@ cib_remote_listen(gpointer data)
memset(&addr, 0, sizeof(addr));
csock = accept(ssock, (struct sockaddr *)&addr, &laddr);
if (csock == -1) {
- crm_perror(LOG_ERR, "Could not accept socket connection");
+ crm_err("Could not accept socket connection: %s", pcmk_rc_str(errno));
return TRUE;
}
@@ -411,9 +413,8 @@ cib_handle_remote_msg(pcmk__client_t *client, xmlNode *command)
{
const char *value = NULL;
- value = crm_element_name(command);
- if (!pcmk__str_eq(value, "cib_command", pcmk__str_casei)) {
- crm_log_xml_trace(command, "Bad command: ");
+ if (!pcmk__xe_is(command, T_CIB_COMMAND)) {
+ crm_log_xml_trace(command, "bad");
return;
}
diff --git a/daemons/based/based_transaction.c b/daemons/based/based_transaction.c
new file mode 100644
index 0000000..89aea2e
--- /dev/null
+++ b/daemons/based/based_transaction.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <glib.h>
+#include <libxml/tree.h>
+
+#include "pacemaker-based.h"
+
+/*!
+ * \internal
+ * \brief Create a string describing the source of a commit-transaction request
+ *
+ * \param[in] client CIB client
+ * \param[in] origin Host where the commit request originated
+ *
+ * \return String describing the request source
+ *
+ * \note The caller is responsible for freeing the return value using \c free().
+ */
+char *
+based_transaction_source_str(const pcmk__client_t *client, const char *origin)
+{
+ char *source = NULL;
+
+ if (client != NULL) {
+ source = crm_strdup_printf("client %s (%s)%s%s",
+ pcmk__client_name(client),
+ pcmk__s(client->id, "unidentified"),
+ ((origin != NULL)? " on " : ""),
+ pcmk__s(origin, ""));
+
+ } else {
+ source = strdup((origin != NULL)? origin : "unknown source");
+ }
+
+ CRM_ASSERT(source != NULL);
+ return source;
+}
+
+/*!
+ * \internal
+ * \brief Process requests in a transaction
+ *
+ * Stop when a request fails or when all requests have been processed.
+ *
+ * \param[in,out] transaction Transaction to process
+ * \param[in] client CIB client
+ * \param[in] source String describing the commit request source
+ *
+ * \return Standard Pacemaker return code
+ */
+static int
+process_transaction_requests(xmlNodePtr transaction,
+ const pcmk__client_t *client, const char *source)
+{
+ for (xmlNodePtr request = first_named_child(transaction, T_CIB_COMMAND);
+ request != NULL; request = crm_next_same_xml(request)) {
+
+ const char *op = crm_element_value(request, F_CIB_OPERATION);
+ const char *host = crm_element_value(request, F_CIB_HOST);
+ const cib__operation_t *operation = NULL;
+ int rc = cib__get_operation(op, &operation);
+
+ if (rc == pcmk_rc_ok) {
+ if (!pcmk_is_set(operation->flags, cib__op_attr_transaction)
+ || (host != NULL)) {
+
+ rc = EOPNOTSUPP;
+ } else {
+ /* Commit-transaction is a privileged operation. If we reached
+ * this point, the request came from a privileged connection.
+ */
+ rc = cib_process_request(request, TRUE, client);
+ rc = pcmk_legacy2rc(rc);
+ }
+ }
+
+ if (rc != pcmk_rc_ok) {
+ crm_err("Aborting CIB transaction for %s due to failed %s request: "
+ "%s",
+ source, op, pcmk_rc_str(rc));
+ crm_log_xml_info(request, "Failed request");
+ return rc;
+ }
+
+ crm_trace("Applied %s request to transaction working CIB for %s",
+ op, source);
+ crm_log_xml_trace(request, "Successful request");
+ }
+
+ return pcmk_rc_ok;
+}
+
+/*!
+ * \internal
+ * \brief Commit a given CIB client's transaction to a working CIB copy
+ *
+ * \param[in] transaction Transaction to commit
+ * \param[in] client CIB client
+ * \param[in] origin Host where the commit request originated
+ * \param[in,out] result_cib Where to store result CIB
+ *
+ * \return Standard Pacemaker return code
+ *
+ * \note This function is expected to be called only by
+ * \p cib_process_commit_transaction().
+ * \note \p result_cib is expected to be a copy of the current CIB as created by
+ * \p cib_perform_op().
+ * \note The caller is responsible for activating and syncing \p result_cib on
+ * success, and for freeing it on failure.
+ */
+int
+based_commit_transaction(xmlNodePtr transaction, const pcmk__client_t *client,
+ const char *origin, xmlNodePtr *result_cib)
+{
+ xmlNodePtr saved_cib = the_cib;
+ int rc = pcmk_rc_ok;
+ char *source = NULL;
+
+ CRM_ASSERT(result_cib != NULL);
+
+ CRM_CHECK(pcmk__xe_is(transaction, T_CIB_TRANSACTION),
+ return pcmk_rc_no_transaction);
+
+ /* *result_cib should be a copy of the_cib (created by cib_perform_op()). If
+ * not, make a copy now. Change tracking isn't strictly required here
+ * because:
+ * * Each request in the transaction will have changes tracked and ACLs
+ * checked if appropriate.
+ * * cib_perform_op() will infer changes for the commit request at the end.
+ */
+ CRM_CHECK((*result_cib != NULL) && (*result_cib != the_cib),
+ *result_cib = copy_xml(the_cib));
+
+ source = based_transaction_source_str(client, origin);
+ crm_trace("Committing transaction for %s to working CIB", source);
+
+ // Apply all changes to a working copy of the CIB
+ the_cib = *result_cib;
+
+ rc = process_transaction_requests(transaction, client, origin);
+
+ crm_trace("Transaction commit %s for %s",
+ ((rc == pcmk_rc_ok)? "succeeded" : "failed"), source);
+
+ /* Some request types (for example, erase) may have freed the_cib (the
+ * working copy) and pointed it at a new XML object. In that case, it
+ * follows that *result_cib (the working copy) was freed.
+ *
+ * Point *result_cib at the updated working copy stored in the_cib.
+ */
+ *result_cib = the_cib;
+
+ // Point the_cib back to the unchanged original copy
+ the_cib = saved_cib;
+
+ free(source);
+ return rc;
+}
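process_transaction_requests() walks the T_CIB_COMMAND children of a T_CIB_TRANSACTION element, so a transaction payload built with the same XML helpers would look roughly like the following (an illustration of the expected shape, not the client-side API):

xmlNode *transaction = create_xml_node(NULL, T_CIB_TRANSACTION);
xmlNode *request = create_xml_node(transaction, T_CIB_COMMAND);

crm_xml_add(request, F_CIB_OPERATION, PCMK__CIB_REQUEST_MODIFY);
// plus F_CIB_SECTION, F_CIB_CALLDATA, etc., as for a standalone request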
diff --git a/daemons/based/based_transaction.h b/daemons/based/based_transaction.h
new file mode 100644
index 0000000..9935c73
--- /dev/null
+++ b/daemons/based/based_transaction.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef BASED_TRANSACTION__H
+#define BASED_TRANSACTION__H
+
+#include <crm_internal.h>
+
+#include <libxml/tree.h>
+
+char *based_transaction_source_str(const pcmk__client_t *client,
+ const char *origin);
+
+int based_commit_transaction(xmlNodePtr transaction,
+ const pcmk__client_t *client,
+ const char *origin, xmlNodePtr *result_cib);
+
+#endif // BASED_TRANSACTION__H
diff --git a/daemons/based/pacemaker-based.c b/daemons/based/pacemaker-based.c
index 129997e..5dd7938 100644
--- a/daemons/based/pacemaker-based.c
+++ b/daemons/based/pacemaker-based.c
@@ -16,7 +16,8 @@
#include <bzlib.h>
#include <sys/types.h>
-#include <libxml/parser.h>
+#include <glib.h>
+#include <libxml/tree.h>
#include <crm/crm.h>
#include <crm/cib/internal.h>
@@ -42,6 +43,7 @@ gchar *cib_root = NULL;
static gboolean preserve_status = FALSE;
gboolean cib_writes_enabled = TRUE;
+gboolean stand_alone = FALSE;
int remote_fd = 0;
int remote_tls_fd = 0;
@@ -49,8 +51,6 @@ int remote_tls_fd = 0;
GHashTable *config_hash = NULL;
GHashTable *local_notify_queue = NULL;
-pcmk__output_t *logger_out = NULL;
-
static void cib_init(void);
void cib_shutdown(int nsig);
static bool startCib(const char *filename);
@@ -197,15 +197,6 @@ main(int argc, char **argv)
goto done;
}
- rc = pcmk__log_output_new(&logger_out);
- if (rc != pcmk_rc_ok) {
- exit_code = CRM_EX_ERROR;
- g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
- "Error creating output format log: %s", pcmk_rc_str(rc));
- goto done;
- }
- pcmk__output_set_log_level(logger_out, LOG_TRACE);
-
mainloop_add_signal(SIGTERM, cib_shutdown);
mainloop_add_signal(SIGPIPE, cib_enable_writes);
@@ -230,7 +221,7 @@ main(int argc, char **argv)
goto done;
}
- if (crm_ipc_connect(old_instance)) {
+ if (pcmk__connect_generic_ipc(old_instance) == pcmk_rc_ok) {
/* IPC end-point already up */
crm_ipc_close(old_instance);
crm_ipc_destroy(old_instance);
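
The switch from crm_ipc_connect() to pcmk__connect_generic_ipc() keeps the same single-instance check: if connecting to the daemon's IPC endpoint succeeds, another pacemaker-based is already running and the new process exits. The sketch below shows the general idea with a plain POSIX Unix-domain socket rather than Pacemaker's libqb-based IPC; the socket path and function name are invented for illustration.

#include <stdbool.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

static bool
another_instance_running(const char *path)
{
    struct sockaddr_un addr;
    int fd = socket(AF_UNIX, SOCK_STREAM, 0);
    bool running = false;

    if (fd < 0) {
        return false;                   /* cannot tell; assume not running */
    }
    memset(&addr, 0, sizeof(addr));
    addr.sun_family = AF_UNIX;
    strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);

    /* A successful connect means something is already listening here */
    running = (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) == 0);
    close(fd);
    return running;
}

int
main(void)
{
    return another_instance_running("/tmp/demo-daemon.sock")? 1 : 0;
}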
diff --git a/daemons/based/pacemaker-based.h b/daemons/based/pacemaker-based.h
index 05e49b3..33c7642 100644
--- a/daemons/based/pacemaker-based.h
+++ b/daemons/based/pacemaker-based.h
@@ -18,6 +18,9 @@
#include <errno.h>
#include <fcntl.h>
+#include <glib.h>
+#include <libxml/tree.h>
+
#include <crm/crm.h>
#include <crm/cib.h>
#include <crm/common/xml.h>
@@ -26,16 +29,19 @@
#include <crm/common/mainloop.h>
#include <crm/cib/internal.h>
+#include "based_transaction.h"
+
#ifdef HAVE_GNUTLS_GNUTLS_H
# include <gnutls/gnutls.h>
#endif
+#define OUR_NODENAME (stand_alone? "localhost" : crm_cluster->uname)
+
// CIB-specific client flags
enum cib_client_flags {
// Notifications
cib_notify_pre = (UINT64_C(1) << 0),
cib_notify_post = (UINT64_C(1) << 1),
- cib_notify_replace = (UINT64_C(1) << 2),
cib_notify_confirm = (UINT64_C(1) << 3),
cib_notify_diff = (UINT64_C(1) << 4),
@@ -43,16 +49,6 @@ enum cib_client_flags {
cib_is_daemon = (UINT64_C(1) << 12),
};
-typedef struct cib_operation_s {
- const char *operation;
- gboolean modifies_cib;
- gboolean needs_privileges;
- int (*prepare) (xmlNode *, xmlNode **, const char **);
- int (*cleanup) (int, xmlNode **, xmlNode **);
- int (*fn) (const char *, int, const char *, xmlNode *,
- xmlNode *, xmlNode *, xmlNode **, xmlNode **);
-} cib_operation_t;
-
extern bool based_is_primary;
extern GHashTable *config_hash;
extern xmlNode *the_cib;
@@ -67,7 +63,6 @@ extern gboolean stand_alone;
extern gboolean cib_shutdown_flag;
extern gchar *cib_root;
extern int cib_status;
-extern pcmk__output_t *logger_out;
extern struct qb_ipcs_service_handlers ipc_ro_callbacks;
extern struct qb_ipcs_service_handlers ipc_rw_callbacks;
@@ -79,6 +74,8 @@ void cib_peer_callback(xmlNode *msg, void *private_data);
void cib_common_callback_worker(uint32_t id, uint32_t flags,
xmlNode *op_request, pcmk__client_t *cib_client,
gboolean privileged);
+int cib_process_request(xmlNode *request, gboolean privileged,
+ const pcmk__client_t *cib_client);
void cib_shutdown(int nsig);
void terminate_cib(const char *caller, int fast);
gboolean cib_legacy_mode(void);
@@ -92,9 +89,9 @@ int cib_process_shutdown_req(const char *op, int options, const char *section,
xmlNode *req, xmlNode *input,
xmlNode *existing_cib, xmlNode **result_cib,
xmlNode **answer);
-int cib_process_default(const char *op, int options, const char *section,
- xmlNode *req, xmlNode *input, xmlNode *existing_cib,
- xmlNode **result_cib, xmlNode **answer);
+int cib_process_noop(const char *op, int options, const char *section,
+ xmlNode *req, xmlNode *input, xmlNode *existing_cib,
+ xmlNode **result_cib, xmlNode **answer);
int cib_process_ping(const char *op, int options, const char *section,
xmlNode *req, xmlNode *input, xmlNode *existing_cib,
xmlNode **result_cib, xmlNode **answer);
@@ -121,25 +118,17 @@ int cib_process_upgrade_server(const char *op, int options, const char *section,
xmlNode *req, xmlNode *input,
xmlNode *existing_cib, xmlNode **result_cib,
xmlNode **answer);
+int cib_process_commit_transaction(const char *op, int options,
+ const char *section, xmlNode *req,
+ xmlNode *input, xmlNode *existing_cib,
+ xmlNode **result_cib, xmlNode **answer);
void send_sync_request(const char *host);
int sync_our_cib(xmlNode *request, gboolean all);
-xmlNode *cib_msg_copy(xmlNode *msg, gboolean with_data);
-int cib_get_operation_id(const char *op, int *operation);
-cib_op_t *cib_op_func(int call_type);
-gboolean cib_op_modifies(int call_type);
-int cib_op_prepare(int call_type, xmlNode *request, xmlNode **input,
- const char **section);
-int cib_op_cleanup(int call_type, int options, xmlNode **input,
- xmlNode **output);
-int cib_op_can_run(int call_type, int call_options, bool privileged);
+cib__op_fn_t based_get_op_function(const cib__operation_t *operation);
void cib_diff_notify(const char *op, int result, const char *call_id,
const char *client_id, const char *client_name,
const char *origin, xmlNode *update, xmlNode *diff);
-void cib_replace_notify(const char *op, int result, const char *call_id,
- const char *client_id, const char *client_name,
- const char *origin, xmlNode *update, xmlNode *diff,
- uint32_t change_section);
static inline const char *
cib_config_lookup(const char *opt)
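
The cib_client_flags values above follow the usual convention of one flag per bit in a 64-bit word, so registering and testing a client's notification interests are single bitwise operations. A minimal sketch of that convention, with invented flag names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum demo_flags {
    demo_notify_pre  = (UINT64_C(1) << 0),
    demo_notify_post = (UINT64_C(1) << 1),
    demo_is_daemon   = (UINT64_C(1) << 12),
};

static bool
all_set(uint64_t word, uint64_t flags)
{
    return (word & flags) == flags;
}

int
main(void)
{
    uint64_t client_flags = 0;

    client_flags |= demo_notify_post | demo_is_daemon;   /* register interests */
    printf("daemon client: %s\n",
           all_set(client_flags, demo_is_daemon)? "yes" : "no");
    printf("wants pre-notify: %s\n",
           all_set(client_flags, demo_notify_pre)? "yes" : "no");
    return 0;
}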
diff --git a/daemons/controld/Makefile.am b/daemons/controld/Makefile.am
index 08be1ff..1312090 100644
--- a/daemons/controld/Makefile.am
+++ b/daemons/controld/Makefile.am
@@ -14,34 +14,20 @@ halibdir = $(CRM_DAEMON_DIR)
halib_PROGRAMS = pacemaker-controld
-noinst_HEADERS = controld_alerts.h \
- controld_callbacks.h \
- controld_cib.h \
- controld_fencing.h \
- controld_fsa.h \
- controld_globals.h \
- controld_lrm.h \
- controld_membership.h \
- controld_messages.h \
- controld_metadata.h \
- controld_throttle.h \
- controld_timers.h \
- controld_transition.h \
- controld_utils.h \
- pacemaker-controld.h
+noinst_HEADERS = $(wildcard *.h)
pacemaker_controld_CFLAGS = $(CFLAGS_HARDENED_EXE)
pacemaker_controld_LDFLAGS = $(LDFLAGS_HARDENED_EXE)
-pacemaker_controld_LDADD = $(top_builddir)/lib/fencing/libstonithd.la \
- $(top_builddir)/lib/pacemaker/libpacemaker.la \
- $(top_builddir)/lib/pengine/libpe_rules.la \
- $(top_builddir)/lib/cib/libcib.la \
- $(top_builddir)/lib/cluster/libcrmcluster.la \
- $(top_builddir)/lib/common/libcrmcommon.la \
- $(top_builddir)/lib/services/libcrmservice.la \
- $(top_builddir)/lib/lrmd/liblrmd.la \
- $(CLUSTERLIBS)
+pacemaker_controld_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la
+pacemaker_controld_LDADD += $(top_builddir)/lib/cib/libcib.la
+pacemaker_controld_LDADD += $(top_builddir)/lib/pengine/libpe_rules.la
+pacemaker_controld_LDADD += $(top_builddir)/lib/fencing/libstonithd.la
+pacemaker_controld_LDADD += $(top_builddir)/lib/cluster/libcrmcluster.la
+pacemaker_controld_LDADD += $(top_builddir)/lib/lrmd/liblrmd.la
+pacemaker_controld_LDADD += $(top_builddir)/lib/services/libcrmservice.la
+pacemaker_controld_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
+pacemaker_controld_LDADD += $(CLUSTERLIBS)
pacemaker_controld_SOURCES = pacemaker-controld.c \
controld_alerts.c \
@@ -79,9 +65,11 @@ endif
CLEANFILES = $(man7_MANS)
if BUILD_LEGACY_LINKS
+.PHONY: install-exec-hook
install-exec-hook:
cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f crmd && $(LN_S) pacemaker-controld crmd
+.PHONY: uninstall-hook
uninstall-hook:
cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f crmd
endif
diff --git a/daemons/controld/controld_callbacks.c b/daemons/controld/controld_callbacks.c
index d578adc..7078739 100644
--- a/daemons/controld/controld_callbacks.c
+++ b/daemons/controld/controld_callbacks.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -107,6 +107,8 @@ peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *d
bool appeared = FALSE;
bool is_remote = pcmk_is_set(node->flags, crm_remote_node);
+ controld_node_pending_timer(node);
+
/* The controller waits to receive some information from the membership
* layer before declaring itself operational. If this is being called for a
* cluster node, indicate that we have it.
@@ -274,13 +276,14 @@ peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *d
if (down) {
const char *task = crm_element_value(down->xml, XML_LRM_ATTR_TASK);
- if (pcmk__str_eq(task, CRM_OP_FENCE, pcmk__str_casei)) {
+ if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_casei)) {
/* tengine_stonith_callback() confirms fence actions */
crm_trace("Updating CIB %s fencer reported fencing of %s complete",
(pcmk_is_set(down->flags, pcmk__graph_action_confirmed)? "after" : "before"), node->uname);
- } else if (!appeared && pcmk__str_eq(task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
+ } else if (!appeared && pcmk__str_eq(task, PCMK_ACTION_DO_SHUTDOWN,
+ pcmk__str_casei)) {
// Shutdown actions are immediately confirmed (i.e. no_wait)
if (!is_remote) {
@@ -342,6 +345,17 @@ peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *d
}
}
+ if (!appeared && (type == crm_status_processes)
+ && (node->when_member > 1)) {
+ /* The node left CPG but is still a cluster member. Set its
+ * membership time to 1 to record it in the cluster state as a
+ * boolean, so we don't fence it due to node-pending-timeout.
+ */
+ node->when_member = 1;
+ flags |= node_update_cluster;
+ controld_node_pending_timer(node);
+ }
+
/* Update the CIB node state */
update = create_node_state_update(node, flags, NULL, __func__);
if (update == NULL) {
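
The when_member handling above relies on an encoding where 0 means the node was never a member, 1 means it is a member but its join time is no longer meaningful (it left the process group), and anything larger is the epoch time it joined; only the last case can still trip node-pending-timeout. A self-contained sketch of that encoding, with hypothetical names:

#include <stdbool.h>
#include <time.h>

struct demo_node {
    time_t when_member;      /* 0, 1, or a real epoch timestamp */
    bool   in_process_group; /* has it joined the controller's CPG group? */
};

static bool
pending_timeout_expired(const struct demo_node *node, time_t timeout_s)
{
    if ((timeout_s == 0) || node->in_process_group) {
        return false;        /* feature disabled, or the node already joined */
    }
    if (node->when_member <= 1) {
        return false;        /* no meaningful join time to measure from */
    }
    return (time(NULL) - node->when_member) >= timeout_s;
}

int
main(void)
{
    struct demo_node n = { .when_member = 1, .in_process_group = false };

    return pending_timeout_expired(&n, 120)? 1 : 0;   /* marker value never expires */
}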
diff --git a/daemons/controld/controld_cib.c b/daemons/controld/controld_cib.c
index 94b99dd..865e41f 100644
--- a/daemons/controld/controld_cib.c
+++ b/daemons/controld/controld_cib.c
@@ -22,90 +22,6 @@
// Call ID of the most recent in-progress CIB resource update (or 0 if none)
static int pending_rsc_update = 0;
-// Call IDs of requested CIB replacements that won't trigger a new election
-// (used as a set of gint values)
-static GHashTable *cib_replacements = NULL;
-
-/*!
- * \internal
- * \brief Store the call ID of a CIB replacement that the controller requested
- *
- * The \p do_cib_replaced() callback function will avoid triggering a new
- * election when we're notified of one of these expected replacements.
- *
- * \param[in] call_id CIB call ID (or 0 for a synchronous call)
- *
- * \note This function should be called after making any asynchronous CIB
- * request (or before making any synchronous CIB request) that may replace
- * part of the nodes or status section. This may include CIB sync calls.
- */
-void
-controld_record_cib_replace_call(int call_id)
-{
- CRM_CHECK(call_id >= 0, return);
-
- if (cib_replacements == NULL) {
- cib_replacements = g_hash_table_new(NULL, NULL);
- }
-
- /* If the call ID is already present in the table, then it's old. We may not
- * be removing them properly, and we could improperly ignore replacement
- * notifications if cib_t:call_id wraps around.
- */
- CRM_LOG_ASSERT(g_hash_table_add(cib_replacements,
- GINT_TO_POINTER((gint) call_id)));
-}
-
-/*!
- * \internal
- * \brief Remove the call ID of a CIB replacement from the replacements table
- *
- * \param[in] call_id CIB call ID (or 0 for a synchronous call)
- *
- * \return \p true if \p call_id was found in the table, or \p false otherwise
- *
- * \note CIB notifications run before CIB callbacks. If this function is called
- * from within a callback, \p do_cib_replaced() will have removed
- * \p call_id from the table first if relevant changes triggered a
- * notification.
- */
-bool
-controld_forget_cib_replace_call(int call_id)
-{
- CRM_CHECK(call_id >= 0, return false);
-
- if (cib_replacements == NULL) {
- return false;
- }
- return g_hash_table_remove(cib_replacements,
- GINT_TO_POINTER((gint) call_id));
-}
-
-/*!
- * \internal
- * \brief Empty the hash table containing call IDs of CIB replacement requests
- */
-void
-controld_forget_all_cib_replace_calls(void)
-{
- if (cib_replacements != NULL) {
- g_hash_table_remove_all(cib_replacements);
- }
-}
-
-/*!
- * \internal
- * \brief Free the hash table containing call IDs of CIB replacement requests
- */
-void
-controld_destroy_cib_replacements_table(void)
-{
- if (cib_replacements != NULL) {
- g_hash_table_destroy(cib_replacements);
- cib_replacements = NULL;
- }
-}
-
/*!
* \internal
* \brief Respond to a dropped CIB connection
@@ -127,54 +43,54 @@ handle_cib_disconnect(gpointer user_data)
controld_clear_fsa_input_flags(R_CIB_CONNECTED);
} else { // Expected
- crm_info("Connection to the CIB manager terminated");
+ crm_info("Disconnected from the CIB manager");
}
}
static void
do_cib_updated(const char *event, xmlNode * msg)
{
- if (pcmk__alert_in_patchset(msg, TRUE)) {
- controld_trigger_config();
+ const xmlNode *patchset = NULL;
+ const char *client_name = NULL;
+
+ crm_debug("Received CIB diff notification: DC=%s", pcmk__btoa(AM_I_DC));
+
+ if (cib__get_notify_patchset(msg, &patchset) != pcmk_rc_ok) {
+ return;
}
-}
-static void
-do_cib_replaced(const char *event, xmlNode * msg)
-{
- int call_id = 0;
- const char *client_id = crm_element_value(msg, F_CIB_CLIENTID);
- uint32_t change_section = cib_change_section_nodes
- |cib_change_section_status;
- long long value = 0;
+ if (cib__element_in_patchset(patchset, XML_CIB_TAG_ALERTS)
+ || cib__element_in_patchset(patchset, XML_CIB_TAG_CRMCONFIG)) {
+
+ controld_trigger_config();
+ }
- crm_debug("Updating the CIB after a replace: DC=%s", pcmk__btoa(AM_I_DC));
if (!AM_I_DC) {
+ // We're not in control of the join sequence
return;
}
- if ((crm_element_value_int(msg, F_CIB_CALLID, &call_id) == 0)
- && pcmk__str_eq(client_id, controld_globals.cib_client_id,
- pcmk__str_none)
- && controld_forget_cib_replace_call(call_id)) {
- // We requested this replace op. No need to restart the join.
+ client_name = crm_element_value(msg, F_CIB_CLIENTNAME);
+ if (!cib__client_triggers_refresh(client_name)) {
+ // The CIB is still accurate
return;
}
- if ((crm_element_value_ll(msg, F_CIB_CHANGE_SECTION, &value) < 0)
- || (value < 0) || (value > UINT32_MAX)) {
+ if (cib__element_in_patchset(patchset, XML_CIB_TAG_NODES)
+ || cib__element_in_patchset(patchset, XML_CIB_TAG_STATUS)) {
- crm_trace("Couldn't parse '%s' from message", F_CIB_CHANGE_SECTION);
- } else {
- change_section = (uint32_t) value;
- }
-
- if (pcmk_any_flags_set(change_section, cib_change_section_nodes
- |cib_change_section_status)) {
+ /* An unsafe client modified the nodes or status section. Ensure the
+ * node list is up-to-date, and start the join process again so we get
+ * everyone's current resource history.
+ */
+ if (client_name == NULL) {
+ client_name = crm_element_value(msg, F_CIB_CLIENTID);
+ }
+ crm_notice("Populating nodes and starting an election after %s event "
+ "triggered by %s",
+ event, pcmk__s(client_name, "(unidentified client)"));
- /* start the join process again so we get everyone's LRM status */
populate_cib_nodes(node_update_quick|node_update_all, __func__);
-
register_fsa_input(C_FSA_INTERNAL, I_ELECTION, NULL);
}
}
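
The reworked do_cib_updated() above no longer tracks which call ID produced a change; it asks which CIB sections appear in the diff and which client sent it, then decides between re-reading options, doing nothing, or refreshing the node list and re-running the election. The sketch below mirrors that decision flow without the Pacemaker APIs; section_changed(), client_triggers_refresh(), and the client names in the allowlist are simplified stand-ins.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Changed sections are modeled as a NULL-terminated list of element names */
static bool
section_changed(const char *const *changed, const char *section)
{
    for (int i = 0; changed[i] != NULL; i++) {
        if (strcmp(changed[i], section) == 0) {
            return true;
        }
    }
    return false;
}

/* Stand-in for cib__client_triggers_refresh(): clients known to keep the
 * controller's view accurate do not force a refresh; anything else does.
 */
static bool
client_triggers_refresh(const char *client_name)
{
    static const char *safe[] = { "pacemaker-controld", "pacemaker-attrd" };

    if (client_name != NULL) {
        for (size_t i = 0; i < sizeof(safe) / sizeof(safe[0]); i++) {
            if (strcmp(client_name, safe[i]) == 0) {
                return false;
            }
        }
    }
    return true;
}

static void
handle_cib_diff(const char *const *changed, const char *client, bool is_dc)
{
    if (section_changed(changed, "alerts")
        || section_changed(changed, "crm_config")) {
        puts("re-read cluster options");
    }
    if (!is_dc) {
        return;                 /* only the DC manages the join sequence */
    }
    if (client_triggers_refresh(client)
        && (section_changed(changed, "nodes")
            || section_changed(changed, "status"))) {
        puts("repopulate node entries and start a new election");
    }
}

int
main(void)
{
    const char *changed[] = { "status", NULL };

    handle_cib_diff(changed, "cibadmin", true);
    return 0;
}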
@@ -186,12 +102,10 @@ controld_disconnect_cib_manager(void)
CRM_ASSERT(cib_conn != NULL);
- crm_info("Disconnecting from the CIB manager");
+ crm_debug("Disconnecting from the CIB manager");
controld_clear_fsa_input_flags(R_CIB_CONNECTED);
- cib_conn->cmds->del_notify_callback(cib_conn, T_CIB_REPLACE_NOTIFY,
- do_cib_replaced);
cib_conn->cmds->del_notify_callback(cib_conn, T_CIB_DIFF_NOTIFY,
do_cib_updated);
cib_free_callbacks(cib_conn);
@@ -201,8 +115,6 @@ controld_disconnect_cib_manager(void)
cib_scope_local|cib_discard_reply);
cib_conn->cmds->signoff(cib_conn);
}
-
- crm_notice("Disconnected from the CIB manager");
}
/* A_CIB_STOP, A_CIB_START, O_CIB_RESTART */
@@ -217,7 +129,6 @@ do_cib_control(long long action,
cib_t *cib_conn = controld_globals.cib_conn;
void (*dnotify_fn) (gpointer user_data) = handle_cib_disconnect;
- void (*replace_cb) (const char *event, xmlNodePtr msg) = do_cib_replaced;
void (*update_cb) (const char *event, xmlNodePtr msg) = do_cib_updated;
int rc = pcmk_ok;
@@ -264,11 +175,6 @@ do_cib_control(long long action,
crm_err("Could not set dnotify callback");
} else if (cib_conn->cmds->add_notify_callback(cib_conn,
- T_CIB_REPLACE_NOTIFY,
- replace_cb) != pcmk_ok) {
- crm_err("Could not set CIB notification callback (replace)");
-
- } else if (cib_conn->cmds->add_notify_callback(cib_conn,
T_CIB_DIFF_NOTIFY,
update_cb) != pcmk_ok) {
crm_err("Could not set CIB notification callback (update)");
@@ -276,8 +182,6 @@ do_cib_control(long long action,
} else {
controld_set_fsa_input_flags(R_CIB_CONNECTED);
cib_retries = 0;
- cib_conn->cmds->client_id(cib_conn, &controld_globals.cib_client_id,
- NULL);
}
if (!pcmk_is_set(controld_globals.fsa_input_register, R_CIB_CONNECTED)) {
@@ -310,11 +214,12 @@ do_cib_control(long long action,
unsigned int
cib_op_timeout(void)
{
+ // @COMPAT: Drop env_timeout at 3.0.0
static int env_timeout = -1;
unsigned int calculated_timeout = 0;
if (env_timeout == -1) {
- const char *env = getenv("PCMK_cib_timeout");
+ const char *env = pcmk__env_option(PCMK__ENV_CIB_TIMEOUT);
pcmk__scan_min_int(env, &env_timeout, MIN_CIB_OP_TIMEOUT);
crm_trace("Minimum CIB op timeout: %ds (environment: %s)",
@@ -401,67 +306,87 @@ cib_delete_callback(xmlNode *msg, int call_id, int rc, xmlNode *output,
/*!
* \internal
- * \brief Delete subsection of a node's CIB node_state
+ * \brief Get the XPath and description of a node state section to be deleted
*
- * \param[in] uname Desired node
- * \param[in] section Subsection of node_state to delete
- * \param[in] options CIB call options to use
+ * \param[in] uname Desired node
+ * \param[in] section Subsection of node_state to be deleted
+ * \param[out] xpath Where to store XPath of \p section
+ * \param[out] desc If not \c NULL, where to store description of \p section
*/
void
-controld_delete_node_state(const char *uname, enum controld_section_e section,
- int options)
+controld_node_state_deletion_strings(const char *uname,
+ enum controld_section_e section,
+ char **xpath, char **desc)
{
- cib_t *cib_conn = controld_globals.cib_conn;
-
- char *xpath = NULL;
- char *desc = NULL;
+ const char *desc_pre = NULL;
// Shutdown locks that started before this time are expired
long long expire = (long long) time(NULL)
- controld_globals.shutdown_lock_limit;
- CRM_CHECK(uname != NULL, return);
switch (section) {
case controld_section_lrm:
- xpath = crm_strdup_printf(XPATH_NODE_LRM, uname);
- desc = crm_strdup_printf("resource history for node %s", uname);
+ *xpath = crm_strdup_printf(XPATH_NODE_LRM, uname);
+ desc_pre = "resource history";
break;
case controld_section_lrm_unlocked:
- xpath = crm_strdup_printf(XPATH_NODE_LRM_UNLOCKED,
- uname, uname, expire);
- desc = crm_strdup_printf("resource history (other than shutdown "
- "locks) for node %s", uname);
+ *xpath = crm_strdup_printf(XPATH_NODE_LRM_UNLOCKED,
+ uname, uname, expire);
+ desc_pre = "resource history (other than shutdown locks)";
break;
case controld_section_attrs:
- xpath = crm_strdup_printf(XPATH_NODE_ATTRS, uname);
- desc = crm_strdup_printf("transient attributes for node %s", uname);
+ *xpath = crm_strdup_printf(XPATH_NODE_ATTRS, uname);
+ desc_pre = "transient attributes";
break;
case controld_section_all:
- xpath = crm_strdup_printf(XPATH_NODE_ALL, uname);
- desc = crm_strdup_printf("all state for node %s", uname);
+ *xpath = crm_strdup_printf(XPATH_NODE_ALL, uname);
+ desc_pre = "all state";
break;
case controld_section_all_unlocked:
- xpath = crm_strdup_printf(XPATH_NODE_ALL_UNLOCKED,
- uname, uname, expire, uname);
- desc = crm_strdup_printf("all state (other than shutdown locks) "
- "for node %s", uname);
+ *xpath = crm_strdup_printf(XPATH_NODE_ALL_UNLOCKED,
+ uname, uname, expire, uname);
+ desc_pre = "all state (other than shutdown locks)";
+ break;
+ default:
+ // We called this function incorrectly
+ CRM_ASSERT(false);
break;
}
- if (cib_conn == NULL) {
- crm_warn("Unable to delete %s: no CIB connection", desc);
- free(desc);
- } else {
- int call_id;
-
- cib__set_call_options(options, "node state deletion",
- cib_xpath|cib_multiple);
- call_id = cib_conn->cmds->remove(cib_conn, xpath, NULL, options);
- crm_info("Deleting %s (via CIB call %d) " CRM_XS " xpath=%s",
- desc, call_id, xpath);
- fsa_register_cib_callback(call_id, desc, cib_delete_callback);
- // CIB library handles freeing desc
+ if (desc != NULL) {
+ *desc = crm_strdup_printf("%s for node %s", desc_pre, uname);
}
+}
+
+/*!
+ * \internal
+ * \brief Delete subsection of a node's CIB node_state
+ *
+ * \param[in] uname Desired node
+ * \param[in] section Subsection of node_state to delete
+ * \param[in] options CIB call options to use
+ */
+void
+controld_delete_node_state(const char *uname, enum controld_section_e section,
+ int options)
+{
+ cib_t *cib = controld_globals.cib_conn;
+ char *xpath = NULL;
+ char *desc = NULL;
+ int cib_rc = pcmk_ok;
+
+ CRM_ASSERT((uname != NULL) && (cib != NULL));
+
+ controld_node_state_deletion_strings(uname, section, &xpath, &desc);
+
+ cib__set_call_options(options, "node state deletion",
+ cib_xpath|cib_multiple);
+ cib_rc = cib->cmds->remove(cib, xpath, NULL, options);
+ fsa_register_cib_callback(cib_rc, desc, cib_delete_callback);
+ crm_info("Deleting %s (via CIB call %d) " CRM_XS " xpath=%s",
+ desc, cib_rc, xpath);
+
+ // CIB library handles freeing desc
free(xpath);
}
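
Splitting controld_node_state_deletion_strings() out of controld_delete_node_state() lets other callers reuse the same XPath and description while the deletion itself stays a separate, asynchronous CIB call. A simplified stand-alone sketch of the string-building half; the XPath pattern below is an approximation, not the real XPATH_NODE_* macros:

#include <stdio.h>

enum demo_section { demo_section_lrm, demo_section_attrs };

static void
deletion_strings(const char *uname, enum demo_section section,
                 char *xpath, size_t xpath_len, char *desc, size_t desc_len)
{
    const char *subtree = (section == demo_section_lrm)?
                          "lrm" : "transient_attributes";
    const char *what = (section == demo_section_lrm)?
                       "resource history" : "transient attributes";

    snprintf(xpath, xpath_len, "//node_state[@uname='%s']/%s", uname, subtree);
    snprintf(desc, desc_len, "%s for node %s", what, uname);
}

int
main(void)
{
    char xpath[256];
    char desc[128];

    deletion_strings("node1", demo_section_lrm, xpath, sizeof(xpath),
                     desc, sizeof(desc));
    printf("Deleting %s via %s\n", desc, xpath);
    return 0;
}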
@@ -491,11 +416,12 @@ controld_delete_resource_history(const char *rsc_id, const char *node,
char *desc = NULL;
char *xpath = NULL;
int rc = pcmk_rc_ok;
+ cib_t *cib = controld_globals.cib_conn;
CRM_CHECK((rsc_id != NULL) && (node != NULL), return EINVAL);
desc = crm_strdup_printf("resource history for %s on %s", rsc_id, node);
- if (controld_globals.cib_conn == NULL) {
+ if (cib == NULL) {
crm_err("Unable to clear %s: no CIB connection", desc);
free(desc);
return ENOTCONN;
@@ -503,9 +429,10 @@ controld_delete_resource_history(const char *rsc_id, const char *node,
// Ask CIB to delete the entry
xpath = crm_strdup_printf(XPATH_RESOURCE_HISTORY, node, rsc_id);
- rc = cib_internal_op(controld_globals.cib_conn, PCMK__CIB_REQUEST_DELETE,
- NULL, xpath, NULL, NULL, call_options|cib_xpath,
- user_name);
+
+ cib->cmds->set_user(cib, user_name);
+ rc = cib->cmds->remove(cib, xpath, NULL, call_options|cib_xpath);
+ cib->cmds->set_user(cib, NULL);
if (rc < 0) {
rc = pcmk_legacy2rc(rc);
@@ -841,10 +768,17 @@ cib_rsc_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *use
case pcmk_ok:
case -pcmk_err_diff_failed:
case -pcmk_err_diff_resync:
- crm_trace("Resource update %d complete: rc=%d", call_id, rc);
+ crm_trace("Resource history update completed (call=%d rc=%d)",
+ call_id, rc);
break;
default:
- crm_warn("Resource update %d failed: (rc=%d) %s", call_id, rc, pcmk_strerror(rc));
+ if (call_id > 0) {
+ crm_warn("Resource history update %d failed: %s "
+ CRM_XS " rc=%d", call_id, pcmk_strerror(rc), rc);
+ } else {
+ crm_warn("Resource history update failed: %s " CRM_XS " rc=%d",
+ pcmk_strerror(rc), rc);
+ }
}
if (call_id == pending_rsc_update) {
@@ -863,10 +797,11 @@ should_preserve_lock(lrmd_event_data_t *op)
if (!pcmk_is_set(controld_globals.flags, controld_shutdown_lock_enabled)) {
return false;
}
- if (!strcmp(op->op_type, RSC_STOP) && (op->rc == PCMK_OCF_OK)) {
+ if (!strcmp(op->op_type, PCMK_ACTION_STOP) && (op->rc == PCMK_OCF_OK)) {
return true;
}
- if (!strcmp(op->op_type, RSC_STATUS) && (op->rc == PCMK_OCF_NOT_RUNNING)) {
+ if (!strcmp(op->op_type, PCMK_ACTION_MONITOR)
+ && (op->rc == PCMK_OCF_NOT_RUNNING)) {
return true;
}
return false;
@@ -876,10 +811,10 @@ should_preserve_lock(lrmd_event_data_t *op)
* \internal
* \brief Request a CIB update
*
- * \param[in] section Section of CIB to update
- * \param[in,out] data New XML of CIB section to update
- * \param[in] options CIB call options
- * \param[in] callback If not NULL, set this as the operation callback
+ * \param[in] section Section of CIB to update
+ * \param[in] data New XML of CIB section to update
+ * \param[in] options CIB call options
+ * \param[in] callback If not \c NULL, set this as the operation callback
*
* \return Standard Pacemaker return code
*
@@ -890,14 +825,13 @@ int
controld_update_cib(const char *section, xmlNode *data, int options,
void (*callback)(xmlNode *, int, int, xmlNode *, void *))
{
+ cib_t *cib = controld_globals.cib_conn;
int cib_rc = -ENOTCONN;
CRM_ASSERT(data != NULL);
- if (controld_globals.cib_conn != NULL) {
- cib_rc = cib_internal_op(controld_globals.cib_conn,
- PCMK__CIB_REQUEST_MODIFY, NULL, section,
- data, NULL, options, NULL);
+ if (cib != NULL) {
+ cib_rc = cib->cmds->modify(cib, section, data, options);
if (cib_rc >= 0) {
crm_debug("Submitted CIB update %d for %s section",
cib_rc, section);
@@ -1047,7 +981,6 @@ controld_delete_action_history(const lrmd_event_data_t *op)
controld_globals.cib_conn->cmds->remove(controld_globals.cib_conn,
XML_CIB_TAG_STATUS, xml_top,
cib_none);
-
crm_log_xml_trace(xml_top, "op:cancel");
free_xml(xml_top);
}
@@ -1087,7 +1020,6 @@ controld_cib_delete_last_failure(const char *rsc_id, const char *node,
{
char *xpath = NULL;
char *last_failure_key = NULL;
-
CRM_CHECK((rsc_id != NULL) && (node != NULL), return);
// Generate XPath to match desired entry
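
controld_delete_resource_history() now brackets a single libcib request with set_user(): the identity is attached before the call so ACLs are evaluated against the requesting user, and cleared immediately afterwards regardless of the result. A minimal sketch of that bracket with a made-up command table (not the real cib_t API):

#include <stdio.h>

struct cib_api {
    const char *acl_user;       /* identity requests are evaluated against */
};

static void
cib_set_user(struct cib_api *cib, const char *user)
{
    cib->acl_user = user;
}

static int
cib_remove_xpath(struct cib_api *cib, const char *xpath)
{
    printf("remove %s as %s\n", xpath,
           (cib->acl_user != NULL)? cib->acl_user : "the daemon itself");
    return 0;
}

static int
delete_history_as_user(struct cib_api *cib, const char *xpath,
                       const char *user)
{
    int rc;

    cib_set_user(cib, user);    /* requests now carry this identity for ACLs */
    rc = cib_remove_xpath(cib, xpath);
    cib_set_user(cib, NULL);    /* always clear it, even if the call failed */
    return rc;
}

int
main(void)
{
    struct cib_api cib = { NULL };

    return delete_history_as_user(&cib, "//lrm_resource[@id='rsc1']",
                                  "hacluster");
}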
diff --git a/daemons/controld/controld_cib.h b/daemons/controld/controld_cib.h
index bd9492a..dcc5a48 100644
--- a/daemons/controld/controld_cib.h
+++ b/daemons/controld/controld_cib.h
@@ -43,11 +43,6 @@ fsa_cib_anon_update_discard_reply(const char *section, xmlNode *data) {
}
}
-void controld_record_cib_replace_call(int call_id);
-bool controld_forget_cib_replace_call(int call_id);
-void controld_forget_all_cib_replace_calls(void);
-void controld_destroy_cib_replacements_table(void);
-
int controld_update_cib(const char *section, xmlNode *data, int options,
void (*callback)(xmlNode *, int, int, xmlNode *,
void *));
@@ -62,6 +57,9 @@ enum controld_section_e {
controld_section_all_unlocked
};
+void controld_node_state_deletion_strings(const char *uname,
+ enum controld_section_e section,
+ char **xpath, char **desc);
void controld_delete_node_state(const char *uname,
enum controld_section_e section, int options);
int controld_delete_resource_history(const char *rsc_id, const char *node,
@@ -118,8 +116,8 @@ int crmd_cib_smart_opt(void);
static inline bool
controld_action_is_recordable(const char *action)
{
- return !pcmk__str_any_of(action, CRMD_ACTION_CANCEL, CRMD_ACTION_DELETE,
- CRMD_ACTION_NOTIFY, CRMD_ACTION_METADATA, NULL);
+ return !pcmk__str_any_of(action, PCMK_ACTION_CANCEL, PCMK_ACTION_DELETE,
+ PCMK_ACTION_NOTIFY, PCMK_ACTION_META_DATA, NULL);
}
#endif // PCMK__CONTROLD_CIB__H
diff --git a/daemons/controld/controld_control.c b/daemons/controld/controld_control.c
index ffc62a0..644d686 100644
--- a/daemons/controld/controld_control.c
+++ b/daemons/controld/controld_control.c
@@ -221,6 +221,7 @@ crmd_exit(crm_exit_t exit_code)
g_list_free(controld_globals.fsa_message_queue);
controld_globals.fsa_message_queue = NULL;
+ controld_free_node_pending_timers();
controld_election_fini();
/* Tear down the CIB manager connection, but don't free it yet -- it could
@@ -265,7 +266,6 @@ crmd_exit(crm_exit_t exit_code)
controld_globals.te_uuid = NULL;
free_max_generation();
- controld_destroy_cib_replacements_table();
controld_destroy_failed_sync_table();
controld_destroy_outside_events_table();
@@ -323,20 +323,12 @@ do_exit(long long action,
enum crmd_fsa_state cur_state, enum crmd_fsa_input current_input, fsa_data_t * msg_data)
{
crm_exit_t exit_code = CRM_EX_OK;
- int log_level = LOG_INFO;
- const char *exit_type = "gracefully";
- if (action & A_EXIT_1) {
- log_level = LOG_ERR;
- exit_type = "forcefully";
+ if (pcmk_is_set(action, A_EXIT_1)) {
exit_code = CRM_EX_ERROR;
+ crm_err("Exiting now due to errors");
}
-
verify_stopped(cur_state, LOG_ERR);
- do_crm_log(log_level, "Performing %s - %s exiting the controller",
- fsa_action2string(action), exit_type);
-
- crm_info("[%s] stopped (%d)", crm_system_name, exit_code);
crmd_exit(exit_code);
}
@@ -504,7 +496,8 @@ do_started(long long action,
} else {
crm_notice("Pacemaker controller successfully started and accepting connections");
}
- controld_trigger_fencer_connect();
+ controld_set_fsa_input_flags(R_ST_REQUIRED);
+ controld_timer_fencer_connect(GINT_TO_POINTER(TRUE));
controld_clear_fsa_input_flags(R_STARTING);
register_fsa_input(msg_data->fsa_cause, I_PENDING, NULL);
@@ -684,6 +677,17 @@ static pcmk__cluster_option_t controller_options[] = {
"passed since the shutdown was initiated, even if the node has not "
"rejoined.")
},
+ {
+ XML_CONFIG_ATTR_NODE_PENDING_TIMEOUT, NULL, "time", NULL,
+ "0", pcmk__valid_interval_spec,
+ N_("How long to wait for a node that has joined the cluster to join "
+ "the controller process group"),
+ N_("Fence nodes that do not join the controller process group within "
+ "this much time after joining the cluster, to allow the cluster "
+ "to continue managing resources. A value of 0 means never fence "
+ "pending nodes. Setting the value to 2h means fence nodes after "
+ "2 hours.")
+ },
};
void
@@ -722,9 +726,8 @@ config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void
}
crmconfig = output;
- if ((crmconfig) &&
- (crm_element_name(crmconfig)) &&
- (strcmp(crm_element_name(crmconfig), XML_CIB_TAG_CRMCONFIG) != 0)) {
+ if ((crmconfig != NULL)
+ && !pcmk__xe_is(crmconfig, XML_CIB_TAG_CRMCONFIG)) {
crmconfig = first_named_child(crmconfig, XML_CIB_TAG_CRMCONFIG);
}
if (!crmconfig) {
@@ -761,6 +764,10 @@ config_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void
controld_globals.shutdown_lock_limit = crm_parse_interval_spec(value)
/ 1000;
+ value = g_hash_table_lookup(config_hash,
+ XML_CONFIG_ATTR_NODE_PENDING_TIMEOUT);
+ controld_globals.node_pending_timeout = crm_parse_interval_spec(value) / 1000;
+
value = g_hash_table_lookup(config_hash, "cluster-name");
pcmk__str_update(&(controld_globals.cluster_name), value);
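
Both shutdown-lock-limit and the new node-pending-timeout are read as interval specs and stored in seconds: the parse yields milliseconds and the callback divides by 1000. The sketch below is a deliberately simplified parser covering only the common unit suffixes; it is not the real crm_parse_interval_spec(), just the general shape of its contract.

#include <stdio.h>
#include <stdlib.h>

static unsigned long
parse_interval_ms(const char *spec)
{
    char *unit = NULL;
    unsigned long value = strtoul(spec, &unit, 10);

    switch (*unit) {
        case 'h':   return value * 60UL * 60UL * 1000UL;
        case 'm':   return value * 60UL * 1000UL;   /* treat bare 'm' as minutes */
        case 's':   return value * 1000UL;
        case '\0':  return value * 1000UL;          /* plain number: seconds */
        default:    return 0;                       /* unrecognized: disabled */
    }
}

int
main(void)
{
    /* node-pending-timeout=2h -> 7200 seconds */
    printf("%lu\n", parse_interval_ms("2h") / 1000);
    return 0;
}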
diff --git a/daemons/controld/controld_corosync.c b/daemons/controld/controld_corosync.c
index 4378b30..b69e821 100644
--- a/daemons/controld/controld_corosync.c
+++ b/daemons/controld/controld_corosync.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -81,9 +81,6 @@ crmd_cs_destroy(gpointer user_data)
if (!pcmk_is_set(controld_globals.fsa_input_register, R_HA_DISCONNECTED)) {
crm_crit("Lost connection to cluster layer, shutting down");
crmd_exit(CRM_EX_DISCONNECT);
-
- } else {
- crm_info("Corosync connection closed");
}
}
@@ -122,7 +119,8 @@ cpg_membership_callback(cpg_handle_t handle, const struct cpg_name *cpg_name,
if (controld_globals.dc_name != NULL) {
crm_node_t *peer = NULL;
- peer = pcmk__search_cluster_node_cache(0, controld_globals.dc_name);
+ peer = pcmk__search_cluster_node_cache(0, controld_globals.dc_name,
+ NULL);
if (peer != NULL) {
for (int i = 0; i < left_list_entries; ++i) {
if (left_list[i].nodeid == peer->id) {
diff --git a/daemons/controld/controld_election.c b/daemons/controld/controld_election.c
index 5f33d5b..70ffecc 100644
--- a/daemons/controld/controld_election.c
+++ b/daemons/controld/controld_election.c
@@ -263,13 +263,6 @@ do_dc_release(long long action,
} else if (action & A_DC_RELEASED) {
crm_info("DC role released");
-#if 0
- if (are there errors) {
- /* we can't stay up if not healthy */
- /* or perhaps I_ERROR and go to S_RECOVER? */
- result = I_SHUTDOWN;
- }
-#endif
if (pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) {
xmlNode *update = NULL;
crm_node_t *node = crm_get_peer(0, controld_globals.our_nodename);
diff --git a/daemons/controld/controld_execd.c b/daemons/controld/controld_execd.c
index 0de399c..480d37d 100644
--- a/daemons/controld/controld_execd.c
+++ b/daemons/controld/controld_execd.c
@@ -52,14 +52,10 @@ static void
lrm_connection_destroy(void)
{
if (pcmk_is_set(controld_globals.fsa_input_register, R_LRM_CONNECTED)) {
- crm_crit("Connection to executor failed");
+ crm_crit("Lost connection to local executor");
register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL);
controld_clear_fsa_input_flags(R_LRM_CONNECTED);
-
- } else {
- crm_info("Disconnected from executor");
}
-
}
static char *
@@ -171,7 +167,7 @@ update_history_cache(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_
return;
}
- if (pcmk__str_eq(op->op_type, RSC_NOTIFY, pcmk__str_casei)) {
+ if (pcmk__str_eq(op->op_type, PCMK_ACTION_NOTIFY, pcmk__str_casei)) {
return;
}
@@ -222,10 +218,10 @@ update_history_cache(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_
}
entry->last = lrmd_copy_event(op);
- if (op->params && pcmk__strcase_any_of(op->op_type, CRMD_ACTION_START,
- CRMD_ACTION_RELOAD,
- CRMD_ACTION_RELOAD_AGENT,
- CRMD_ACTION_STATUS, NULL)) {
+ if (op->params && pcmk__strcase_any_of(op->op_type, PCMK_ACTION_START,
+ PCMK_ACTION_RELOAD,
+ PCMK_ACTION_RELOAD_AGENT,
+ PCMK_ACTION_MONITOR, NULL)) {
if (entry->stop_params) {
g_hash_table_destroy(entry->stop_params);
}
@@ -243,7 +239,9 @@ update_history_cache(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_
op->rsc_id, op->op_type, op->interval_ms);
entry->recurring_op_list = g_list_prepend(entry->recurring_op_list, lrmd_copy_event(op));
- } else if (entry->recurring_op_list && !pcmk__str_eq(op->op_type, RSC_STATUS, pcmk__str_casei)) {
+ } else if ((entry->recurring_op_list != NULL)
+ && !pcmk__str_eq(op->op_type, PCMK_ACTION_MONITOR,
+ pcmk__str_casei)) {
crm_trace("Dropping %d recurring ops because of: " PCMK__OP_FMT,
g_list_length(entry->recurring_op_list), op->rsc_id,
op->op_type, op->interval_ms);
@@ -376,10 +374,8 @@ do_lrm_control(long long action,
}
controld_clear_fsa_input_flags(R_LRM_CONNECTED);
- crm_info("Disconnecting from the executor");
lrm_state_disconnect(lrm_state);
lrm_state_reset_tables(lrm_state, FALSE);
- crm_notice("Disconnected from the executor");
}
if (action & A_LRM_CONNECT) {
@@ -510,11 +506,14 @@ is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id)
crm_trace("Processing %s: %s.%d=%d", rsc_id, entry->last->op_type,
entry->last->interval_ms, entry->last->rc);
- if (entry->last->rc == PCMK_OCF_OK && pcmk__str_eq(entry->last->op_type, CRMD_ACTION_STOP, pcmk__str_casei)) {
+ if ((entry->last->rc == PCMK_OCF_OK)
+ && pcmk__str_eq(entry->last->op_type, PCMK_ACTION_STOP,
+ pcmk__str_casei)) {
return FALSE;
} else if (entry->last->rc == PCMK_OCF_OK
- && pcmk__str_eq(entry->last->op_type, CRMD_ACTION_MIGRATE, pcmk__str_casei)) {
+ && pcmk__str_eq(entry->last->op_type, PCMK_ACTION_MIGRATE_TO,
+ pcmk__str_casei)) {
// A stricter check is too complex ... leave that to the scheduler
return FALSE;
@@ -668,7 +667,7 @@ notify_deleted(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_
crm_info("Notifying %s on %s that %s was%s deleted",
from_sys, (from_host? from_host : "localhost"), rsc_id,
((rc == pcmk_ok)? "" : " not"));
- op = construct_op(lrm_state, input->xml, rsc_id, CRMD_ACTION_DELETE);
+ op = construct_op(lrm_state, input->xml, rsc_id, PCMK_ACTION_DELETE);
controld_rc2event(op, pcmk_legacy2rc(rc));
controld_ack_event_directly(from_host, from_sys, NULL, op, rsc_id);
lrmd_free_event(op);
@@ -1117,7 +1116,8 @@ synthesize_lrmd_failure(lrm_state_t *lrm_state, const xmlNode *action,
op = construct_op(lrm_state, action, ID(xml_rsc), operation);
- if (pcmk__str_eq(operation, RSC_NOTIFY, pcmk__str_casei)) { // Notifications can't fail
+ if (pcmk__str_eq(operation, PCMK_ACTION_NOTIFY, pcmk__str_casei)) {
+ // Notifications can't fail
fake_op_status(lrm_state, op, PCMK_EXEC_DONE, PCMK_OCF_OK, NULL);
} else {
fake_op_status(lrm_state, op, op_status, rc, exit_reason);
@@ -1329,7 +1329,7 @@ do_lrm_delete(ha_msg_input_t *input, lrm_state_t *lrm_state,
if (cib_rc != pcmk_rc_ok) {
lrmd_event_data_t *op = NULL;
- op = construct_op(lrm_state, input->xml, rsc->id, CRMD_ACTION_DELETE);
+ op = construct_op(lrm_state, input->xml, rsc->id, PCMK_ACTION_DELETE);
/* These are resource clean-ups, not actions, so no exit reason is
* needed.
@@ -1394,7 +1394,9 @@ metadata_complete(int pid, const pcmk__action_result_t *result, void *user_data)
md = controld_cache_metadata(lrm_state->metadata_cache, data->rsc,
result->action_stdout);
}
- do_lrm_rsc_op(lrm_state, data->rsc, data->input_xml, md);
+ if (!pcmk_is_set(controld_globals.fsa_input_register, R_HA_DISCONNECTED)) {
+ do_lrm_rsc_op(lrm_state, data->rsc, data->input_xml, md);
+ }
free_metadata_cb_data(data);
}
@@ -1438,11 +1440,11 @@ do_lrm_invoke(long long action,
from_host = crm_element_value(input->msg, F_CRM_HOST_FROM);
}
- if (pcmk__str_eq(crm_op, CRM_OP_LRM_DELETE, pcmk__str_none)) {
+ if (pcmk__str_eq(crm_op, PCMK_ACTION_LRM_DELETE, pcmk__str_none)) {
if (!pcmk__str_eq(from_sys, CRM_SYSTEM_TENGINE, pcmk__str_none)) {
crm_rsc_delete = TRUE; // from crm_resource
}
- operation = CRMD_ACTION_DELETE;
+ operation = PCMK_ACTION_DELETE;
} else if (input->xml != NULL) {
operation = crm_element_value(input->xml, XML_LRM_ATTR_TASK);
@@ -1486,7 +1488,7 @@ do_lrm_invoke(long long action,
} else if (operation != NULL) {
lrmd_rsc_info_t *rsc = NULL;
xmlNode *xml_rsc = find_xml_node(input->xml, XML_CIB_TAG_RESOURCE, TRUE);
- gboolean create_rsc = !pcmk__str_eq(operation, CRMD_ACTION_DELETE,
+ gboolean create_rsc = !pcmk__str_eq(operation, PCMK_ACTION_DELETE,
pcmk__str_none);
int rc;
@@ -1534,12 +1536,13 @@ do_lrm_invoke(long long action,
return;
}
- if (pcmk__str_eq(operation, CRMD_ACTION_CANCEL, pcmk__str_none)) {
+ if (pcmk__str_eq(operation, PCMK_ACTION_CANCEL, pcmk__str_none)) {
if (!do_lrm_cancel(input, lrm_state, rsc, from_host, from_sys)) {
crm_log_xml_warn(input->xml, "Bad command");
}
- } else if (pcmk__str_eq(operation, CRMD_ACTION_DELETE, pcmk__str_none)) {
+ } else if (pcmk__str_eq(operation, PCMK_ACTION_DELETE,
+ pcmk__str_none)) {
do_lrm_delete(input, lrm_state, rsc, from_sys, from_host,
crm_rsc_delete, user_name);
@@ -1554,7 +1557,7 @@ do_lrm_invoke(long long action,
* changed (using something like inotify, or a hash or modification
* time of the agent executable).
*/
- if (strcmp(operation, CRMD_ACTION_START) != 0) {
+ if (strcmp(operation, PCMK_ACTION_START) != 0) {
md = controld_get_rsc_metadata(lrm_state, rsc,
controld_metadata_from_cache);
}
@@ -1619,7 +1622,8 @@ construct_op(const lrm_state_t *lrm_state, const xmlNode *rsc_op,
lrmd__set_result(op, PCMK_OCF_UNKNOWN, PCMK_EXEC_PENDING, NULL);
if (rsc_op == NULL) {
- CRM_LOG_ASSERT(pcmk__str_eq(CRMD_ACTION_STOP, operation, pcmk__str_casei));
+ CRM_LOG_ASSERT(pcmk__str_eq(operation, PCMK_ACTION_STOP,
+ pcmk__str_casei));
op->user_data = NULL;
/* the stop_all_resources() case
* by definition there is no DC (or they'd be shutting
@@ -1654,7 +1658,7 @@ construct_op(const lrm_state_t *lrm_state, const xmlNode *rsc_op,
class = crm_element_value(primitive, XML_AGENT_ATTR_CLASS);
if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_fence_params)
- && pcmk__str_eq(operation, CRMD_ACTION_STATUS, pcmk__str_casei)
+ && pcmk__str_eq(operation, PCMK_ACTION_MONITOR, pcmk__str_casei)
&& (op->interval_ms > 0)) {
op_timeout = g_hash_table_lookup(params, "pcmk_monitor_timeout");
@@ -1663,7 +1667,7 @@ construct_op(const lrm_state_t *lrm_state, const xmlNode *rsc_op,
}
}
- if (!pcmk__str_eq(operation, RSC_STOP, pcmk__str_casei)) {
+ if (!pcmk__str_eq(operation, PCMK_ACTION_STOP, pcmk__str_casei)) {
op->params = params;
} else {
@@ -1703,7 +1707,8 @@ construct_op(const lrm_state_t *lrm_state, const xmlNode *rsc_op,
op->user_data = strdup(transition);
if (op->interval_ms != 0) {
- if (pcmk__strcase_any_of(operation, CRMD_ACTION_START, CRMD_ACTION_STOP, NULL)) {
+ if (pcmk__strcase_any_of(operation, PCMK_ACTION_START, PCMK_ACTION_STOP,
+ NULL)) {
crm_err("Start and Stop actions cannot have an interval: %u",
op->interval_ms);
op->interval_ms = 0;
@@ -1849,7 +1854,7 @@ static bool
should_cancel_recurring(const char *rsc_id, const char *action, guint interval_ms)
{
if (is_remote_lrmd_ra(NULL, NULL, rsc_id) && (interval_ms == 0)
- && (strcmp(action, CRMD_ACTION_MIGRATE) == 0)) {
+ && (strcmp(action, PCMK_ACTION_MIGRATE_TO) == 0)) {
/* Don't stop monitoring a migrating Pacemaker Remote connection
* resource until the entire migration has completed. We must detect if
* the connection is unexpectedly severed, even during a migration.
@@ -1859,8 +1864,8 @@ should_cancel_recurring(const char *rsc_id, const char *action, guint interval_m
// Cancel recurring actions before changing resource state
return (interval_ms == 0)
- && !pcmk__str_any_of(action, CRMD_ACTION_STATUS, CRMD_ACTION_NOTIFY,
- NULL);
+ && !pcmk__str_any_of(action, PCMK_ACTION_MONITOR,
+ PCMK_ACTION_NOTIFY, NULL);
}
/*!
@@ -1876,7 +1881,7 @@ static const char *
should_nack_action(const char *action)
{
if (pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)
- && pcmk__str_eq(action, RSC_START, pcmk__str_none)) {
+ && pcmk__str_eq(action, PCMK_ACTION_START, pcmk__str_none)) {
register_fsa_input(C_SHUTDOWN, I_SHUTDOWN, NULL);
return "Not attempting start due to shutdown in progress";
@@ -1888,7 +1893,7 @@ should_nack_action(const char *action)
case S_TRANSITION_ENGINE:
break;
default:
- if (!pcmk__str_eq(action, CRMD_ACTION_STOP, pcmk__str_none)) {
+ if (!pcmk__str_eq(action, PCMK_ACTION_STOP, pcmk__str_none)) {
return "Controller cannot attempt actions at this time";
}
break;
@@ -1930,8 +1935,8 @@ do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, xmlNode *msg,
return;
}
- if (pcmk__str_any_of(operation, CRMD_ACTION_RELOAD,
- CRMD_ACTION_RELOAD_AGENT, NULL)) {
+ if (pcmk__str_any_of(operation, PCMK_ACTION_RELOAD,
+ PCMK_ACTION_RELOAD_AGENT, NULL)) {
/* Pre-2.1.0 DCs will schedule reload actions only, and 2.1.0+ DCs
* will schedule reload-agent actions only. In either case, we need
* to map that to whatever the resource agent actually supports.
@@ -1939,9 +1944,9 @@ do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, xmlNode *msg,
*/
if ((md != NULL)
&& pcmk_is_set(md->ra_flags, ra_supports_legacy_reload)) {
- operation = CRMD_ACTION_RELOAD;
+ operation = PCMK_ACTION_RELOAD;
} else {
- operation = CRMD_ACTION_RELOAD_AGENT;
+ operation = PCMK_ACTION_RELOAD_AGENT;
}
}
@@ -1968,8 +1973,9 @@ do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, xmlNode *msg,
/* now do the op */
crm_notice("Requesting local execution of %s operation for %s on %s "
CRM_XS " transition_key=%s op_key=" PCMK__OP_FMT,
- crm_action_str(op->op_type, op->interval_ms), rsc->id, lrm_state->node_name,
- pcmk__s(transition, ""), rsc->id, operation, op->interval_ms);
+ pcmk__readable_action(op->op_type, op->interval_ms), rsc->id,
+ lrm_state->node_name, pcmk__s(transition, ""), rsc->id,
+ operation, op->interval_ms);
nack_reason = should_nack_action(operation);
if (nack_reason != NULL) {
@@ -2131,7 +2137,8 @@ log_executor_event(const lrmd_event_data_t *op, const char *op_key,
GString *str = g_string_sized_new(100); // reasonable starting size
pcmk__g_strcat(str,
- "Result of ", crm_action_str(op->op_type, op->interval_ms),
+ "Result of ",
+ pcmk__readable_action(op->op_type, op->interval_ms),
" operation for ", op->rsc_id, NULL);
if (node_name != NULL) {
@@ -2401,7 +2408,8 @@ process_lrm_event(lrm_state_t *lrm_state, lrmd_event_data_t *op,
log_executor_event(op, op_key, node_name, removed);
if (lrm_state) {
- if (!pcmk__str_eq(op->op_type, RSC_METADATA, pcmk__str_casei)) {
+ if (!pcmk__str_eq(op->op_type, PCMK_ACTION_META_DATA,
+ pcmk__str_casei)) {
crmd_alert_resource_op(lrm_state->node_name, op);
} else if (rsc && (op->rc == PCMK_OCF_OK)) {
char *metadata = unescape_newlines(op->output);
diff --git a/daemons/controld/controld_execd_state.c b/daemons/controld/controld_execd_state.c
index 8c68bfc..b90cc5e 100644
--- a/daemons/controld/controld_execd_state.c
+++ b/daemons/controld/controld_execd_state.c
@@ -132,12 +132,6 @@ lrm_state_create(const char *node_name)
return state;
}
-void
-lrm_state_destroy(const char *node_name)
-{
- g_hash_table_remove(lrm_state_table, node_name);
-}
-
static gboolean
remote_proxy_remove_by_node(gpointer key, gpointer value, gpointer user_data)
{
@@ -307,7 +301,7 @@ lrm_state_destroy_all(void)
lrm_state_t *
lrm_state_find(const char *node_name)
{
- if (!node_name) {
+ if ((node_name == NULL) || (lrm_state_table == NULL)) {
return NULL;
}
return g_hash_table_lookup(lrm_state_table, node_name);
@@ -318,6 +312,8 @@ lrm_state_find_or_create(const char *node_name)
{
lrm_state_t *lrm_state;
+ CRM_CHECK(lrm_state_table != NULL, return NULL);
+
lrm_state = g_hash_table_lookup(lrm_state_table, node_name);
if (!lrm_state) {
lrm_state = lrm_state_create(node_name);
@@ -329,6 +325,9 @@ lrm_state_find_or_create(const char *node_name)
GList *
lrm_state_get_list(void)
{
+ if (lrm_state_table == NULL) {
+ return NULL;
+ }
return g_hash_table_get_values(lrm_state_table);
}
@@ -799,7 +798,7 @@ lrm_state_unregister_rsc(lrm_state_t * lrm_state,
}
if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) {
- lrm_state_destroy(rsc_id);
+ g_hash_table_remove(lrm_state_table, rsc_id);
return pcmk_ok;
}
diff --git a/daemons/controld/controld_fencing.c b/daemons/controld/controld_fencing.c
index 89cb61f..9557d9e 100644
--- a/daemons/controld/controld_fencing.c
+++ b/daemons/controld/controld_fencing.c
@@ -218,8 +218,11 @@ send_stonith_update(pcmk__graph_action_t *action, const char *target,
CRM_CHECK(target != NULL, return);
CRM_CHECK(uuid != NULL, return);
- /* Make sure the membership and join caches are accurate */
- peer = crm_get_peer_full(0, target, CRM_GET_PEER_ANY);
+ /* Make sure the membership and join caches are accurate.
+ * Try getting any existing node cache entry also by node uuid in case it
+ * doesn't have an uname yet.
+ */
+ peer = pcmk__get_peer_full(0, target, uuid, CRM_GET_PEER_ANY);
CRM_CHECK(peer != NULL, return);
@@ -391,7 +394,7 @@ execute_stonith_cleanup(void)
*/
static stonith_t *stonith_api = NULL;
-static crm_trigger_t *stonith_reconnect = NULL;
+static mainloop_timer_t *controld_fencer_connect_timer = NULL;
static char *te_client_id = NULL;
static gboolean
@@ -422,7 +425,7 @@ fail_incompletable_stonith(pcmk__graph_t *graph)
}
task = crm_element_value(action->xml, XML_LRM_ATTR_TASK);
- if (task && pcmk__str_eq(task, CRM_OP_FENCE, pcmk__str_casei)) {
+ if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_casei)) {
pcmk__set_graph_action_flags(action, pcmk__graph_action_failed);
last_action = action->xml;
pcmk__update_graph(graph, action);
@@ -447,11 +450,12 @@ tengine_stonith_connection_destroy(stonith_t *st, stonith_event_t *e)
te_cleanup_stonith_history_sync(st, FALSE);
if (pcmk_is_set(controld_globals.fsa_input_register, R_ST_REQUIRED)) {
- crm_crit("Fencing daemon connection failed");
- mainloop_set_trigger(stonith_reconnect);
-
+ crm_err("Lost fencer connection (will attempt to reconnect)");
+ if (!mainloop_timer_running(controld_fencer_connect_timer)) {
+ mainloop_timer_start(controld_fencer_connect_timer);
+ }
} else {
- crm_info("Fencing daemon disconnected");
+ crm_info("Disconnected from fencer");
}
if (stonith_api) {
@@ -515,7 +519,7 @@ handle_fence_notification(stonith_t *st, stonith_event_t *event)
crmd_alert_fencing_op(event);
- if (pcmk__str_eq("on", event->action, pcmk__str_none)) {
+ if (pcmk__str_eq(PCMK_ACTION_ON, event->action, pcmk__str_none)) {
// Unfencing doesn't need special handling, just a log message
if (succeeded) {
crm_notice("%s was unfenced by %s at the request of %s@%s",
@@ -647,14 +651,14 @@ handle_fence_notification(stonith_t *st, stonith_event_t *event)
/*!
* \brief Connect to fencer
*
- * \param[in] user_data If NULL, retry failures now, otherwise retry in main loop
+ * \param[in] user_data If NULL, retry failures now, otherwise retry via a main loop timer
*
- * \return TRUE
+ * \return G_SOURCE_REMOVE on success, G_SOURCE_CONTINUE to retry
* \note If user_data is NULL, this will wait 2s between attempts, for up to
* 30 attempts, meaning the controller could be blocked as long as 58s.
*/
-static gboolean
-te_connect_stonith(gpointer user_data)
+gboolean
+controld_timer_fencer_connect(gpointer user_data)
{
int rc = pcmk_ok;
@@ -662,13 +666,13 @@ te_connect_stonith(gpointer user_data)
stonith_api = stonith_api_new();
if (stonith_api == NULL) {
crm_err("Could not connect to fencer: API memory allocation failed");
- return TRUE;
+ return G_SOURCE_REMOVE;
}
}
if (stonith_api->state != stonith_disconnected) {
crm_trace("Already connected to fencer, no need to retry");
- return TRUE;
+ return G_SOURCE_REMOVE;
}
if (user_data == NULL) {
@@ -681,17 +685,30 @@ te_connect_stonith(gpointer user_data)
} else {
// Non-blocking (retry failures later in main loop)
rc = stonith_api->cmds->connect(stonith_api, crm_system_name, NULL);
+
+ if (controld_fencer_connect_timer == NULL) {
+ controld_fencer_connect_timer =
+ mainloop_timer_add("controld_fencer_connect", 1000,
+ TRUE, controld_timer_fencer_connect,
+ GINT_TO_POINTER(TRUE));
+ }
+
if (rc != pcmk_ok) {
if (pcmk_is_set(controld_globals.fsa_input_register,
R_ST_REQUIRED)) {
crm_notice("Fencer connection failed (will retry): %s "
CRM_XS " rc=%d", pcmk_strerror(rc), rc);
- mainloop_set_trigger(stonith_reconnect);
+
+ if (!mainloop_timer_running(controld_fencer_connect_timer)) {
+ mainloop_timer_start(controld_fencer_connect_timer);
+ }
+
+ return G_SOURCE_CONTINUE;
} else {
crm_info("Fencer connection failed (ignoring because no longer required): %s "
CRM_XS " rc=%d", pcmk_strerror(rc), rc);
}
- return TRUE;
+ return G_SOURCE_REMOVE;
}
}
@@ -709,23 +726,7 @@ te_connect_stonith(gpointer user_data)
crm_notice("Fencer successfully connected");
}
- return TRUE;
-}
-
-/*!
- \internal
- \brief Schedule fencer connection attempt in main loop
-*/
-void
-controld_trigger_fencer_connect(void)
-{
- if (stonith_reconnect == NULL) {
- stonith_reconnect = mainloop_add_trigger(G_PRIORITY_LOW,
- te_connect_stonith,
- GINT_TO_POINTER(TRUE));
- }
- controld_set_fsa_input_flags(R_ST_REQUIRED);
- mainloop_set_trigger(stonith_reconnect);
+ return G_SOURCE_REMOVE;
}
void
@@ -745,9 +746,9 @@ controld_disconnect_fencer(bool destroy)
stonith_api->cmds->free(stonith_api);
stonith_api = NULL;
}
- if (stonith_reconnect) {
- mainloop_destroy_trigger(stonith_reconnect);
- stonith_reconnect = NULL;
+ if (controld_fencer_connect_timer) {
+ mainloop_timer_del(controld_fencer_connect_timer);
+ controld_fencer_connect_timer = NULL;
}
if (te_client_id) {
free(te_client_id);
@@ -843,7 +844,7 @@ tengine_stonith_callback(stonith_t *stonith, stonith_callback_data_t *data)
crm_info("Fence operation %d for %s succeeded", data->call_id, target);
if (!(pcmk_is_set(action->flags, pcmk__graph_action_confirmed))) {
te_action_confirmed(action, NULL);
- if (pcmk__str_eq("on", op, pcmk__str_casei)) {
+ if (pcmk__str_eq(PCMK_ACTION_ON, op, pcmk__str_casei)) {
const char *value = NULL;
char *now = pcmk__ttoa(time(NULL));
gboolean is_remote_node = FALSE;
@@ -981,7 +982,7 @@ controld_execute_fence_action(pcmk__graph_t *graph,
priority_delay ? priority_delay : "");
/* Passing NULL means block until we can connect... */
- te_connect_stonith(NULL);
+ controld_timer_fencer_connect(NULL);
pcmk__scan_min_int(priority_delay, &delay_i, 0);
rc = fence_with_delay(target, type, delay_i);
@@ -1000,12 +1001,14 @@ controld_execute_fence_action(pcmk__graph_t *graph,
bool
controld_verify_stonith_watchdog_timeout(const char *value)
{
+ long st_timeout = value? crm_get_msec(value) : 0;
const char *our_nodename = controld_globals.our_nodename;
gboolean rv = TRUE;
- if (stonith_api && (stonith_api->state != stonith_disconnected) &&
- stonith__watchdog_fencing_enabled_for_node_api(stonith_api,
- our_nodename)) {
+ if (st_timeout == 0
+ || (stonith_api && (stonith_api->state != stonith_disconnected) &&
+ stonith__watchdog_fencing_enabled_for_node_api(stonith_api,
+ our_nodename))) {
rv = pcmk__valid_sbd_timeout(value);
}
return rv;
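
The fencer reconnect logic now lives in a periodic main loop timer instead of a one-shot trigger: the callback returns G_SOURCE_CONTINUE while the connection keeps failing and G_SOURCE_REMOVE once it succeeds or is no longer required. The self-contained GLib sketch below shows that retry shape with a pretend connection attempt; try_connect() and the three-attempt failure are invented for illustration.

#include <glib.h>

static int attempts = 0;

static gboolean
try_connect(gpointer user_data)
{
    GMainLoop *loop = user_data;

    if (++attempts < 3) {                       /* pretend the first tries fail */
        g_print("connect failed, will retry\n");
        return G_SOURCE_CONTINUE;               /* keep the timer running */
    }
    g_print("connected after %d attempts\n", attempts);
    g_main_loop_quit(loop);
    return G_SOURCE_REMOVE;                     /* success: stop retrying */
}

int
main(void)
{
    GMainLoop *loop = g_main_loop_new(NULL, FALSE);

    g_timeout_add_seconds(1, try_connect, loop);
    g_main_loop_run(loop);
    g_main_loop_unref(loop);
    return 0;
}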
diff --git a/daemons/controld/controld_fencing.h b/daemons/controld/controld_fencing.h
index 86a5050..76779c6 100644
--- a/daemons/controld/controld_fencing.h
+++ b/daemons/controld/controld_fencing.h
@@ -19,7 +19,7 @@ void controld_configure_fencing(GHashTable *options);
void st_fail_count_reset(const char * target);
// stonith API client
-void controld_trigger_fencer_connect(void);
+gboolean controld_timer_fencer_connect(gpointer user_data);
void controld_disconnect_fencer(bool destroy);
int controld_execute_fence_action(pcmk__graph_t *graph,
pcmk__graph_action_t *action);
diff --git a/daemons/controld/controld_fsa.c b/daemons/controld/controld_fsa.c
index 622d1c8..06559b8 100644
--- a/daemons/controld/controld_fsa.c
+++ b/daemons/controld/controld_fsa.c
@@ -205,7 +205,6 @@ s_crmd_fsa(enum crmd_fsa_cause cause)
fsa_data->data_type = fsa_dt_none;
controld_globals.fsa_message_queue
= g_list_append(controld_globals.fsa_message_queue, fsa_data);
- fsa_data = NULL;
}
while ((controld_globals.fsa_message_queue != NULL)
&& !pcmk_is_set(controld_globals.flags, controld_fsa_is_stalled)) {
@@ -275,7 +274,6 @@ s_crmd_fsa(enum crmd_fsa_cause cause)
/* start doing things... */
s_crmd_fsa_actions(fsa_data);
delete_fsa_input(fsa_data);
- fsa_data = NULL;
}
if ((controld_globals.fsa_message_queue != NULL)
@@ -620,11 +618,6 @@ do_state_transition(enum crmd_fsa_state cur_state,
if (next_state != S_ELECTION && cur_state != S_RELEASE_DC) {
controld_stop_current_election_timeout();
}
-#if 0
- if ((controld_globals.fsa_input_register & R_SHUTDOWN)) {
- controld_set_fsa_action_flags(A_DC_TIMER_STOP);
- }
-#endif
if (next_state == S_INTEGRATION) {
controld_set_fsa_action_flags(A_INTEGRATE_TIMER_START);
} else {
diff --git a/daemons/controld/controld_globals.h b/daemons/controld/controld_globals.h
index eff1607..2ff8a57 100644
--- a/daemons/controld/controld_globals.h
+++ b/daemons/controld/controld_globals.h
@@ -45,9 +45,6 @@ typedef struct {
//! Connection to the CIB
cib_t *cib_conn;
- //! CIB connection's client ID
- const char *cib_client_id;
-
// Scheduler
@@ -93,6 +90,9 @@ typedef struct {
//! Max lifetime (in seconds) of a resource's shutdown lock to a node
guint shutdown_lock_limit;
+ //! Node pending timeout
+ guint node_pending_timeout;
+
//! Main event loop
GMainLoop *mainloop;
} controld_globals_t;
diff --git a/daemons/controld/controld_join_client.c b/daemons/controld/controld_join_client.c
index da6a9d6..805ecbd 100644
--- a/daemons/controld/controld_join_client.c
+++ b/daemons/controld/controld_join_client.c
@@ -112,15 +112,6 @@ do_cl_join_offer_respond(long long action,
CRM_CHECK(input != NULL, return);
-#if 0
- if (we are sick) {
- log error;
-
- /* save the request for later? */
- return;
- }
-#endif
-
welcome_from = crm_element_value(input->msg, F_CRM_HOST_FROM);
join_id = crm_element_value(input->msg, F_CRM_JOIN_ID);
crm_trace("Accepting cluster join offer from node %s "CRM_XS" join-%s",
@@ -195,32 +186,34 @@ join_query_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *
free_xml(generation);
}
-static void
-set_join_state(const char * start_state)
+void
+set_join_state(const char *start_state, const char *node_name, const char *node_uuid,
+ bool remote)
{
if (pcmk__str_eq(start_state, "standby", pcmk__str_casei)) {
crm_notice("Forcing node %s to join in %s state per configured "
- "environment", controld_globals.our_nodename, start_state);
+ "environment", node_name, start_state);
cib__update_node_attr(controld_globals.logger_out,
controld_globals.cib_conn, cib_sync_call,
- XML_CIB_TAG_NODES, controld_globals.our_uuid,
- NULL, NULL, NULL, "standby", "on", NULL, NULL);
+ XML_CIB_TAG_NODES, node_uuid,
+ NULL, NULL, NULL, "standby", "on", NULL,
+ remote ? "remote" : NULL);
} else if (pcmk__str_eq(start_state, "online", pcmk__str_casei)) {
crm_notice("Forcing node %s to join in %s state per configured "
- "environment", controld_globals.our_nodename, start_state);
+ "environment", node_name, start_state);
cib__update_node_attr(controld_globals.logger_out,
controld_globals.cib_conn, cib_sync_call,
- XML_CIB_TAG_NODES, controld_globals.our_uuid,
- NULL, NULL, NULL, "standby", "off", NULL, NULL);
+ XML_CIB_TAG_NODES, node_uuid,
+ NULL, NULL, NULL, "standby", "off", NULL,
+ remote ? "remote" : NULL);
} else if (pcmk__str_eq(start_state, "default", pcmk__str_casei)) {
- crm_debug("Not forcing a starting state on node %s",
- controld_globals.our_nodename);
+ crm_debug("Not forcing a starting state on node %s", node_name);
} else {
crm_warn("Unrecognized start state '%s', using 'default' (%s)",
- start_state, controld_globals.our_nodename);
+ start_state, node_name);
}
}
@@ -335,7 +328,8 @@ do_cl_join_finalize_respond(long long action,
first_join = FALSE;
if (start_state) {
- set_join_state(start_state);
+ set_join_state(start_state, controld_globals.our_nodename,
+ controld_globals.our_uuid, false);
}
}
diff --git a/daemons/controld/controld_join_dc.c b/daemons/controld/controld_join_dc.c
index f82b132..2fe6710 100644
--- a/daemons/controld/controld_join_dc.c
+++ b/daemons/controld/controld_join_dc.c
@@ -172,7 +172,6 @@ start_join_round(void)
max_generation_xml = NULL;
}
controld_clear_fsa_input_flags(R_HAVE_CIB);
- controld_forget_all_cib_replace_calls();
}
/*!
@@ -607,10 +606,6 @@ do_dc_join_finalize(long long action,
rc = controld_globals.cib_conn->cmds->sync_from(controld_globals.cib_conn,
sync_from, NULL, cib_none);
-
- if (pcmk_is_set(controld_globals.fsa_input_register, R_HAVE_CIB)) {
- controld_record_cib_replace_call(rc);
- }
fsa_register_cib_callback(rc, sync_from, finalize_sync_callback);
}
@@ -629,8 +624,6 @@ finalize_sync_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, voi
{
CRM_LOG_ASSERT(-EPERM != rc);
- controld_forget_cib_replace_call(call_id);
-
if (rc != pcmk_ok) {
const char *sync_from = (const char *) user_data;
@@ -674,22 +667,25 @@ finalize_sync_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, voi
}
static void
-join_update_complete_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
+join_node_state_commit_callback(xmlNode *msg, int call_id, int rc,
+ xmlNode *output, void *user_data)
{
- fsa_data_t *msg_data = NULL;
+ const char *node = user_data;
- if (rc == pcmk_ok) {
- crm_debug("join-%d node history update (via CIB call %d) complete",
- current_join_id, call_id);
- check_join_state(controld_globals.fsa_state, __func__);
+ if (rc != pcmk_ok) {
+ fsa_data_t *msg_data = NULL; // for register_fsa_error() macro
- } else {
- crm_err("join-%d node history update (via CIB call %d) failed: %s "
- "(next transition may determine resource status incorrectly)",
- current_join_id, call_id, pcmk_strerror(rc));
+ crm_crit("join-%d node history update (via CIB call %d) for node %s "
+ "failed: %s",
+ current_join_id, call_id, node, pcmk_strerror(rc));
crm_log_xml_debug(msg, "failed");
register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
}
+
+ crm_debug("join-%d node history update (via CIB call %d) for node %s "
+ "complete",
+ current_join_id, call_id, node);
+ check_join_state(controld_globals.fsa_state, __func__);
}
/* A_DC_JOIN_PROCESS_ACK */
@@ -701,33 +697,39 @@ do_dc_join_ack(long long action,
{
int join_id = -1;
ha_msg_input_t *join_ack = fsa_typed_data(fsa_dt_ha_msg);
- enum controld_section_e section = controld_section_lrm;
- const int cib_opts = cib_scope_local|cib_can_create;
const char *op = crm_element_value(join_ack->msg, F_CRM_TASK);
- const char *join_from = crm_element_value(join_ack->msg, F_CRM_HOST_FROM);
+ char *join_from = crm_element_value_copy(join_ack->msg, F_CRM_HOST_FROM);
crm_node_t *peer = NULL;
+ enum controld_section_e section = controld_section_lrm;
+ char *xpath = NULL;
+ xmlNode *state = join_ack->xml;
+ xmlNode *execd_state = NULL;
+
+ cib_t *cib = controld_globals.cib_conn;
+ int rc = pcmk_ok;
+
// Sanity checks
if (join_from == NULL) {
crm_warn("Ignoring message received without node identification");
- return;
+ goto done;
}
if (op == NULL) {
crm_warn("Ignoring message received from %s without task", join_from);
- return;
+ goto done;
}
if (strcmp(op, CRM_OP_JOIN_CONFIRM)) {
crm_debug("Ignoring '%s' message from %s while waiting for '%s'",
op, join_from, CRM_OP_JOIN_CONFIRM);
- return;
+ goto done;
}
if (crm_element_value_int(join_ack->msg, F_CRM_JOIN_ID, &join_id) != 0) {
crm_warn("Ignoring join confirmation from %s without valid join ID",
join_from);
- return;
+ goto done;
}
peer = crm_get_peer(0, join_from);
@@ -736,7 +738,7 @@ do_dc_join_ack(long long action,
"(currently %s not %s)",
join_id, join_from, crm_join_phase_str(peer->join),
crm_join_phase_str(crm_join_finalized));
- return;
+ goto done;
}
if (join_id != current_join_id) {
@@ -744,40 +746,85 @@ do_dc_join_ack(long long action,
"because currently on join-%d",
join_id, join_from, current_join_id);
crm_update_peer_join(__func__, peer, crm_join_nack);
- return;
+ goto done;
}
crm_update_peer_join(__func__, peer, crm_join_confirmed);
/* Update CIB with node's current executor state. A new transition will be
- * triggered later, when the CIB notifies us of the change.
+ * triggered later, when the CIB manager notifies us of the change.
+ *
+ * The delete and modify requests are part of an atomic transaction.
*/
+ rc = cib->cmds->init_transaction(cib);
+ if (rc != pcmk_ok) {
+ goto done;
+ }
+
+ // Delete relevant parts of node's current executor state from CIB
if (pcmk_is_set(controld_globals.flags, controld_shutdown_lock_enabled)) {
section = controld_section_lrm_unlocked;
}
- controld_delete_node_state(join_from, section, cib_scope_local);
+ controld_node_state_deletion_strings(join_from, section, &xpath, NULL);
+
+ rc = cib->cmds->remove(cib, xpath, NULL,
+ cib_scope_local
+ |cib_xpath
+ |cib_multiple
+ |cib_transaction);
+ if (rc != pcmk_ok) {
+ goto done;
+ }
+
+ // Update CIB with node's latest known executor state
if (pcmk__str_eq(join_from, controld_globals.our_nodename,
pcmk__str_casei)) {
- xmlNode *now_dc_lrmd_state = controld_query_executor_state();
-
- if (now_dc_lrmd_state != NULL) {
- crm_debug("Updating local node history for join-%d "
- "from query result", join_id);
- controld_update_cib(XML_CIB_TAG_STATUS, now_dc_lrmd_state, cib_opts,
- join_update_complete_callback);
- free_xml(now_dc_lrmd_state);
+
+ // Use the latest possible state if processing our own join ack
+ execd_state = controld_query_executor_state();
+
+ if (execd_state != NULL) {
+ crm_debug("Updating local node history for join-%d from query "
+ "result",
+ current_join_id);
+ state = execd_state;
+
} else {
crm_warn("Updating local node history from join-%d confirmation "
- "because query failed", join_id);
- controld_update_cib(XML_CIB_TAG_STATUS, join_ack->xml, cib_opts,
- join_update_complete_callback);
+ "because query failed",
+ current_join_id);
}
+
} else {
crm_debug("Updating node history for %s from join-%d confirmation",
- join_from, join_id);
- controld_update_cib(XML_CIB_TAG_STATUS, join_ack->xml, cib_opts,
- join_update_complete_callback);
+ join_from, current_join_id);
+ }
+
+ rc = cib->cmds->modify(cib, XML_CIB_TAG_STATUS, state,
+ cib_scope_local|cib_can_create|cib_transaction);
+ free_xml(execd_state);
+ if (rc != pcmk_ok) {
+ goto done;
+ }
+
+ // Commit the transaction
+ rc = cib->cmds->end_transaction(cib, true, cib_scope_local);
+ fsa_register_cib_callback(rc, join_from, join_node_state_commit_callback);
+
+ if (rc > 0) {
+ // join_from will be freed after callback
+ join_from = NULL;
+ rc = pcmk_ok;
+ }
+
+done:
+ if (rc != pcmk_ok) {
+ crm_crit("join-%d node history update for node %s failed: %s",
+ current_join_id, join_from, pcmk_strerror(rc));
+ register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
}
+ free(join_from);
+ free(xpath);
}
void
@@ -808,7 +855,7 @@ finalize_join_for(gpointer key, gpointer value, gpointer user_data)
*/
crm_trace("Updating node name and UUID in CIB for %s", join_to);
tmp1 = create_xml_node(NULL, XML_CIB_TAG_NODE);
- set_uuid(tmp1, XML_ATTR_ID, join_node);
+ crm_xml_add(tmp1, XML_ATTR_ID, crm_peer_uuid(join_node));
crm_xml_add(tmp1, XML_ATTR_UNAME, join_to);
fsa_cib_anon_update(XML_CIB_TAG_NODES, tmp1);
free_xml(tmp1);
diff --git a/daemons/controld/controld_lrm.h b/daemons/controld/controld_lrm.h
index 25f3db3..c3113e4 100644
--- a/daemons/controld/controld_lrm.h
+++ b/daemons/controld/controld_lrm.h
@@ -109,11 +109,6 @@ gboolean lrm_state_init_local(void);
void lrm_state_destroy_all(void);
/*!
- * \brief Destroy executor connection by node name
- */
-void lrm_state_destroy(const char *node_name);
-
-/*!
* \brief Find lrm_state data by node name
*/
lrm_state_t *lrm_state_find(const char *node_name);
diff --git a/daemons/controld/controld_membership.c b/daemons/controld/controld_membership.c
index 1f7e4c0..f25d1e9 100644
--- a/daemons/controld/controld_membership.c
+++ b/daemons/controld/controld_membership.c
@@ -138,10 +138,8 @@ create_node_state_update(crm_node_t *node, int flags, xmlNode *parent,
pcmk__xe_set_bool_attr(node_state, XML_NODE_IS_REMOTE, true);
}
- set_uuid(node_state, XML_ATTR_ID, node);
-
- if (crm_element_value(node_state, XML_ATTR_ID) == NULL) {
- crm_info("Node update for %s cancelled: no id", node->uname);
+ if (crm_xml_add(node_state, XML_ATTR_ID, crm_peer_uuid(node)) == NULL) {
+ crm_info("Node update for %s cancelled: no ID", node->uname);
free_xml(node_state);
return NULL;
}
@@ -149,17 +147,31 @@ create_node_state_update(crm_node_t *node, int flags, xmlNode *parent,
crm_xml_add(node_state, XML_ATTR_UNAME, node->uname);
if ((flags & node_update_cluster) && node->state) {
- pcmk__xe_set_bool_attr(node_state, XML_NODE_IN_CLUSTER,
- pcmk__str_eq(node->state, CRM_NODE_MEMBER, pcmk__str_casei));
+ if (compare_version(controld_globals.dc_version, "3.18.0") >= 0) {
+ // A value 0 means the node is not a cluster member.
+ crm_xml_add_ll(node_state, PCMK__XA_IN_CCM, node->when_member);
+
+ } else {
+ pcmk__xe_set_bool_attr(node_state, PCMK__XA_IN_CCM,
+ pcmk__str_eq(node->state, CRM_NODE_MEMBER,
+ pcmk__str_casei));
+ }
}
if (!pcmk_is_set(node->flags, crm_remote_node)) {
if (flags & node_update_peer) {
- value = OFFLINESTATUS;
- if (pcmk_is_set(node->processes, crm_get_cluster_proc())) {
- value = ONLINESTATUS;
+ if (compare_version(controld_globals.dc_version, "3.18.0") >= 0) {
+ // A value 0 means the peer is offline in CPG.
+ crm_xml_add_ll(node_state, PCMK__XA_CRMD, node->when_online);
+
+ } else {
+ // @COMPAT DCs < 2.1.7 use online/offline rather than timestamp
+ value = OFFLINESTATUS;
+ if (pcmk_is_set(node->processes, crm_get_cluster_proc())) {
+ value = ONLINESTATUS;
+ }
+ crm_xml_add(node_state, PCMK__XA_CRMD, value);
}
- crm_xml_add(node_state, XML_NODE_IS_PEER, value);
}
if (flags & node_update_join) {
@@ -168,11 +180,11 @@ create_node_state_update(crm_node_t *node, int flags, xmlNode *parent,
} else {
value = CRMD_JOINSTATE_MEMBER;
}
- crm_xml_add(node_state, XML_NODE_JOIN_STATE, value);
+ crm_xml_add(node_state, PCMK__XA_JOIN, value);
}
if (flags & node_update_expected) {
- crm_xml_add(node_state, XML_NODE_EXPECTED, node->expected);
+ crm_xml_add(node_state, PCMK__XA_EXPECTED, node->expected);
}
}
@@ -210,7 +222,7 @@ search_conflicting_node_callback(xmlNode * msg, int call_id, int rc,
return;
}
- if (pcmk__str_eq(crm_element_name(output), XML_CIB_TAG_NODE, pcmk__str_casei)) {
+ if (pcmk__xe_is(output, XML_CIB_TAG_NODE)) {
node_xml = output;
} else {
@@ -224,7 +236,7 @@ search_conflicting_node_callback(xmlNode * msg, int call_id, int rc,
crm_node_t *node = NULL;
gboolean known = FALSE;
- if (!pcmk__str_eq(crm_element_name(node_xml), XML_CIB_TAG_NODE, pcmk__str_casei)) {
+ if (!pcmk__xe_is(node_xml, XML_CIB_TAG_NODE)) {
continue;
}
diff --git a/daemons/controld/controld_messages.c b/daemons/controld/controld_messages.c
index 54b27ec..39f3c7a 100644
--- a/daemons/controld/controld_messages.c
+++ b/daemons/controld/controld_messages.c
@@ -328,52 +328,80 @@ route_message(enum crmd_fsa_cause cause, xmlNode * input)
gboolean
relay_message(xmlNode * msg, gboolean originated_locally)
{
- int dest = 1;
+ enum crm_ais_msg_types dest = crm_msg_ais;
bool is_for_dc = false;
bool is_for_dcib = false;
bool is_for_te = false;
bool is_for_crm = false;
bool is_for_cib = false;
bool is_local = false;
- const char *host_to = crm_element_value(msg, F_CRM_HOST_TO);
- const char *sys_to = crm_element_value(msg, F_CRM_SYS_TO);
- const char *sys_from = crm_element_value(msg, F_CRM_SYS_FROM);
- const char *type = crm_element_value(msg, F_TYPE);
- const char *task = crm_element_value(msg, F_CRM_TASK);
- const char *ref = crm_element_value(msg, XML_ATTR_REFERENCE);
+ bool broadcast = false;
+ const char *host_to = NULL;
+ const char *sys_to = NULL;
+ const char *sys_from = NULL;
+ const char *type = NULL;
+ const char *task = NULL;
+ const char *ref = NULL;
+ crm_node_t *node_to = NULL;
+
+ CRM_CHECK(msg != NULL, return TRUE);
+
+ host_to = crm_element_value(msg, F_CRM_HOST_TO);
+ sys_to = crm_element_value(msg, F_CRM_SYS_TO);
+ sys_from = crm_element_value(msg, F_CRM_SYS_FROM);
+ type = crm_element_value(msg, F_TYPE);
+ task = crm_element_value(msg, F_CRM_TASK);
+ ref = crm_element_value(msg, XML_ATTR_REFERENCE);
+
+ broadcast = pcmk__str_empty(host_to);
if (ref == NULL) {
ref = "without reference ID";
}
- if (msg == NULL) {
- crm_warn("Cannot route empty message");
- return TRUE;
-
- } else if (pcmk__str_eq(task, CRM_OP_HELLO, pcmk__str_casei)) {
- crm_trace("No routing needed for hello message %s", ref);
+ if (pcmk__str_eq(task, CRM_OP_HELLO, pcmk__str_casei)) {
+ crm_trace("Received hello %s from %s (no processing needed)",
+ ref, pcmk__s(sys_from, "unidentified source"));
+ crm_log_xml_trace(msg, "hello");
return TRUE;
+ }
- } else if (!pcmk__str_eq(type, T_CRM, pcmk__str_casei)) {
- crm_warn("Received invalid message %s: type '%s' not '" T_CRM "'",
+ // Require message type (set by create_request())
+ if (!pcmk__str_eq(type, T_CRM, pcmk__str_casei)) {
+ crm_warn("Ignoring invalid message %s with type '%s' (not '" T_CRM "')",
ref, pcmk__s(type, ""));
- crm_log_xml_warn(msg, "[bad message type]");
+ crm_log_xml_trace(msg, "ignored");
return TRUE;
+ }
- } else if (sys_to == NULL) {
- crm_warn("Received invalid message %s: no subsystem", ref);
- crm_log_xml_warn(msg, "[no subsystem]");
+ // Require a destination subsystem (also set by create_request())
+ if (sys_to == NULL) {
+ crm_warn("Ignoring invalid message %s with no " F_CRM_SYS_TO, ref);
+ crm_log_xml_trace(msg, "ignored");
return TRUE;
}
+ // Get the message type appropriate to the destination subsystem
+ if (is_corosync_cluster()) {
+ dest = text2msg_type(sys_to);
+ if ((dest < crm_msg_ais) || (dest > crm_msg_stonith_ng)) {
+ /* Unrecognized value, use a sane default
+ *
+ * @TODO Maybe we should bail instead
+ */
+ dest = crm_msg_crmd;
+ }
+ }
+
is_for_dc = (strcasecmp(CRM_SYSTEM_DC, sys_to) == 0);
is_for_dcib = (strcasecmp(CRM_SYSTEM_DCIB, sys_to) == 0);
is_for_te = (strcasecmp(CRM_SYSTEM_TENGINE, sys_to) == 0);
is_for_cib = (strcasecmp(CRM_SYSTEM_CIB, sys_to) == 0);
is_for_crm = (strcasecmp(CRM_SYSTEM_CRMD, sys_to) == 0);
+ // Check whether message should be processed locally
is_local = false;
- if (pcmk__str_empty(host_to)) {
+ if (broadcast) {
if (is_for_dc || is_for_te) {
is_local = false;
@@ -397,6 +425,7 @@ relay_message(xmlNode * msg, gboolean originated_locally)
} else if (pcmk__str_eq(controld_globals.our_nodename, host_to,
pcmk__str_casei)) {
is_local = true;
+
} else if (is_for_crm && pcmk__str_eq(task, CRM_OP_LRM_DELETE, pcmk__str_casei)) {
xmlNode *msg_data = get_message_xml(msg, F_CRM_DATA);
const char *mode = crm_element_value(msg_data, PCMK__XA_MODE);
@@ -407,69 +436,68 @@ relay_message(xmlNode * msg, gboolean originated_locally)
}
}
- if (is_for_dc || is_for_dcib || is_for_te) {
- if (AM_I_DC && is_for_te) {
- crm_trace("Route message %s locally as transition request", ref);
- send_msg_via_ipc(msg, sys_to);
+ // Check whether message should be relayed
- } else if (AM_I_DC) {
+ if (is_for_dc || is_for_dcib || is_for_te) {
+ if (AM_I_DC) {
+ if (is_for_te) {
+ crm_trace("Route message %s locally as transition request",
+ ref);
+ crm_log_xml_trace(msg, sys_to);
+ send_msg_via_ipc(msg, sys_to);
+ return TRUE; // No further processing of message is needed
+ }
crm_trace("Route message %s locally as DC request", ref);
return FALSE; // More to be done by caller
+ }
- } else if (originated_locally && !pcmk__strcase_any_of(sys_from, CRM_SYSTEM_PENGINE,
- CRM_SYSTEM_TENGINE, NULL)) {
-
- if (is_corosync_cluster()) {
- dest = text2msg_type(sys_to);
+ if (originated_locally
+ && !pcmk__strcase_any_of(sys_from, CRM_SYSTEM_PENGINE,
+ CRM_SYSTEM_TENGINE, NULL)) {
+ crm_trace("Relay message %s to DC (via %s)",
+ ref, pcmk__s(host_to, "broadcast"));
+ crm_log_xml_trace(msg, "relayed");
+ if (!broadcast) {
+ node_to = crm_get_peer(0, host_to);
}
- crm_trace("Relay message %s to DC", ref);
- send_cluster_message(host_to ? crm_get_peer(0, host_to) : NULL, dest, msg, TRUE);
-
- } else {
- /* Neither the TE nor the scheduler should be sending messages
- * to DCs on other nodes. By definition, if we are no longer the DC,
- * then the scheduler's or TE's data should be discarded.
- */
- crm_trace("Discard message %s because we are not DC", ref);
+ send_cluster_message(node_to, dest, msg, TRUE);
+ return TRUE;
}
- } else if (is_local && (is_for_crm || is_for_cib)) {
- crm_trace("Route message %s locally as controller request", ref);
- return FALSE; // More to be done by caller
-
- } else if (is_local) {
- crm_trace("Relay message %s locally to %s",
- ref, (sys_to? sys_to : "unknown client"));
- crm_log_xml_trace(msg, "[IPC relay]");
- send_msg_via_ipc(msg, sys_to);
-
- } else {
- crm_node_t *node_to = NULL;
-
- if (is_corosync_cluster()) {
- dest = text2msg_type(sys_to);
+ /* Transition engine and scheduler messages are sent only to the DC on
+ * the same node. If we are no longer the DC, discard this message.
+ */
+ crm_trace("Ignoring message %s because we are no longer DC", ref);
+ crm_log_xml_trace(msg, "ignored");
+ return TRUE; // No further processing of message is needed
+ }
- if (dest == crm_msg_none || dest > crm_msg_stonith_ng) {
- dest = crm_msg_crmd;
- }
+ if (is_local) {
+ if (is_for_crm || is_for_cib) {
+ crm_trace("Route message %s locally as controller request", ref);
+ return FALSE; // More to be done by caller
}
+ crm_trace("Relay message %s locally to %s", ref, sys_to);
+ crm_log_xml_trace(msg, "IPC-relay");
+ send_msg_via_ipc(msg, sys_to);
+ return TRUE;
+ }
- if (host_to) {
- node_to = pcmk__search_cluster_node_cache(0, host_to);
- if (node_to == NULL) {
- crm_warn("Cannot route message %s: Unknown node %s",
- ref, host_to);
- return TRUE;
- }
- crm_trace("Relay message %s to %s",
- ref, (node_to->uname? node_to->uname : "peer"));
- } else {
- crm_trace("Broadcast message %s to all peers", ref);
+ if (!broadcast) {
+ node_to = pcmk__search_cluster_node_cache(0, host_to, NULL);
+ if (node_to == NULL) {
+ crm_warn("Ignoring message %s because node %s is unknown",
+ ref, host_to);
+ crm_log_xml_trace(msg, "ignored");
+ return TRUE;
}
- send_cluster_message(host_to ? node_to : NULL, dest, msg, TRUE);
}
- return TRUE; // No further processing of message is needed
+ crm_trace("Relay message %s to %s",
+ ref, pcmk__s(host_to, "all peers"));
+ crm_log_xml_trace(msg, "relayed");
+ send_cluster_message(node_to, dest, msg, TRUE);
+ return TRUE;
}
// Return true if field contains a positive integer
@@ -546,6 +574,7 @@ controld_authorize_ipc_message(const xmlNode *client_msg, pcmk__client_t *curr_c
}
crm_trace("Validated IPC hello from client %s", client_name);
+ crm_log_xml_trace(client_msg, "hello");
if (curr_client) {
curr_client->userdata = strdup(client_name);
}
@@ -553,6 +582,7 @@ controld_authorize_ipc_message(const xmlNode *client_msg, pcmk__client_t *curr_c
return false;
rejected:
+ crm_log_xml_trace(client_msg, "rejected");
if (curr_client) {
qb_ipcs_disconnect(curr_client->ipcs);
}
@@ -575,7 +605,9 @@ handle_message(xmlNode *msg, enum crmd_fsa_cause cause)
return I_NULL;
}
- crm_err("Unknown message type: %s", type);
+ crm_warn("Ignoring message with unknown " F_CRM_MSG_TYPE " '%s'",
+ pcmk__s(type, ""));
+ crm_log_xml_trace(msg, "bad");
return I_NULL;
}
@@ -701,7 +733,7 @@ handle_lrm_delete(xmlNode *stored_msg)
crm_info("Notifying %s on %s that %s was%s deleted",
from_sys, (from_host? from_host : "local node"), rsc_id,
((rc == pcmk_rc_ok)? "" : " not"));
- op = lrmd_new_event(rsc_id, CRMD_ACTION_DELETE, 0);
+ op = lrmd_new_event(rsc_id, PCMK_ACTION_DELETE, 0);
op->type = lrmd_event_exec_complete;
op->user_data = strdup(transition? transition : FAKE_TE_ID);
op->params = pcmk__strkey_table(free, free);
@@ -732,7 +764,7 @@ handle_remote_state(const xmlNode *msg)
bool remote_is_up = false;
int rc = pcmk_rc_ok;
- rc = pcmk__xe_get_bool_attr(msg, XML_NODE_IN_CLUSTER, &remote_is_up);
+ rc = pcmk__xe_get_bool_attr(msg, PCMK__XA_IN_CCM, &remote_is_up);
CRM_CHECK(remote_uname && rc == pcmk_rc_ok, return I_NULL);
@@ -818,7 +850,7 @@ handle_node_list(const xmlNode *request)
crm_xml_add_ll(xml, XML_ATTR_ID, (long long) node->id); // uint32_t
crm_xml_add(xml, XML_ATTR_UNAME, node->uname);
- crm_xml_add(xml, XML_NODE_IN_CLUSTER, node->state);
+ crm_xml_add(xml, PCMK__XA_IN_CCM, node->state);
}
// Create and send reply
@@ -875,7 +907,7 @@ handle_node_info_request(const xmlNode *msg)
if (node) {
crm_xml_add(reply_data, XML_ATTR_ID, node->uuid);
crm_xml_add(reply_data, XML_ATTR_UNAME, node->uname);
- crm_xml_add(reply_data, XML_NODE_IS_PEER, node->state);
+ crm_xml_add(reply_data, PCMK__XA_CRMD, node->state);
pcmk__xe_set_bool_attr(reply_data, XML_NODE_IS_REMOTE,
pcmk_is_set(node->flags, crm_remote_node));
}
@@ -988,14 +1020,15 @@ handle_request(xmlNode *stored_msg, enum crmd_fsa_cause cause)
/* Optimize this for the DC - it has the most to do */
+ crm_log_xml_trace(stored_msg, "request");
if (op == NULL) {
- crm_log_xml_warn(stored_msg, "[request without " F_CRM_TASK "]");
+ crm_warn("Ignoring request without " F_CRM_TASK);
return I_NULL;
}
if (strcmp(op, CRM_OP_SHUTDOWN_REQ) == 0) {
const char *from = crm_element_value(stored_msg, F_CRM_HOST_FROM);
- crm_node_t *node = pcmk__search_cluster_node_cache(0, from);
+ crm_node_t *node = pcmk__search_cluster_node_cache(0, from, NULL);
pcmk__update_peer_expected(__func__, node, CRMD_JOINSTATE_DOWN);
if(AM_I_DC == FALSE) {
@@ -1062,11 +1095,6 @@ handle_request(xmlNode *stored_msg, enum crmd_fsa_cause cause)
if (controld_globals.fsa_state == S_HALT) {
crm_debug("Forcing an election from S_HALT");
return I_ELECTION;
-#if 0
- } else if (AM_I_DC) {
- /* This is the old way of doing things but what is gained? */
- return I_ELECTION;
-#endif
}
} else if (strcmp(op, CRM_OP_JOIN_OFFER) == 0) {
@@ -1157,8 +1185,9 @@ handle_response(xmlNode *stored_msg)
{
const char *op = crm_element_value(stored_msg, F_CRM_TASK);
+ crm_log_xml_trace(stored_msg, "reply");
if (op == NULL) {
- crm_log_xml_err(stored_msg, "Bad message");
+ crm_warn("Ignoring reply without " F_CRM_TASK);
} else if (AM_I_DC && strcmp(op, CRM_OP_PECALC) == 0) {
// Check whether scheduler answer been superseded by subsequent request
@@ -1295,7 +1324,7 @@ broadcast_remote_state_message(const char *node_name, bool node_up)
node_name, node_up? "coming up" : "going down");
crm_xml_add(msg, XML_ATTR_ID, node_name);
- pcmk__xe_set_bool_attr(msg, XML_NODE_IN_CLUSTER, node_up);
+ pcmk__xe_set_bool_attr(msg, PCMK__XA_IN_CCM, node_up);
if (node_up) {
crm_xml_add(msg, PCMK__XA_CONN_HOST, controld_globals.our_nodename);
diff --git a/daemons/controld/controld_metadata.c b/daemons/controld/controld_metadata.c
index 240a978..c813ceb 100644
--- a/daemons/controld/controld_metadata.c
+++ b/daemons/controld/controld_metadata.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2017-2022 the Pacemaker project contributors
+ * Copyright 2017-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -172,7 +172,7 @@ controld_cache_metadata(GHashTable *mdc, const lrmd_rsc_info_t *rsc,
const char *action_name = crm_element_value(match, "name");
- if (pcmk__str_eq(action_name, CRMD_ACTION_RELOAD_AGENT,
+ if (pcmk__str_eq(action_name, PCMK_ACTION_RELOAD_AGENT,
pcmk__str_none)) {
if (ocf1_1) {
controld_set_ra_flags(md, key, ra_supports_reload_agent);
@@ -181,7 +181,7 @@ controld_cache_metadata(GHashTable *mdc, const lrmd_rsc_info_t *rsc,
"because it does not support OCF 1.1 or later", key);
}
- } else if (!ocf1_1 && pcmk__str_eq(action_name, CRMD_ACTION_RELOAD,
+ } else if (!ocf1_1 && pcmk__str_eq(action_name, PCMK_ACTION_RELOAD,
pcmk__str_casei)) {
controld_set_ra_flags(md, key, ra_supports_legacy_reload);
}
diff --git a/daemons/controld/controld_remote_ra.c b/daemons/controld/controld_remote_ra.c
index f24b755..d692ef6 100644
--- a/daemons/controld/controld_remote_ra.c
+++ b/daemons/controld/controld_remote_ra.c
@@ -280,6 +280,7 @@ remote_node_up(const char *node_name)
int call_opt;
xmlNode *update, *state;
crm_node_t *node;
+ lrm_state_t *connection_rsc = NULL;
CRM_CHECK(node_name != NULL, return);
crm_info("Announcing Pacemaker Remote node %s", node_name);
@@ -301,6 +302,20 @@ remote_node_up(const char *node_name)
purge_remote_node_attrs(call_opt, node);
pcmk__update_peer_state(__func__, node, CRM_NODE_MEMBER, 0);
+ /* Apply any start state that we were given from the environment on the
+ * remote node.
+ */
+ connection_rsc = lrm_state_find(node->uname);
+
+ if (connection_rsc != NULL) {
+ lrmd_t *lrm = connection_rsc->conn;
+ const char *start_state = lrmd__node_start_state(lrm);
+
+ if (start_state) {
+ set_join_state(start_state, node->uname, node->uuid, true);
+ }
+ }
+
/* pacemaker_remote nodes don't participate in the membership layer,
* so cluster nodes don't automatically get notified when they come and go.
* We send a cluster message to the DC, and update the CIB node state entry,
@@ -392,10 +407,11 @@ check_remote_node_state(const remote_ra_cmd_t *cmd)
return;
}
- if (pcmk__str_eq(cmd->action, "start", pcmk__str_casei)) {
+ if (pcmk__str_eq(cmd->action, PCMK_ACTION_START, pcmk__str_casei)) {
remote_node_up(cmd->rsc_id);
- } else if (pcmk__str_eq(cmd->action, "migrate_from", pcmk__str_casei)) {
+ } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_MIGRATE_FROM,
+ pcmk__str_casei)) {
/* After a successful migration, we don't need to do remote_node_up()
* because the DC already knows the node is up, and we don't want to
* clear LRM history etc. We do need to add the remote node to this
@@ -408,7 +424,7 @@ check_remote_node_state(const remote_ra_cmd_t *cmd)
CRM_CHECK(node != NULL, return);
pcmk__update_peer_state(__func__, node, CRM_NODE_MEMBER, 0);
- } else if (pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) {
+ } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_casei)) {
lrm_state_t *lrm_state = lrm_state_find(cmd->rsc_id);
remote_ra_data_t *ra_data = lrm_state? lrm_state->remote_ra_data : NULL;
@@ -510,7 +526,8 @@ retry_start_cmd_cb(gpointer data)
return FALSE;
}
cmd = ra_data->cur_cmd;
- if (!pcmk__strcase_any_of(cmd->action, "start", "migrate_from", NULL)) {
+ if (!pcmk__strcase_any_of(cmd->action, PCMK_ACTION_START,
+ PCMK_ACTION_MIGRATE_FROM, NULL)) {
return FALSE;
}
update_remaining_timeout(cmd);
@@ -681,7 +698,8 @@ remote_lrm_op_callback(lrmd_event_data_t * op)
handle_remote_ra_stop(lrm_state, NULL);
remote_node_down(lrm_state->node_name, DOWN_KEEP_LRM);
/* now fake the reply of a successful 'stop' */
- synthesize_lrmd_success(NULL, lrm_state->node_name, "stop");
+ synthesize_lrmd_success(NULL, lrm_state->node_name,
+ PCMK_ACTION_STOP);
}
return;
}
@@ -695,8 +713,9 @@ remote_lrm_op_callback(lrmd_event_data_t * op)
/* Start actions and migrate from actions complete after connection
* comes back to us. */
- if (op->type == lrmd_event_connect && pcmk__strcase_any_of(cmd->action, "start",
- "migrate_from", NULL)) {
+ if ((op->type == lrmd_event_connect)
+ && pcmk__strcase_any_of(cmd->action, PCMK_ACTION_START,
+ PCMK_ACTION_MIGRATE_FROM, NULL)) {
if (op->connection_rc < 0) {
update_remaining_timeout(cmd);
@@ -731,7 +750,9 @@ remote_lrm_op_callback(lrmd_event_data_t * op)
report_remote_ra_result(cmd);
cmd_handled = TRUE;
- } else if (op->type == lrmd_event_poke && pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
+ } else if ((op->type == lrmd_event_poke)
+ && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
+ pcmk__str_casei)) {
if (cmd->monitor_timeout_id) {
g_source_remove(cmd->monitor_timeout_id);
@@ -758,7 +779,9 @@ remote_lrm_op_callback(lrmd_event_data_t * op)
}
cmd_handled = TRUE;
- } else if (op->type == lrmd_event_disconnect && pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
+ } else if ((op->type == lrmd_event_disconnect)
+ && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
+ pcmk__str_casei)) {
if (pcmk_is_set(ra_data->status, remote_active) &&
!pcmk_is_set(cmd->status, cmd_cancel)) {
pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
@@ -771,7 +794,9 @@ remote_lrm_op_callback(lrmd_event_data_t * op)
}
cmd_handled = TRUE;
- } else if (op->type == lrmd_event_new_client && pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) {
+ } else if ((op->type == lrmd_event_new_client)
+ && pcmk__str_eq(cmd->action, PCMK_ACTION_STOP,
+ pcmk__str_casei)) {
handle_remote_ra_stop(lrm_state, cmd);
cmd_handled = TRUE;
@@ -882,7 +907,8 @@ handle_remote_ra_exec(gpointer user_data)
ra_data->cmds = g_list_remove_link(ra_data->cmds, first);
g_list_free_1(first);
- if (!strcmp(cmd->action, "start") || !strcmp(cmd->action, "migrate_from")) {
+ if (pcmk__str_any_of(cmd->action, PCMK_ACTION_START,
+ PCMK_ACTION_MIGRATE_FROM, NULL)) {
lrm_remote_clear_flags(lrm_state, expect_takeover | takeover_complete);
if (handle_remote_ra_start(lrm_state, cmd,
cmd->timeout) == pcmk_rc_ok) {
@@ -894,7 +920,7 @@ handle_remote_ra_exec(gpointer user_data)
}
report_remote_ra_result(cmd);
- } else if (!strcmp(cmd->action, "monitor")) {
+ } else if (!strcmp(cmd->action, PCMK_ACTION_MONITOR)) {
if (lrm_state_is_connected(lrm_state) == TRUE) {
rc = lrm_state_poke_connection(lrm_state);
@@ -917,7 +943,7 @@ handle_remote_ra_exec(gpointer user_data)
}
report_remote_ra_result(cmd);
- } else if (!strcmp(cmd->action, "stop")) {
+ } else if (!strcmp(cmd->action, PCMK_ACTION_STOP)) {
if (pcmk_is_set(ra_data->status, expect_takeover)) {
/* briefly wait on stop for the takeover event to occur. If the
@@ -933,13 +959,14 @@ handle_remote_ra_exec(gpointer user_data)
handle_remote_ra_stop(lrm_state, cmd);
- } else if (!strcmp(cmd->action, "migrate_to")) {
+ } else if (strcmp(cmd->action, PCMK_ACTION_MIGRATE_TO) == 0) {
lrm_remote_clear_flags(lrm_state, takeover_complete);
lrm_remote_set_flags(lrm_state, expect_takeover);
pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
report_remote_ra_result(cmd);
- } else if (pcmk__str_any_of(cmd->action, CRMD_ACTION_RELOAD,
- CRMD_ACTION_RELOAD_AGENT, NULL)) {
+
+ } else if (pcmk__str_any_of(cmd->action, PCMK_ACTION_RELOAD,
+ PCMK_ACTION_RELOAD_AGENT, NULL)) {
/* Currently the only reloadable parameter is reconnect_interval,
* which is only used by the scheduler via the CIB, so reloads are a
* no-op.
@@ -1029,13 +1056,13 @@ static gboolean
is_remote_ra_supported_action(const char *action)
{
return pcmk__str_any_of(action,
- CRMD_ACTION_START,
- CRMD_ACTION_STOP,
- CRMD_ACTION_STATUS,
- CRMD_ACTION_MIGRATE,
- CRMD_ACTION_MIGRATED,
- CRMD_ACTION_RELOAD_AGENT,
- CRMD_ACTION_RELOAD,
+ PCMK_ACTION_START,
+ PCMK_ACTION_STOP,
+ PCMK_ACTION_MONITOR,
+ PCMK_ACTION_MIGRATE_TO,
+ PCMK_ACTION_MIGRATE_FROM,
+ PCMK_ACTION_RELOAD_AGENT,
+ PCMK_ACTION_RELOAD,
NULL);
}
@@ -1048,7 +1075,9 @@ fail_all_monitor_cmds(GList * list)
for (gIter = list; gIter != NULL; gIter = gIter->next) {
cmd = gIter->data;
- if ((cmd->interval_ms > 0) && pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
+ if ((cmd->interval_ms > 0)
+ && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
+ pcmk__str_casei)) {
rm_list = g_list_append(rm_list, cmd);
}
}
@@ -1137,8 +1166,9 @@ handle_dup_monitor(remote_ra_data_t *ra_data, guint interval_ms,
if (ra_data->cur_cmd &&
!pcmk_is_set(ra_data->cur_cmd->status, cmd_cancel) &&
- (ra_data->cur_cmd->interval_ms == interval_ms) &&
- pcmk__str_eq(ra_data->cur_cmd->action, "monitor", pcmk__str_casei)) {
+ (ra_data->cur_cmd->interval_ms == interval_ms)
+ && pcmk__str_eq(ra_data->cur_cmd->action, PCMK_ACTION_MONITOR,
+ pcmk__str_casei)) {
cmd = ra_data->cur_cmd;
goto handle_dup;
@@ -1147,7 +1177,8 @@ handle_dup_monitor(remote_ra_data_t *ra_data, guint interval_ms,
for (gIter = ra_data->recurring_cmds; gIter != NULL; gIter = gIter->next) {
cmd = gIter->data;
if ((cmd->interval_ms == interval_ms)
- && pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
+ && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
+ pcmk__str_casei)) {
goto handle_dup;
}
}
@@ -1155,7 +1186,8 @@ handle_dup_monitor(remote_ra_data_t *ra_data, guint interval_ms,
for (gIter = ra_data->cmds; gIter != NULL; gIter = gIter->next) {
cmd = gIter->data;
if ((cmd->interval_ms == interval_ms)
- && pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
+ && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
+ pcmk__str_casei)) {
goto handle_dup;
}
}
@@ -1165,7 +1197,7 @@ handle_dup_monitor(remote_ra_data_t *ra_data, guint interval_ms,
handle_dup:
crm_trace("merging duplicate monitor cmd " PCMK__OP_FMT,
- cmd->rsc_id, "monitor", interval_ms);
+ cmd->rsc_id, PCMK_ACTION_MONITOR, interval_ms);
/* update the userdata */
if (userdata) {
@@ -1385,7 +1417,7 @@ remote_ra_maintenance(lrm_state_t * lrm_state, gboolean maintenance)
}
#define XPATH_PSEUDO_MAINTENANCE "//" XML_GRAPH_TAG_PSEUDO_EVENT \
- "[@" XML_LRM_ATTR_TASK "='" CRM_OP_MAINTENANCE_NODES "']/" \
+ "[@" XML_LRM_ATTR_TASK "='" PCMK_ACTION_MAINTENANCE_NODES "']/" \
XML_GRAPH_TAG_MAINTENANCE
/*!
@@ -1403,9 +1435,10 @@ remote_ra_process_maintenance_nodes(xmlNode *xml)
xmlNode *node;
int cnt = 0, cnt_remote = 0;
- for (node =
- first_named_child(getXpathResult(search, 0), XML_CIB_TAG_NODE);
- node != NULL; node = pcmk__xml_next(node)) {
+ for (node = first_named_child(getXpathResult(search, 0),
+ XML_CIB_TAG_NODE);
+ node != NULL; node = crm_next_same_xml(node)) {
+
lrm_state_t *lrm_state = lrm_state_find(ID(node));
cnt++;
diff --git a/daemons/controld/controld_schedulerd.c b/daemons/controld/controld_schedulerd.c
index 912f9a5..8aca83f 100644
--- a/daemons/controld/controld_schedulerd.c
+++ b/daemons/controld/controld_schedulerd.c
@@ -45,11 +45,11 @@ controld_shutdown_schedulerd_ipc(void)
* \internal
* \brief Save CIB query result to file, raising FSA error
*
- * \param[in] msg Ignored
- * \param[in] call_id Call ID of CIB query
- * \param[in] rc Return code of CIB query
- * \param[in,out] output Result of CIB query
- * \param[in] user_data Unique identifier for filename
+ * \param[in] msg Ignored
+ * \param[in] call_id Call ID of CIB query
+ * \param[in] rc Return code of CIB query
+ * \param[in] output Result of CIB query
+ * \param[in] user_data Unique identifier for filename
*
* \note This is intended to be called after a scheduler connection fails.
*/
@@ -90,8 +90,9 @@ handle_disconnect(void)
int rc = pcmk_ok;
char *uuid_str = crm_generate_uuid();
- crm_crit("Connection to the scheduler failed "
- CRM_XS " uuid=%s", uuid_str);
+ crm_crit("Lost connection to the scheduler "
+ CRM_XS " CIB will be saved to " PE_STATE_DIR "/pe-core-%s.bz2",
+ uuid_str);
/*
* The scheduler died...
@@ -107,9 +108,6 @@ handle_disconnect(void)
NULL, NULL,
cib_scope_local);
fsa_register_cib_callback(rc, uuid_str, save_cib_contents);
-
- } else {
- crm_info("Connection to the scheduler released");
}
controld_clear_fsa_input_flags(R_PE_CONNECTED);
@@ -199,9 +197,10 @@ new_schedulerd_ipc_connection(void)
pcmk_register_ipc_callback(schedulerd_api, scheduler_event_callback, NULL);
- rc = pcmk_connect_ipc(schedulerd_api, pcmk_ipc_dispatch_main);
+ rc = pcmk__connect_ipc(schedulerd_api, pcmk_ipc_dispatch_main, 3);
if (rc != pcmk_rc_ok) {
- crm_err("Error connecting to the scheduler: %s", pcmk_rc_str(rc));
+ crm_err("Error connecting to %s: %s",
+ pcmk_ipc_name(schedulerd_api, true), pcmk_rc_str(rc));
return false;
}
diff --git a/daemons/controld/controld_te_actions.c b/daemons/controld/controld_te_actions.c
index d8cfcad..fe6b744 100644
--- a/daemons/controld/controld_te_actions.c
+++ b/daemons/controld/controld_te_actions.c
@@ -47,7 +47,7 @@ execute_pseudo_action(pcmk__graph_t *graph, pcmk__graph_action_t *pseudo)
const char *task = crm_element_value(pseudo->xml, XML_LRM_ATTR_TASK);
/* send to peers as well? */
- if (pcmk__str_eq(task, CRM_OP_MAINTENANCE_NODES, pcmk__str_casei)) {
+ if (pcmk__str_eq(task, PCMK_ACTION_MAINTENANCE_NODES, pcmk__str_casei)) {
GHashTableIter iter;
crm_node_t *node = NULL;
@@ -125,7 +125,7 @@ execute_cluster_action(pcmk__graph_t *graph, pcmk__graph_action_t *action)
router_node = crm_element_value(action->xml, XML_LRM_ATTR_ROUTER_NODE);
if (router_node == NULL) {
router_node = on_node;
- if (pcmk__str_eq(task, CRM_OP_LRM_DELETE, pcmk__str_none)) {
+ if (pcmk__str_eq(task, PCMK_ACTION_LRM_DELETE, pcmk__str_none)) {
const char *mode = crm_element_value(action->xml, PCMK__XA_MODE);
if (pcmk__str_eq(mode, XML_TAG_CIB, pcmk__str_none)) {
@@ -148,7 +148,8 @@ execute_cluster_action(pcmk__graph_t *graph, pcmk__graph_action_t *action)
id, task, on_node, (is_local? " locally" : ""),
(no_wait? " without waiting" : ""));
- if (is_local && pcmk__str_eq(task, CRM_OP_SHUTDOWN, pcmk__str_none)) {
+ if (is_local
+ && pcmk__str_eq(task, PCMK_ACTION_DO_SHUTDOWN, pcmk__str_none)) {
/* defer until everything else completes */
crm_info("Controller request '%s' is a local shutdown", id);
graph->completion_action = pcmk__graph_shutdown;
@@ -156,7 +157,7 @@ execute_cluster_action(pcmk__graph_t *graph, pcmk__graph_action_t *action)
te_action_confirmed(action, graph);
return pcmk_rc_ok;
- } else if (pcmk__str_eq(task, CRM_OP_SHUTDOWN, pcmk__str_none)) {
+ } else if (pcmk__str_eq(task, PCMK_ACTION_DO_SHUTDOWN, pcmk__str_none)) {
crm_node_t *peer = crm_get_peer(0, router_node);
pcmk__update_peer_expected(__func__, peer, CRMD_JOINSTATE_DOWN);
@@ -318,7 +319,7 @@ controld_record_action_timeout(pcmk__graph_action_t *action)
int target_rc = get_target_rc(action);
crm_warn("%s %d: %s on %s timed out",
- crm_element_name(action->xml), action->id, task_uuid, target);
+ action->xml->name, action->id, task_uuid, target);
op = synthesize_timeout_event(action, target_rc);
controld_record_action_event(action, op);
@@ -528,9 +529,9 @@ te_update_job_count(pcmk__graph_action_t *action, int offset)
* the connection resources */
target = crm_element_value(action->xml, XML_LRM_ATTR_ROUTER_NODE);
- if ((target == NULL) && pcmk__strcase_any_of(task, CRMD_ACTION_MIGRATE,
- CRMD_ACTION_MIGRATED, NULL)) {
-
+ if ((target == NULL)
+ && pcmk__strcase_any_of(task, PCMK_ACTION_MIGRATE_TO,
+ PCMK_ACTION_MIGRATE_FROM, NULL)) {
const char *t1 = crm_meta_value(action->params, XML_LRM_ATTR_MIGRATE_SOURCE);
const char *t2 = crm_meta_value(action->params, XML_LRM_ATTR_MIGRATE_TARGET);
@@ -586,7 +587,8 @@ allowed_on_node(const pcmk__graph_t *graph, const pcmk__graph_action_t *action,
return false;
} else if(graph->migration_limit > 0 && r->migrate_jobs >= graph->migration_limit) {
- if (pcmk__strcase_any_of(task, CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED, NULL)) {
+ if (pcmk__strcase_any_of(task, PCMK_ACTION_MIGRATE_TO,
+ PCMK_ACTION_MIGRATE_FROM, NULL)) {
crm_trace("Peer %s is over their migration job limit of %d (%d): deferring %s",
target, graph->migration_limit, r->migrate_jobs, id);
return false;
@@ -624,8 +626,9 @@ graph_action_allowed(pcmk__graph_t *graph, pcmk__graph_action_t *action)
* the connection resources */
target = crm_element_value(action->xml, XML_LRM_ATTR_ROUTER_NODE);
- if ((target == NULL) && pcmk__strcase_any_of(task, CRMD_ACTION_MIGRATE,
- CRMD_ACTION_MIGRATED, NULL)) {
+ if ((target == NULL)
+ && pcmk__strcase_any_of(task, PCMK_ACTION_MIGRATE_TO,
+ PCMK_ACTION_MIGRATE_FROM, NULL)) {
target = crm_meta_value(action->params, XML_LRM_ATTR_MIGRATE_SOURCE);
if (!allowed_on_node(graph, action, target)) {
return false;
diff --git a/daemons/controld/controld_te_callbacks.c b/daemons/controld/controld_te_callbacks.c
index cf9de83..c26e757 100644
--- a/daemons/controld/controld_te_callbacks.c
+++ b/daemons/controld/controld_te_callbacks.c
@@ -225,12 +225,12 @@ process_resource_updates(const char *node, xmlNode *xml, xmlNode *change,
return;
}
- if (strcmp(TYPE(xml), XML_CIB_TAG_LRM) == 0) {
+ if (pcmk__xe_is(xml, XML_CIB_TAG_LRM)) {
xml = first_named_child(xml, XML_LRM_TAG_RESOURCES);
CRM_CHECK(xml != NULL, return);
}
- CRM_CHECK(strcmp(TYPE(xml), XML_LRM_TAG_RESOURCES) == 0, return);
+ CRM_CHECK(pcmk__xe_is(xml, XML_LRM_TAG_RESOURCES), return);
/*
* Updates by, or in response to, TE actions will never contain updates
@@ -558,7 +558,7 @@ te_update_diff(const char *event, xmlNode * msg)
p_del[0], p_del[1], p_del[2], p_add[0], p_add[1], p_add[2],
fsa_state2string(controld_globals.fsa_state));
- crm_element_value_int(diff, "format", &format);
+ crm_element_value_int(diff, PCMK_XA_FORMAT, &format);
switch (format) {
case 1:
te_update_diff_v1(event, diff);
diff --git a/daemons/controld/controld_te_events.c b/daemons/controld/controld_te_events.c
index d4e2b0f..28977c0 100644
--- a/daemons/controld/controld_te_events.c
+++ b/daemons/controld/controld_te_events.c
@@ -111,7 +111,7 @@ fail_incompletable_actions(pcmk__graph_t *graph, const char *down_node)
} else if (action->type == pcmk__cluster_graph_action) {
const char *task = crm_element_value(action->xml, XML_LRM_ATTR_TASK);
- if (pcmk__str_eq(task, CRM_OP_FENCE, pcmk__str_casei)) {
+ if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_casei)) {
continue;
}
}
@@ -196,16 +196,16 @@ update_failcount(const xmlNode *event, const char *event_node_uuid, int rc,
/* Decide whether update is necessary and what value to use */
if ((interval_ms > 0)
- || pcmk__str_eq(task, CRMD_ACTION_PROMOTE, pcmk__str_none)
- || pcmk__str_eq(task, CRMD_ACTION_DEMOTE, pcmk__str_none)) {
+ || pcmk__str_eq(task, PCMK_ACTION_PROMOTE, pcmk__str_none)
+ || pcmk__str_eq(task, PCMK_ACTION_DEMOTE, pcmk__str_none)) {
do_update = TRUE;
- } else if (pcmk__str_eq(task, CRMD_ACTION_START, pcmk__str_none)) {
+ } else if (pcmk__str_eq(task, PCMK_ACTION_START, pcmk__str_none)) {
do_update = TRUE;
value = pcmk__s(controld_globals.transition_graph->failed_start_offset,
CRM_INFINITY_S);
- } else if (pcmk__str_eq(task, CRMD_ACTION_STOP, pcmk__str_none)) {
+ } else if (pcmk__str_eq(task, PCMK_ACTION_STOP, pcmk__str_none)) {
do_update = TRUE;
value = pcmk__s(controld_globals.transition_graph->failed_stop_offset,
CRM_INFINITY_S);
@@ -314,7 +314,7 @@ get_cancel_action(const char *id, const char *node)
pcmk__graph_action_t *action = (pcmk__graph_action_t *) gIter2->data;
task = crm_element_value(action->xml, XML_LRM_ATTR_TASK);
- if (!pcmk__str_eq(CRMD_ACTION_CANCEL, task, pcmk__str_casei)) {
+ if (!pcmk__str_eq(PCMK_ACTION_CANCEL, task, pcmk__str_casei)) {
continue;
}
diff --git a/daemons/controld/controld_te_utils.c b/daemons/controld/controld_te_utils.c
index ecbc0b2..5a9f029 100644
--- a/daemons/controld/controld_te_utils.c
+++ b/daemons/controld/controld_te_utils.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -17,6 +17,8 @@
//! Triggers transition graph processing
static crm_trigger_t *transition_trigger = NULL;
+static GHashTable *node_pending_timers = NULL;
+
gboolean
stop_te_timer(pcmk__graph_action_t *action)
{
@@ -132,11 +134,13 @@ static struct abort_timer_s {
static gboolean
abort_timer_popped(gpointer data)
{
- if (AM_I_DC && (abort_timer.aborted == FALSE)) {
- abort_transition(abort_timer.priority, abort_timer.action,
- abort_timer.text, NULL);
+ struct abort_timer_s *abort_timer = (struct abort_timer_s *) data;
+
+ if (AM_I_DC && (abort_timer->aborted == FALSE)) {
+ abort_transition(abort_timer->priority, abort_timer->action,
+ abort_timer->text, NULL);
}
- abort_timer.id = 0;
+ abort_timer->id = 0;
return FALSE; // do not immediately reschedule timer
}
@@ -158,7 +162,143 @@ abort_after_delay(int abort_priority, enum pcmk__graph_next abort_action,
abort_timer.priority = abort_priority;
abort_timer.action = abort_action;
abort_timer.text = abort_text;
- abort_timer.id = g_timeout_add(delay_ms, abort_timer_popped, NULL);
+ abort_timer.id = g_timeout_add(delay_ms, abort_timer_popped, &abort_timer);
+}
+
+static void
+free_node_pending_timer(gpointer data)
+{
+ struct abort_timer_s *node_pending_timer = (struct abort_timer_s *) data;
+
+ if (node_pending_timer->id != 0) {
+ g_source_remove(node_pending_timer->id);
+ node_pending_timer->id = 0;
+ }
+
+ free(node_pending_timer);
+}
+
+static gboolean
+node_pending_timer_popped(gpointer key)
+{
+ struct abort_timer_s *node_pending_timer = NULL;
+
+ if (node_pending_timers == NULL) {
+ return FALSE;
+ }
+
+ node_pending_timer = g_hash_table_lookup(node_pending_timers, key);
+ if (node_pending_timer == NULL) {
+ return FALSE;
+ }
+
+ crm_warn("Node with id '%s' pending timed out (%us) on joining the process "
+ "group",
+ (const char *) key, controld_globals.node_pending_timeout);
+
+ if (controld_globals.node_pending_timeout > 0) {
+ abort_timer_popped(node_pending_timer);
+ }
+
+ g_hash_table_remove(node_pending_timers, key);
+
+ return FALSE; // do not reschedule timer
+}
+
+static void
+init_node_pending_timer(const crm_node_t *node, guint timeout)
+{
+ struct abort_timer_s *node_pending_timer = NULL;
+ char *key = NULL;
+
+ if (node->uuid == NULL) {
+ return;
+ }
+
+ if (node_pending_timers == NULL) {
+ node_pending_timers = pcmk__strikey_table(free,
+ free_node_pending_timer);
+
+ // The timer is somehow already existing
+ } else if (g_hash_table_lookup(node_pending_timers, node->uuid) != NULL) {
+ return;
+ }
+
+ crm_notice("Waiting for pending %s with id '%s' to join the process "
+ "group (timeout=%us)",
+ node->uname ? node->uname : "node", node->uuid,
+ controld_globals.node_pending_timeout);
+
+ node_pending_timer = calloc(1, sizeof(struct abort_timer_s));
+ CRM_ASSERT(node_pending_timer != NULL);
+
+ node_pending_timer->aborted = FALSE;
+ node_pending_timer->priority = INFINITY;
+ node_pending_timer->action = pcmk__graph_restart;
+ node_pending_timer->text = "Node pending timed out";
+
+ key = strdup(node->uuid);
+ CRM_ASSERT(key != NULL);
+
+ g_hash_table_replace(node_pending_timers, key, node_pending_timer);
+
+ node_pending_timer->id = g_timeout_add_seconds(timeout,
+ node_pending_timer_popped,
+ key);
+ CRM_ASSERT(node_pending_timer->id != 0);
+}
+
+static void
+remove_node_pending_timer(const char *node_uuid)
+{
+ if (node_pending_timers == NULL) {
+ return;
+ }
+
+ g_hash_table_remove(node_pending_timers, node_uuid);
+}
+
+void
+controld_node_pending_timer(const crm_node_t *node)
+{
+ long long remaining_timeout = 0;
+
+ /* If the node is not an active cluster node, is leaving the cluster, or is
+ * already part of CPG, or node-pending-timeout is disabled, free any
+ * node pending timer for it.
+ */
+ if (pcmk_is_set(node->flags, crm_remote_node)
+ || (node->when_member <= 1) || (node->when_online > 0)
+ || (controld_globals.node_pending_timeout == 0)) {
+ remove_node_pending_timer(node->uuid);
+ return;
+ }
+
+ // Node is a cluster member but offline in CPG
+
+ remaining_timeout = node->when_member - time(NULL)
+ + controld_globals.node_pending_timeout;
+
+ /* It already passed node pending timeout somehow.
+ * Free any node pending timer of it.
+ */
+ if (remaining_timeout <= 0) {
+ remove_node_pending_timer(node->uuid);
+ return;
+ }
+
+ init_node_pending_timer(node, remaining_timeout);
+}
+
+void
+controld_free_node_pending_timers(void)
+{
+ if (node_pending_timers == NULL) {
+ return;
+ }
+
+ g_hash_table_destroy(node_pending_timers);
+ node_pending_timers = NULL;
}
static const char *
@@ -246,7 +386,7 @@ abort_transition_graph(int abort_priority, enum pcmk__graph_next abort_action,
const xmlNode *search = NULL;
for(search = reason; search; search = search->parent) {
- if (pcmk__str_eq(XML_TAG_DIFF, TYPE(search), pcmk__str_casei)) {
+ if (pcmk__xe_is(search, XML_TAG_DIFF)) {
diff = search;
break;
}
@@ -255,7 +395,7 @@ abort_transition_graph(int abort_priority, enum pcmk__graph_next abort_action,
if(diff) {
xml_patch_versions(diff, add, del);
for(search = reason; search; search = search->parent) {
- if (pcmk__str_eq(XML_DIFF_CHANGE, TYPE(search), pcmk__str_casei)) {
+ if (pcmk__xe_is(search, XML_DIFF_CHANGE)) {
change = search;
break;
}
@@ -276,14 +416,13 @@ abort_transition_graph(int abort_priority, enum pcmk__graph_next abort_action,
do_crm_log(level, "Transition %d aborted by %s.%s: %s "
CRM_XS " cib=%d.%d.%d source=%s:%d path=%s complete=%s",
- controld_globals.transition_graph->id, TYPE(reason),
+ controld_globals.transition_graph->id, reason->name,
ID(reason), abort_text, add[0], add[1], add[2], fn, line,
(const char *) local_path->str,
pcmk__btoa(controld_globals.transition_graph->complete));
g_string_free(local_path, TRUE);
} else {
- const char *kind = NULL;
const char *op = crm_element_value(change, XML_DIFF_OP);
const char *path = crm_element_value(change, XML_DIFF_PATH);
@@ -297,9 +436,9 @@ abort_transition_graph(int abort_priority, enum pcmk__graph_next abort_action,
reason = reason->children;
}
}
+ CRM_CHECK(reason != NULL, goto done);
}
- kind = TYPE(reason);
if(strcmp(op, "delete") == 0) {
const char *shortpath = strrchr(path, '/');
@@ -310,7 +449,7 @@ abort_transition_graph(int abort_priority, enum pcmk__graph_next abort_action,
add[0], add[1], add[2], fn, line, path,
pcmk__btoa(controld_globals.transition_graph->complete));
- } else if (pcmk__str_eq(XML_CIB_TAG_NVPAIR, kind, pcmk__str_none)) {
+ } else if (pcmk__xe_is(reason, XML_CIB_TAG_NVPAIR)) {
do_crm_log(level, "Transition %d aborted by %s doing %s %s=%s: %s "
CRM_XS " cib=%d.%d.%d source=%s:%d path=%s complete=%s",
controld_globals.transition_graph->id,
@@ -320,7 +459,7 @@ abort_transition_graph(int abort_priority, enum pcmk__graph_next abort_action,
abort_text, add[0], add[1], add[2], fn, line, path,
pcmk__btoa(controld_globals.transition_graph->complete));
- } else if (pcmk__str_eq(XML_LRM_TAG_RSC_OP, kind, pcmk__str_none)) {
+ } else if (pcmk__xe_is(reason, XML_LRM_TAG_RSC_OP)) {
const char *magic = crm_element_value(reason, XML_ATTR_TRANSITION_MAGIC);
do_crm_log(level, "Transition %d aborted by operation %s '%s' on %s: %s "
@@ -331,14 +470,15 @@ abort_transition_graph(int abort_priority, enum pcmk__graph_next abort_action,
magic, add[0], add[1], add[2], fn, line,
pcmk__btoa(controld_globals.transition_graph->complete));
- } else if (pcmk__str_any_of(kind, XML_CIB_TAG_STATE, XML_CIB_TAG_NODE, NULL)) {
+ } else if (pcmk__str_any_of((const char *) reason->name,
+ XML_CIB_TAG_STATE, XML_CIB_TAG_NODE, NULL)) {
const char *uname = crm_peer_uname(ID(reason));
do_crm_log(level, "Transition %d aborted by %s '%s' on %s: %s "
CRM_XS " cib=%d.%d.%d source=%s:%d complete=%s",
controld_globals.transition_graph->id,
- kind, op, (uname? uname : ID(reason)), abort_text,
- add[0], add[1], add[2], fn, line,
+ reason->name, op, pcmk__s(uname, ID(reason)),
+ abort_text, add[0], add[1], add[2], fn, line,
pcmk__btoa(controld_globals.transition_graph->complete));
} else {
@@ -347,12 +487,13 @@ abort_transition_graph(int abort_priority, enum pcmk__graph_next abort_action,
do_crm_log(level, "Transition %d aborted by %s.%s '%s': %s "
CRM_XS " cib=%d.%d.%d source=%s:%d path=%s complete=%s",
controld_globals.transition_graph->id,
- TYPE(reason), (id? id : ""), (op? op : "change"),
+ reason->name, pcmk__s(id, ""), pcmk__s(op, "change"),
abort_text, add[0], add[1], add[2], fn, line, path,
pcmk__btoa(controld_globals.transition_graph->complete));
}
}
+done:
if (controld_globals.transition_graph->complete) {
if (controld_get_period_transition_timer() > 0) {
controld_stop_transition_timer();
diff --git a/daemons/controld/controld_throttle.c b/daemons/controld/controld_throttle.c
index 5b7f9c0..a4775e5 100644
--- a/daemons/controld/controld_throttle.c
+++ b/daemons/controld/controld_throttle.c
@@ -154,7 +154,7 @@ throttle_cib_load(float *load)
if(stream == NULL) {
int rc = errno;
- crm_warn("Couldn't read %s: %s (%d)", loadfile, pcmk_strerror(rc), rc);
+ crm_warn("Couldn't read %s: %s (%d)", loadfile, pcmk_rc_str(rc), rc);
free(loadfile); loadfile = NULL;
return FALSE;
}
@@ -220,7 +220,7 @@ throttle_load_avg(float *load)
stream = fopen(loadfile, "r");
if(stream == NULL) {
int rc = errno;
- crm_warn("Couldn't read %s: %s (%d)", loadfile, pcmk_strerror(rc), rc);
+ crm_warn("Couldn't read %s: %s (%d)", loadfile, pcmk_rc_str(rc), rc);
return FALSE;
}
@@ -407,7 +407,7 @@ static void
throttle_update_job_max(const char *preference)
{
long long max = 0LL;
- const char *env_limit = getenv("PCMK_node_action_limit");
+ const char *env_limit = pcmk__env_option(PCMK__ENV_NODE_ACTION_LIMIT);
if (env_limit != NULL) {
preference = env_limit; // Per-node override
diff --git a/daemons/controld/controld_transition.c b/daemons/controld/controld_transition.c
index c8a342c..897c6d3 100644
--- a/daemons/controld/controld_transition.c
+++ b/daemons/controld/controld_transition.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -15,11 +15,6 @@
#include <pacemaker-controld.h>
-static void
-global_cib_callback(const xmlNode * msg, int callid, int rc, xmlNode * output)
-{
-}
-
static pcmk__graph_t *
create_blank_graph(void)
{
@@ -82,12 +77,6 @@ do_te_control(long long action,
crm_err("Could not set CIB notification callback");
init_ok = FALSE;
}
-
- if (cib_conn->cmds->set_op_callback(cib_conn,
- global_cib_callback) != pcmk_ok) {
- crm_err("Could not set CIB global callback");
- init_ok = FALSE;
- }
}
if (init_ok) {
diff --git a/daemons/controld/controld_transition.h b/daemons/controld/controld_transition.h
index 2da4221..0655bd9 100644
--- a/daemons/controld/controld_transition.h
+++ b/daemons/controld/controld_transition.h
@@ -48,6 +48,8 @@ void controld_destroy_transition_trigger(void);
void controld_trigger_graph_as(const char *fn, int line);
void abort_after_delay(int abort_priority, enum pcmk__graph_next abort_action,
const char *abort_text, guint delay_ms);
+void controld_node_pending_timer(const crm_node_t *node);
+void controld_free_node_pending_timers(void);
void abort_transition_graph(int abort_priority,
enum pcmk__graph_next abort_action,
const char *abort_text, const xmlNode *reason,
diff --git a/daemons/controld/controld_utils.c b/daemons/controld/controld_utils.c
index 4ce09d9..9b306ee 100644
--- a/daemons/controld/controld_utils.c
+++ b/daemons/controld/controld_utils.c
@@ -828,7 +828,7 @@ get_node_id(xmlNode *lrm_rsc_op)
{
xmlNode *node = lrm_rsc_op;
- while (node != NULL && !pcmk__str_eq(XML_CIB_TAG_STATE, TYPE(node), pcmk__str_casei)) {
+ while ((node != NULL) && !pcmk__xe_is(node, XML_CIB_TAG_STATE)) {
node = node->parent;
}
diff --git a/daemons/controld/pacemaker-controld.c b/daemons/controld/pacemaker-controld.c
index 5858898..e4a72c2 100644
--- a/daemons/controld/pacemaker-controld.c
+++ b/daemons/controld/pacemaker-controld.c
@@ -112,7 +112,7 @@ main(int argc, char **argv)
goto done;
}
- if (crm_ipc_connect(old_instance)) {
+ if (pcmk__connect_generic_ipc(old_instance) == pcmk_rc_ok) {
/* IPC end-point already up */
crm_ipc_close(old_instance);
crm_ipc_destroy(old_instance);
diff --git a/daemons/controld/pacemaker-controld.h b/daemons/controld/pacemaker-controld.h
index 1484a00..2334cce 100644
--- a/daemons/controld/pacemaker-controld.h
+++ b/daemons/controld/pacemaker-controld.h
@@ -36,4 +36,7 @@ void controld_remove_voter(const char *uname);
void controld_election_fini(void);
void controld_stop_current_election_timeout(void);
+void set_join_state(const char *start_state, const char *node_name,
+ const char *node_uuid, bool remote);
+
#endif
diff --git a/daemons/execd/Makefile.am b/daemons/execd/Makefile.am
index 466f0df..ab8544f 100644
--- a/daemons/execd/Makefile.am
+++ b/daemons/execd/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2012-2021 the Pacemaker project contributors
+# Copyright 2012-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -12,18 +12,20 @@ include $(top_srcdir)/mk/man.mk
halibdir = $(CRM_DAEMON_DIR)
-halib_PROGRAMS = pacemaker-execd cts-exec-helper
+halib_PROGRAMS = pacemaker-execd \
+ cts-exec-helper
EXTRA_DIST = pacemaker-remoted.8.inc
pacemaker_execd_CFLAGS = $(CFLAGS_HARDENED_EXE)
pacemaker_execd_LDFLAGS = $(LDFLAGS_HARDENED_EXE)
-pacemaker_execd_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \
- $(top_builddir)/lib/services/libcrmservice.la \
- $(top_builddir)/lib/fencing/libstonithd.la
-pacemaker_execd_SOURCES = pacemaker-execd.c execd_commands.c \
- execd_alerts.c
+pacemaker_execd_LDADD = $(top_builddir)/lib/fencing/libstonithd.la
+pacemaker_execd_LDADD += $(top_builddir)/lib/services/libcrmservice.la
+pacemaker_execd_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
+pacemaker_execd_SOURCES = pacemaker-execd.c \
+ execd_commands.c \
+ execd_alerts.c
if BUILD_REMOTE
sbin_PROGRAMS = pacemaker-remoted
@@ -34,22 +36,27 @@ initdir = $(INITDIR)
init_SCRIPTS = pacemaker_remote
endif
-pacemaker_remoted_CPPFLAGS = -DPCMK__COMPILE_REMOTE $(AM_CPPFLAGS)
+pacemaker_remoted_CPPFLAGS = -DPCMK__COMPILE_REMOTE \
+ $(AM_CPPFLAGS)
pacemaker_remoted_CFLAGS = $(CFLAGS_HARDENED_EXE)
pacemaker_remoted_LDFLAGS = $(LDFLAGS_HARDENED_EXE)
-pacemaker_remoted_LDADD = $(pacemaker_execd_LDADD) \
- $(top_builddir)/lib/lrmd/liblrmd.la
-pacemaker_remoted_SOURCES = $(pacemaker_execd_SOURCES) \
- remoted_tls.c remoted_pidone.c remoted_proxy.c
+pacemaker_remoted_LDADD = $(top_builddir)/lib/fencing/libstonithd.la
+pacemaker_remoted_LDADD += $(top_builddir)/lib/services/libcrmservice.la
+pacemaker_remoted_LDADD += $(top_builddir)/lib/lrmd/liblrmd.la
+pacemaker_remoted_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
+pacemaker_remoted_SOURCES = $(pacemaker_execd_SOURCES) \
+ remoted_tls.c \
+ remoted_pidone.c \
+ remoted_proxy.c
endif
-cts_exec_helper_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \
- $(top_builddir)/lib/lrmd/liblrmd.la \
- $(top_builddir)/lib/cib/libcib.la \
- $(top_builddir)/lib/services/libcrmservice.la \
- $(top_builddir)/lib/pengine/libpe_status.la
+cts_exec_helper_LDADD = $(top_builddir)/lib/pengine/libpe_status.la
+cts_exec_helper_LDADD += $(top_builddir)/lib/cib/libcib.la
+cts_exec_helper_LDADD += $(top_builddir)/lib/lrmd/liblrmd.la
+cts_exec_helper_LDADD += $(top_builddir)/lib/services/libcrmservice.la
+cts_exec_helper_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
cts_exec_helper_SOURCES = cts-exec-helper.c
noinst_HEADERS = pacemaker-execd.h
@@ -59,6 +66,7 @@ CLEANFILES = $(man8_MANS)
# Always create a symlink for the old pacemaker_remoted name, so that bundle
# container images using a current Pacemaker will run on cluster nodes running
# Pacemaker 1 (>=1.1.17).
+.PHONY: install-exec-hook
install-exec-hook:
if BUILD_LEGACY_LINKS
cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f lrmd && $(LN_S) pacemaker-execd lrmd
@@ -67,6 +75,7 @@ if BUILD_REMOTE
cd $(DESTDIR)$(sbindir) && rm -f pacemaker_remoted && $(LN_S) pacemaker-remoted pacemaker_remoted
endif
+.PHONY: uninstall-hook
uninstall-hook:
if BUILD_LEGACY_LINKS
cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f lrmd
diff --git a/daemons/execd/cts-exec-helper.c b/daemons/execd/cts-exec-helper.c
index 2af5e16..6ebbedf 100644
--- a/daemons/execd/cts-exec-helper.c
+++ b/daemons/execd/cts-exec-helper.c
@@ -443,9 +443,9 @@ static int
generate_params(void)
{
int rc = pcmk_rc_ok;
- pe_working_set_t *data_set = NULL;
+ pcmk_scheduler_t *scheduler = NULL;
xmlNode *cib_xml_copy = NULL;
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
GHashTable *params = NULL;
GHashTable *meta = NULL;
GHashTableIter iter;
@@ -467,27 +467,29 @@ generate_params(void)
}
// Calculate cluster status
- data_set = pe_new_working_set();
- if (data_set == NULL) {
- crm_crit("Could not allocate working set");
+ scheduler = pe_new_working_set();
+ if (scheduler == NULL) {
+ crm_crit("Could not allocate scheduler data");
return ENOMEM;
}
- pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
- data_set->input = cib_xml_copy;
- data_set->now = crm_time_new(NULL);
- cluster_status(data_set);
+ pe__set_working_set_flags(scheduler,
+ pcmk_sched_no_counts|pcmk_sched_no_compat);
+ scheduler->input = cib_xml_copy;
+ scheduler->now = crm_time_new(NULL);
+ cluster_status(scheduler);
// Find resource in CIB
- rsc = pe_find_resource_with_flags(data_set->resources, options.rsc_id,
- pe_find_renamed|pe_find_any);
+ rsc = pe_find_resource_with_flags(scheduler->resources, options.rsc_id,
+ pcmk_rsc_match_history
+ |pcmk_rsc_match_basename);
if (rsc == NULL) {
crm_err("Resource does not exist in config");
- pe_free_working_set(data_set);
+ pe_free_working_set(scheduler);
return EINVAL;
}
// Add resource instance parameters to options.params
- params = pe_rsc_params(rsc, NULL, data_set);
+ params = pe_rsc_params(rsc, NULL, scheduler);
if (params != NULL) {
g_hash_table_iter_init(&iter, params);
while (g_hash_table_iter_next(&iter, (gpointer *) &key,
@@ -498,7 +500,7 @@ generate_params(void)
// Add resource meta-attributes to options.params
meta = pcmk__strkey_table(free, free);
- get_meta_attributes(meta, rsc, NULL, data_set);
+ get_meta_attributes(meta, rsc, NULL, scheduler);
g_hash_table_iter_init(&iter, meta);
while (g_hash_table_iter_next(&iter, (gpointer *) &key,
(gpointer *) &value)) {
@@ -509,7 +511,7 @@ generate_params(void)
}
g_hash_table_destroy(meta);
- pe_free_working_set(data_set);
+ pe_free_working_set(scheduler);
return rc;
}
@@ -587,7 +589,7 @@ main(int argc, char **argv)
goto done;
}
options.api_call = "exec";
- options.action = "monitor";
+ options.action = PCMK_ACTION_MONITOR;
options.exec_call_opts = lrmd_opt_notify_orig_only;
}
diff --git a/daemons/execd/execd_commands.c b/daemons/execd/execd_commands.c
index fa2761e..cf4503a 100644
--- a/daemons/execd/execd_commands.c
+++ b/daemons/execd/execd_commands.c
@@ -213,7 +213,7 @@ log_finished(const lrmd_cmd_t *cmd, int exec_time_ms, int queue_time_ms)
int log_level = LOG_INFO;
GString *str = g_string_sized_new(100); // reasonable starting size
- if (pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
+ if (pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
log_level = LOG_DEBUG;
}
@@ -253,7 +253,7 @@ log_execute(lrmd_cmd_t * cmd)
{
int log_level = LOG_INFO;
- if (pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
+ if (pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
log_level = LOG_DEBUG;
}
@@ -264,9 +264,9 @@ log_execute(lrmd_cmd_t * cmd)
static const char *
normalize_action_name(lrmd_rsc_t * rsc, const char *action)
{
- if (pcmk__str_eq(action, "monitor", pcmk__str_casei) &&
+ if (pcmk__str_eq(action, PCMK_ACTION_MONITOR, pcmk__str_casei) &&
pcmk_is_set(pcmk_get_ra_caps(rsc->class), pcmk_ra_cap_status)) {
- return "status";
+ return PCMK_ACTION_STATUS;
}
return action;
}
@@ -517,7 +517,7 @@ schedule_lrmd_cmd(lrmd_rsc_t * rsc, lrmd_cmd_t * cmd)
/* The controller expects the executor to automatically cancel
* recurring operations before a resource stops.
*/
- if (pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) {
+ if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_casei)) {
cancel_all_recurring(rsc, NULL);
}
@@ -844,7 +844,8 @@ action_complete(svc_action_t * action)
if (pcmk__str_eq(rclass, PCMK_RESOURCE_CLASS_SYSTEMD, pcmk__str_casei)) {
if (pcmk__result_ok(&(cmd->result))
- && pcmk__strcase_any_of(cmd->action, "start", "stop", NULL)) {
+ && pcmk__strcase_any_of(cmd->action, PCMK_ACTION_START,
+ PCMK_ACTION_STOP, NULL)) {
/* systemd returns from start and stop actions after the action
* begins, not after it completes. We have to jump through a few
* hoops so that we don't report 'complete' to the rest of pacemaker
@@ -852,7 +853,7 @@ action_complete(svc_action_t * action)
*/
goagain = true;
cmd->real_action = cmd->action;
- cmd->action = strdup("monitor");
+ cmd->action = strdup(PCMK_ACTION_MONITOR);
} else if (cmd->real_action != NULL) {
// This is follow-up monitor to check whether start/stop completed
@@ -860,7 +861,8 @@ action_complete(svc_action_t * action)
goagain = true;
} else if (pcmk__result_ok(&(cmd->result))
- && pcmk__str_eq(cmd->real_action, "stop", pcmk__str_casei)) {
+ && pcmk__str_eq(cmd->real_action, PCMK_ACTION_STOP,
+ pcmk__str_casei)) {
goagain = true;
} else {
@@ -878,9 +880,11 @@ action_complete(svc_action_t * action)
if ((cmd->result.execution_status == PCMK_EXEC_DONE)
&& (cmd->result.exit_status == PCMK_OCF_NOT_RUNNING)) {
- if (pcmk__str_eq(cmd->real_action, "start", pcmk__str_casei)) {
+ if (pcmk__str_eq(cmd->real_action, PCMK_ACTION_START,
+ pcmk__str_casei)) {
cmd->result.exit_status = PCMK_OCF_UNKNOWN_ERROR;
- } else if (pcmk__str_eq(cmd->real_action, "stop", pcmk__str_casei)) {
+ } else if (pcmk__str_eq(cmd->real_action, PCMK_ACTION_STOP,
+ pcmk__str_casei)) {
cmd->result.exit_status = PCMK_OCF_OK;
}
}
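The systemd branch above works by remapping a completed start/stop into a follow-up monitor, because systemd reports success when the job is queued rather than when it finishes. A minimal sketch of that remap, assuming the file-local lrmd_cmd_t with the action/real_action/result fields already used in this hunk (illustration only, not a second implementation):

static void
remap_systemd_followup(lrmd_cmd_t *cmd, bool *goagain)
{
    if (pcmk__result_ok(&(cmd->result))
        && pcmk__strcase_any_of(cmd->action, PCMK_ACTION_START,
                                PCMK_ACTION_STOP, NULL)) {
        *goagain = true;                            // schedule a re-check
        cmd->real_action = cmd->action;             // remember the original request
        cmd->action = strdup(PCMK_ACTION_MONITOR);  // poll until the unit settles
    }
}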
@@ -891,12 +895,12 @@ action_complete(svc_action_t * action)
#if SUPPORT_NAGIOS
if (rsc && pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_casei)) {
- if (action_matches(cmd, "monitor", 0)
+ if (action_matches(cmd, PCMK_ACTION_MONITOR, 0)
&& pcmk__result_ok(&(cmd->result))) {
/* Successfully executed --version for the nagios plugin */
cmd->result.exit_status = PCMK_OCF_NOT_RUNNING;
- } else if (pcmk__str_eq(cmd->action, "start", pcmk__str_casei)
+ } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_START, pcmk__str_casei)
&& !pcmk__result_ok(&(cmd->result))) {
#ifdef PCMK__TIME_USE_CGT
goagain = true;
@@ -1007,11 +1011,11 @@ stonith_action_complete(lrmd_cmd_t *cmd, int exit_status,
/* This should be possible only for probes in practice, but
* interpret for all actions to be safe.
*/
- if (pcmk__str_eq(cmd->action, CRMD_ACTION_STATUS,
+ if (pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
pcmk__str_none)) {
exit_status = PCMK_OCF_NOT_RUNNING;
- } else if (pcmk__str_eq(cmd->action, CRMD_ACTION_STOP,
+ } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP,
pcmk__str_none)) {
exit_status = PCMK_OCF_OK;
@@ -1035,11 +1039,12 @@ stonith_action_complete(lrmd_cmd_t *cmd, int exit_status,
// Certain successful actions change the known state of the resource
if ((rsc != NULL) && pcmk__result_ok(&(cmd->result))) {
- if (pcmk__str_eq(cmd->action, "start", pcmk__str_casei)) {
+ if (pcmk__str_eq(cmd->action, PCMK_ACTION_START, pcmk__str_casei)) {
pcmk__set_result(&rsc->fence_probe_result, CRM_EX_OK,
PCMK_EXEC_DONE, NULL); // "running"
- } else if (pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) {
+ } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP,
+ pcmk__str_casei)) {
pcmk__set_result(&rsc->fence_probe_result, CRM_EX_ERROR,
PCMK_EXEC_NO_FENCE_DEVICE, NULL); // "not running"
}
@@ -1235,7 +1240,7 @@ execute_stonith_action(lrmd_rsc_t *rsc, lrmd_cmd_t *cmd)
stonith_t *stonith_api = get_stonith_connection();
- if (pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)
+ if (pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR, pcmk__str_casei)
&& (cmd->interval_ms == 0)) {
// Probes don't require a fencer connection
stonith_action_complete(cmd, rsc->fence_probe_result.exit_status,
@@ -1249,16 +1254,17 @@ execute_stonith_action(lrmd_rsc_t *rsc, lrmd_cmd_t *cmd)
"No connection to fencer");
return;
- } else if (pcmk__str_eq(cmd->action, "start", pcmk__str_casei)) {
+ } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_START, pcmk__str_casei)) {
rc = execd_stonith_start(stonith_api, rsc, cmd);
if (rc == pcmk_ok) {
do_monitor = TRUE;
}
- } else if (pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) {
+ } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_casei)) {
rc = execd_stonith_stop(stonith_api, rsc);
- } else if (pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
+ } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
+ pcmk__str_casei)) {
do_monitor = TRUE;
} else {
@@ -1297,7 +1303,7 @@ execute_nonstonith_action(lrmd_rsc_t *rsc, lrmd_cmd_t *cmd)
#if SUPPORT_NAGIOS
/* Recurring operations are cancelled anyway for a stop operation */
if (pcmk__str_eq(rsc->class, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_casei)
- && pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) {
+ && pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_casei)) {
cmd->result.exit_status = PCMK_OCF_OK;
cmd_finalize(cmd, rsc);
@@ -1474,6 +1480,7 @@ process_lrmd_signon(pcmk__client_t *client, xmlNode *request, int call_id,
int rc = pcmk_ok;
time_t now = time(NULL);
const char *protocol_version = crm_element_value(request, F_LRMD_PROTOCOL_VERSION);
+ const char *start_state = pcmk__env_option(PCMK__ENV_NODE_START_STATE);
if (compare_version(protocol_version, LRMD_MIN_PROTOCOL_VERSION) < 0) {
crm_err("Cluster API version must be greater than or equal to %s, not %s",
@@ -1503,6 +1510,10 @@ process_lrmd_signon(pcmk__client_t *client, xmlNode *request, int call_id,
crm_xml_add(*reply, F_LRMD_PROTOCOL_VERSION, LRMD_PROTOCOL_VERSION);
crm_xml_add_ll(*reply, PCMK__XA_UPTIME, now - start_time);
+ if (start_state) {
+ crm_xml_add(*reply, PCMK__XA_NODE_START_STATE, start_state);
+ }
+
return rc;
}
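The signon change above pairs with the new set_join_state() declaration added to pacemaker-controld.h earlier in this patch: the executor advertises PCMK__ENV_NODE_START_STATE in its reply so the receiving side can honor it for remote nodes. A hedged sketch of how such a reply could be consumed; the node name/UUID arguments are placeholders and this is not the controller's actual handler:

static void
apply_reported_start_state(const xmlNode *reply, const char *node_name,
                           const char *node_uuid)
{
    const char *start_state = crm_element_value(reply,
                                                PCMK__XA_NODE_START_STATE);

    if (start_state != NULL) {
        // set_join_state() is declared in pacemaker-controld.h in this patch
        set_join_state(start_state, node_name, node_uuid, true);
    }
}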
diff --git a/daemons/execd/pacemaker-execd.c b/daemons/execd/pacemaker-execd.c
index 83a8cd7..e7e30eb 100644
--- a/daemons/execd/pacemaker-execd.c
+++ b/daemons/execd/pacemaker-execd.c
@@ -493,26 +493,28 @@ main(int argc, char **argv, char **envp)
pcmk__cli_init_logging(EXECD_NAME, args->verbosity);
crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE);
+ // ocf_log() (in resource-agents) uses the capitalized env options below
option = pcmk__env_option(PCMK__ENV_LOGFACILITY);
if (!pcmk__str_eq(option, PCMK__VALUE_NONE,
pcmk__str_casei|pcmk__str_null_matches)
&& !pcmk__str_eq(option, "/dev/null", pcmk__str_none)) {
- setenv("HA_LOGFACILITY", option, 1); /* Used by the ocf_log/ha_log OCF macro */
+
+ pcmk__set_env_option("LOGFACILITY", option, true);
}
option = pcmk__env_option(PCMK__ENV_LOGFILE);
if (!pcmk__str_eq(option, PCMK__VALUE_NONE,
pcmk__str_casei|pcmk__str_null_matches)) {
- setenv("HA_LOGFILE", option, 1); /* Used by the ocf_log/ha_log OCF macro */
+ pcmk__set_env_option("LOGFILE", option, true);
if (pcmk__env_option_enabled(crm_system_name, PCMK__ENV_DEBUG)) {
- setenv("HA_DEBUGLOG", option, 1); /* Used by the ocf_log/ha_debug OCF macro */
+ pcmk__set_env_option("DEBUGLOG", option, true);
}
}
#ifdef PCMK__COMPILE_REMOTE
if (options.port != NULL) {
- setenv("PCMK_remote_port", options.port, 1);
+ pcmk__set_env_option(PCMK__ENV_REMOTE_PORT, options.port, false);
}
#endif // PCMK__COMPILE_REMOTE
diff --git a/daemons/execd/remoted_pidone.c b/daemons/execd/remoted_pidone.c
index 4f914eb..08271bf 100644
--- a/daemons/execd/remoted_pidone.c
+++ b/daemons/execd/remoted_pidone.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2017-2020 the Pacemaker project contributors
+ * Copyright 2017-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -203,10 +203,14 @@ remoted_spawn_pidone(int argc, char **argv, char **envp)
* from /etc/pacemaker/pcmk-init.env, which could be useful for testing or
* containers with a custom PID 1 script that launches pacemaker-remoted.
*/
- const char *pid1 = (getpid() == 1)? "full" : getenv("PCMK_remote_pid1");
+ const char *pid1 = "default";
- if (pid1 == NULL) {
- return;
+ if (getpid() != 1) {
+ pid1 = pcmk__env_option(PCMK__ENV_REMOTE_PID1);
+ if (!pcmk__str_any_of(pid1, "full", "vars", NULL)) {
+ // Default, unset, or invalid
+ return;
+ }
}
/* When a container is launched, it may be given specific environment
@@ -217,7 +221,7 @@ remoted_spawn_pidone(int argc, char **argv, char **envp)
*/
load_env_vars("/etc/pacemaker/pcmk-init.env");
- if (strcmp(pid1, "full")) {
+ if (strcmp(pid1, "vars") == 0) {
return;
}
@@ -226,7 +230,7 @@ remoted_spawn_pidone(int argc, char **argv, char **envp)
* explicitly configured in the container's environment.
*/
if (pcmk__env_option(PCMK__ENV_LOGFILE) == NULL) {
- pcmk__set_env_option(PCMK__ENV_LOGFILE, "/var/log/pcmk-init.log");
+ pcmk__set_env_option(PCMK__ENV_LOGFILE, "/var/log/pcmk-init.log", true);
}
sigfillset(&set);
@@ -242,7 +246,7 @@ remoted_spawn_pidone(int argc, char **argv, char **envp)
// Child remains as pacemaker-remoted
return;
case -1:
- perror("fork");
+ crm_err("fork failed: %s", pcmk_rc_str(errno));
}
/* Parent becomes the reaper of zombie processes */
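The new PCMK_remote_pid1 handling above recognizes three effective modes. A small sketch of the decision table, assuming the option string returned by pcmk__env_option(); the enum and helper are illustrative only:

enum pidone_mode { PIDONE_NONE, PIDONE_VARS, PIDONE_FULL };

static enum pidone_mode
pidone_mode(pid_t pid, const char *setting)
{
    if (pid == 1) {
        return PIDONE_FULL;     // actual PID 1: always take over init duties
    }
    if (pcmk__str_eq(setting, "full", pcmk__str_none)) {
        return PIDONE_FULL;     // load the env file and reap zombies
    }
    if (pcmk__str_eq(setting, "vars", pcmk__str_none)) {
        return PIDONE_VARS;     // load the env file only
    }
    return PIDONE_NONE;         // default, unset, or invalid: do nothing
}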
diff --git a/daemons/execd/remoted_tls.c b/daemons/execd/remoted_tls.c
index c65e3f3..23a2dcf 100644
--- a/daemons/execd/remoted_tls.c
+++ b/daemons/execd/remoted_tls.c
@@ -273,39 +273,44 @@ bind_and_listen(struct addrinfo *addr)
fd = socket(addr->ai_family, addr->ai_socktype, addr->ai_protocol);
if (fd < 0) {
- crm_perror(LOG_ERR, "Listener socket creation failed");
- return -1;
+ rc = errno;
+ crm_err("Listener socket creation failed: %", pcmk_rc_str(rc));
+ return -rc;
}
/* reuse address */
optval = 1;
rc = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));
if (rc < 0) {
- crm_perror(LOG_ERR, "Local address reuse not allowed on %s", buffer);
+ rc = errno;
+ crm_err("Local address reuse not allowed on %s: %s", buffer, pcmk_rc_str(rc));
close(fd);
- return -1;
+ return -rc;
}
if (addr->ai_family == AF_INET6) {
optval = 0;
rc = setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &optval, sizeof(optval));
if (rc < 0) {
- crm_perror(LOG_INFO, "Couldn't disable IPV6-only on %s", buffer);
+ rc = errno;
+ crm_err("Couldn't disable IPV6-only on %s: %s", buffer, pcmk_rc_str(rc));
close(fd);
- return -1;
+ return -rc;
}
}
if (bind(fd, addr->ai_addr, addr->ai_addrlen) != 0) {
- crm_perror(LOG_ERR, "Cannot bind to %s", buffer);
+ rc = errno;
+ crm_err("Cannot bind to %s: %s", buffer, pcmk_rc_str(rc));
close(fd);
- return -1;
+ return -rc;
}
if (listen(fd, 10) == -1) {
- crm_perror(LOG_ERR, "Cannot listen on %s", buffer);
+ rc = errno;
+ crm_err("Cannot listen on %s: %s", buffer, pcmk_rc_str(rc));
close(fd);
- return -1;
+ return -rc;
}
return fd;
}
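The crm_perror() removals above follow one pattern throughout bind_and_listen(): capture errno immediately, log it via pcmk_rc_str(), and return it negated so callers receive a specific error instead of a flat -1. A minimal sketch of that pattern in isolation (assumes the usual Pacemaker logging headers):

#include <errno.h>
#include <sys/socket.h>

static int
open_listener(int family, int type, int protocol)
{
    int fd = socket(family, type, protocol);

    if (fd < 0) {
        int rc = errno;     // save before any later call can overwrite it

        crm_err("Listener socket creation failed: %s", pcmk_rc_str(rc));
        return -rc;         // negative errno instead of a bare -1
    }
    return fd;
}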
@@ -325,12 +330,15 @@ get_address_info(const char *bind_name, int port, struct addrinfo **res)
snprintf(port_str, sizeof(port_str), "%d", port);
rc = getaddrinfo(bind_name, port_str, &hints, res);
- if (rc) {
+ rc = pcmk__gaierror2rc(rc);
+
+ if (rc != pcmk_rc_ok) {
crm_err("Unable to get IP address(es) for %s: %s",
- (bind_name? bind_name : "local node"), gai_strerror(rc));
- return -EADDRNOTAVAIL;
+ (bind_name? bind_name : "local node"), pcmk_rc_str(rc));
+ return rc;
}
- return pcmk_ok;
+
+ return pcmk_rc_ok;
}
int
@@ -340,7 +348,7 @@ lrmd_init_remote_tls_server(void)
int port = crm_default_remote_port();
struct addrinfo *res = NULL, *iter;
gnutls_datum_t psk_key = { NULL, 0 };
- const char *bind_name = getenv("PCMK_remote_address");
+ const char *bind_name = pcmk__env_option(PCMK__ENV_REMOTE_ADDRESS);
static struct mainloop_fd_callbacks remote_listen_fd_callbacks = {
.dispatch = lrmd_remote_listen,
@@ -371,7 +379,7 @@ lrmd_init_remote_tls_server(void)
}
gnutls_free(psk_key.data);
- if (get_address_info(bind_name, port, &res) != pcmk_ok) {
+ if (get_address_info(bind_name, port, &res) != pcmk_rc_ok) {
return -1;
}
@@ -391,7 +399,7 @@ lrmd_init_remote_tls_server(void)
if (iter->ai_family == filter) {
ssock = bind_and_listen(iter);
}
- if (ssock != -1) {
+ if (ssock >= 0) {
break;
}
diff --git a/daemons/fenced/Makefile.am b/daemons/fenced/Makefile.am
index 2ca0088..62aa864 100644
--- a/daemons/fenced/Makefile.am
+++ b/daemons/fenced/Makefile.am
@@ -14,7 +14,8 @@ include $(top_srcdir)/mk/man.mk
halibdir = $(CRM_DAEMON_DIR)
-halib_PROGRAMS = pacemaker-fenced cts-fence-helper
+halib_PROGRAMS = pacemaker-fenced \
+ cts-fence-helper
noinst_HEADERS = pacemaker-fenced.h
@@ -23,30 +24,36 @@ man7_MANS = pacemaker-fenced.7
endif
cts_fence_helper_SOURCES = cts-fence-helper.c
-cts_fence_helper_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \
- $(top_builddir)/lib/fencing/libstonithd.la
+cts_fence_helper_LDADD = $(top_builddir)/lib/fencing/libstonithd.la
+cts_fence_helper_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
pacemaker_fenced_YFLAGS = -d
pacemaker_fenced_CFLAGS = $(CFLAGS_HARDENED_EXE)
pacemaker_fenced_LDFLAGS = $(LDFLAGS_HARDENED_EXE)
-pacemaker_fenced_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \
- $(top_builddir)/lib/cib/libcib.la \
- $(top_builddir)/lib/cluster/libcrmcluster.la \
- $(top_builddir)/lib/fencing/libstonithd.la \
- $(top_builddir)/lib/pengine/libpe_status.la \
- $(top_builddir)/lib/pacemaker/libpacemaker.la \
- $(CLUSTERLIBS)
-pacemaker_fenced_SOURCES = pacemaker-fenced.c \
- fenced_commands.c \
- fenced_remote.c \
+
+pacemaker_fenced_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la
+pacemaker_fenced_LDADD += $(top_builddir)/lib/pengine/libpe_status.la
+pacemaker_fenced_LDADD += $(top_builddir)/lib/cib/libcib.la
+pacemaker_fenced_LDADD += $(top_builddir)/lib/cluster/libcrmcluster.la
+pacemaker_fenced_LDADD += $(top_builddir)/lib/fencing/libstonithd.la
+pacemaker_fenced_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
+pacemaker_fenced_LDADD += $(CLUSTERLIBS)
+
+pacemaker_fenced_SOURCES = pacemaker-fenced.c \
+ fenced_cib.c \
+ fenced_commands.c \
+ fenced_remote.c \
+ fenced_scheduler.c \
fenced_history.c
CLEANFILES = $(man7_MANS) $(man8_MANS)
if BUILD_LEGACY_LINKS
+.PHONY: install-exec-hook
install-exec-hook:
cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f stonithd && $(LN_S) pacemaker-fenced stonithd
+.PHONY: uninstall-hook
uninstall-hook:
cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f stonithd
endif
diff --git a/daemons/fenced/cts-fence-helper.c b/daemons/fenced/cts-fence-helper.c
index e18a1f4..07bd500 100644
--- a/daemons/fenced/cts-fence-helper.c
+++ b/daemons/fenced/cts-fence-helper.c
@@ -212,10 +212,12 @@ run_fence_failure_test(void)
cmds->register_device(st, st_opts, "test-id1", "stonith-ng", "fence_dummy", params),
"Register device1 for failure test", 1, 0);
- single_test(st->cmds->fence(st, st_opts, "false_1_node2", "off", 3, 0),
+ single_test(st->cmds->fence(st, st_opts, "false_1_node2", PCMK_ACTION_OFF,
+ 3, 0),
"Fence failure results off", 1, -ENODATA);
- single_test(st->cmds->fence(st, st_opts, "false_1_node2", "reboot", 3, 0),
+ single_test(st->cmds->fence(st, st_opts, "false_1_node2",
+ PCMK_ACTION_REBOOT, 3, 0),
"Fence failure results reboot", 1, -ENODATA);
single_test(st->cmds->remove_device(st, st_opts, "test-id1"),
@@ -246,11 +248,13 @@ run_fence_failure_rollover_test(void)
cmds->register_device(st, st_opts, "test-id2", "stonith-ng", "fence_dummy", params),
"Register device2 for rollover test", 1, 0);
- single_test(st->cmds->fence(st, st_opts, "false_1_node2", "off", 3, 0),
+ single_test(st->cmds->fence(st, st_opts, "false_1_node2", PCMK_ACTION_OFF,
+ 3, 0),
"Fence rollover results off", 1, 0);
/* Expect -ENODEV because fence_dummy requires 'on' to be executed on target */
- single_test(st->cmds->fence(st, st_opts, "false_1_node2", "on", 3, 0),
+ single_test(st->cmds->fence(st, st_opts, "false_1_node2", PCMK_ACTION_ON, 3,
+ 0),
"Fence rollover results on", 1, -ENODEV);
single_test(st->cmds->remove_device(st, st_opts, "test-id1"),
@@ -278,7 +282,8 @@ run_standard_test(void)
stonith_key_value_freeall(params, 1, 1);
params = NULL;
- single_test(st->cmds->list(st, st_opts, "test-id", NULL, 1), "list", 1, 0);
+ single_test(st->cmds->list(st, st_opts, "test-id", NULL, 1),
+ PCMK_ACTION_LIST, 1, 0);
single_test(st->cmds->monitor(st, st_opts, "test-id", 1), "Monitor", 1, 0);
@@ -288,14 +293,17 @@ run_standard_test(void)
single_test(st->cmds->status(st, st_opts, "test-id", "false_1_node1", 1),
"Status false_1_node1", 1, 0);
- single_test(st->cmds->fence(st, st_opts, "unknown-host", "off", 1, 0),
+ single_test(st->cmds->fence(st, st_opts, "unknown-host", PCMK_ACTION_OFF,
+ 1, 0),
"Fence unknown-host (expected failure)", 0, -ENODEV);
- single_test(st->cmds->fence(st, st_opts, "false_1_node1", "off", 1, 0),
+ single_test(st->cmds->fence(st, st_opts, "false_1_node1", PCMK_ACTION_OFF,
+ 1, 0),
"Fence false_1_node1", 1, 0);
/* Expect -ENODEV because fence_dummy requires 'on' to be executed on target */
- single_test(st->cmds->fence(st, st_opts, "false_1_node1", "on", 1, 0),
+ single_test(st->cmds->fence(st, st_opts, "false_1_node1", PCMK_ACTION_ON, 1,
+ 0),
"Unfence false_1_node1", 1, -ENODEV);
/* Confirm that an invalid level index is rejected */
@@ -362,31 +370,31 @@ standard_dev_test(void)
rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10);
crm_debug("Status false_1_node1: %d", rc);
- rc = st->cmds->fence(st, st_opts, "unknown-host", "off", 60, 0);
+ rc = st->cmds->fence(st, st_opts, "unknown-host", PCMK_ACTION_OFF, 60, 0);
crm_debug("Fence unknown-host: %d", rc);
rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10);
crm_debug("Status false_1_node1: %d", rc);
- rc = st->cmds->fence(st, st_opts, "false_1_node1", "off", 60, 0);
+ rc = st->cmds->fence(st, st_opts, "false_1_node1", PCMK_ACTION_OFF, 60, 0);
crm_debug("Fence false_1_node1: %d", rc);
rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10);
crm_debug("Status false_1_node1: %d", rc);
- rc = st->cmds->fence(st, st_opts, "false_1_node1", "on", 10, 0);
+ rc = st->cmds->fence(st, st_opts, "false_1_node1", PCMK_ACTION_ON, 10, 0);
crm_debug("Unfence false_1_node1: %d", rc);
rc = st->cmds->status(st, st_opts, "test-id", "false_1_node1", 10);
crm_debug("Status false_1_node1: %d", rc);
- rc = st->cmds->fence(st, st_opts, "some-host", "off", 10, 0);
+ rc = st->cmds->fence(st, st_opts, "some-host", PCMK_ACTION_OFF, 10, 0);
crm_debug("Fence alias: %d", rc);
rc = st->cmds->status(st, st_opts, "test-id", "some-host", 10);
crm_debug("Status alias: %d", rc);
- rc = st->cmds->fence(st, st_opts, "false_1_node1", "on", 10, 0);
+ rc = st->cmds->fence(st, st_opts, "false_1_node1", PCMK_ACTION_ON, 10, 0);
crm_debug("Unfence false_1_node1: %d", rc);
rc = st->cmds->remove_device(st, st_opts, "test-id");
@@ -426,7 +434,8 @@ test_async_fence_pass(int check_event)
return;
}
- rc = st->cmds->fence(st, 0, "true_1_node1", "off", MAINLOOP_DEFAULT_TIMEOUT, 0);
+ rc = st->cmds->fence(st, 0, "true_1_node1", PCMK_ACTION_OFF,
+ MAINLOOP_DEFAULT_TIMEOUT, 0);
if (rc < 0) {
crm_err("fence failed with rc %d", rc);
mainloop_test_done(__func__, false);
@@ -459,7 +468,8 @@ test_async_fence_custom_timeout(int check_event)
}
begin = time(NULL);
- rc = st->cmds->fence(st, 0, "custom_timeout_node1", "off", MAINLOOP_DEFAULT_TIMEOUT, 0);
+ rc = st->cmds->fence(st, 0, "custom_timeout_node1", PCMK_ACTION_OFF,
+ MAINLOOP_DEFAULT_TIMEOUT, 0);
if (rc < 0) {
crm_err("fence failed with rc %d", rc);
mainloop_test_done(__func__, false);
@@ -479,7 +489,8 @@ test_async_fence_timeout(int check_event)
return;
}
- rc = st->cmds->fence(st, 0, "false_1_node2", "off", MAINLOOP_DEFAULT_TIMEOUT, 0);
+ rc = st->cmds->fence(st, 0, "false_1_node2", PCMK_ACTION_OFF,
+ MAINLOOP_DEFAULT_TIMEOUT, 0);
if (rc < 0) {
crm_err("fence failed with rc %d", rc);
mainloop_test_done(__func__, false);
diff --git a/daemons/fenced/fenced_cib.c b/daemons/fenced/fenced_cib.c
new file mode 100644
index 0000000..e11bf68
--- /dev/null
+++ b/daemons/fenced/fenced_cib.c
@@ -0,0 +1,734 @@
+/*
+ * Copyright 2009-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+*/
+
+#include <crm_internal.h>
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <libxml/tree.h>
+#include <libxml/xpath.h>
+
+#include <crm/crm.h>
+#include <crm/msg_xml.h>
+#include <crm/common/xml.h>
+
+#include <crm/cluster/internal.h>
+
+#include <crm/cib.h>
+#include <crm/cib/internal.h>
+
+#include <pacemaker-fenced.h>
+
+static xmlNode *local_cib = NULL;
+static cib_t *cib_api = NULL;
+static bool have_cib_devices = FALSE;
+
+/*!
+ * \internal
+ * \brief Check whether a node has a specific attribute name/value
+ *
+ * \param[in] node Name of node to check
+ * \param[in] name Name of an attribute to look for
+ * \param[in] value Value that the named attribute must have to be considered a match
+ *
+ * \return TRUE if the locally cached CIB has the specified node attribute
+ */
+gboolean
+node_has_attr(const char *node, const char *name, const char *value)
+{
+ GString *xpath = NULL;
+ xmlNode *match;
+
+ CRM_CHECK((local_cib != NULL) && (node != NULL) && (name != NULL)
+ && (value != NULL), return FALSE);
+
+ /* Search for the node's attributes in the CIB. While the schema allows
+ * multiple sets of instance attributes, and allows instance attributes to
+ * use id-ref to reference values elsewhere, that is intended for resources,
+ * so we ignore that here.
+ */
+ xpath = g_string_sized_new(256);
+ pcmk__g_strcat(xpath,
+ "//" XML_CIB_TAG_NODES "/" XML_CIB_TAG_NODE
+ "[@" XML_ATTR_UNAME "='", node, "']/" XML_TAG_ATTR_SETS
+ "/" XML_CIB_TAG_NVPAIR
+ "[@" XML_NVPAIR_ATTR_NAME "='", name, "' "
+ "and @" XML_NVPAIR_ATTR_VALUE "='", value, "']", NULL);
+
+ match = get_xpath_object((const char *) xpath->str, local_cib, LOG_NEVER);
+
+ g_string_free(xpath, TRUE);
+ return (match != NULL);
+}
+
+static void
+add_topology_level(xmlNode *match)
+{
+ char *desc = NULL;
+ pcmk__action_result_t result = PCMK__UNKNOWN_RESULT;
+
+ CRM_CHECK(match != NULL, return);
+
+ fenced_register_level(match, &desc, &result);
+ fenced_send_level_notification(STONITH_OP_LEVEL_ADD, &result, desc);
+ pcmk__reset_result(&result);
+ free(desc);
+}
+
+static void
+topology_remove_helper(const char *node, int level)
+{
+ char *desc = NULL;
+ pcmk__action_result_t result = PCMK__UNKNOWN_RESULT;
+ xmlNode *data = create_xml_node(NULL, XML_TAG_FENCING_LEVEL);
+
+ crm_xml_add(data, F_STONITH_ORIGIN, __func__);
+ crm_xml_add_int(data, XML_ATTR_STONITH_INDEX, level);
+ crm_xml_add(data, XML_ATTR_STONITH_TARGET, node);
+
+ fenced_unregister_level(data, &desc, &result);
+ fenced_send_level_notification(STONITH_OP_LEVEL_DEL, &result, desc);
+ pcmk__reset_result(&result);
+ free_xml(data);
+ free(desc);
+}
+
+static void
+remove_topology_level(xmlNode *match)
+{
+ int index = 0;
+ char *key = NULL;
+
+ CRM_CHECK(match != NULL, return);
+
+ key = stonith_level_key(match, fenced_target_by_unknown);
+ crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index);
+ topology_remove_helper(key, index);
+ free(key);
+}
+
+static void
+register_fencing_topology(xmlXPathObjectPtr xpathObj)
+{
+ int max = numXpathResults(xpathObj), lpc = 0;
+
+ for (lpc = 0; lpc < max; lpc++) {
+ xmlNode *match = getXpathResult(xpathObj, lpc);
+
+ remove_topology_level(match);
+ add_topology_level(match);
+ }
+}
+
+/* Fencing
+<diff crm_feature_set="3.0.6">
+ <diff-removed>
+ <fencing-topology>
+ <fencing-level id="f-p1.1" target="pcmk-1" index="1" devices="poison-pill" __crm_diff_marker__="removed:top"/>
+ <fencing-level id="f-p1.2" target="pcmk-1" index="2" devices="power" __crm_diff_marker__="removed:top"/>
+ <fencing-level devices="disk,network" id="f-p2.1"/>
+ </fencing-topology>
+ </diff-removed>
+ <diff-added>
+ <fencing-topology>
+ <fencing-level id="f-p.1" target="pcmk-1" index="1" devices="poison-pill" __crm_diff_marker__="added:top"/>
+ <fencing-level id="f-p2.1" target="pcmk-2" index="1" devices="disk,something"/>
+ <fencing-level id="f-p3.1" target="pcmk-2" index="2" devices="power" __crm_diff_marker__="added:top"/>
+ </fencing-topology>
+ </diff-added>
+</diff>
+*/
+
+void
+fencing_topology_init(void)
+{
+ xmlXPathObjectPtr xpathObj = NULL;
+ const char *xpath = "//" XML_TAG_FENCING_LEVEL;
+
+ crm_trace("Full topology refresh");
+ free_topology_list();
+ init_topology_list();
+
+ /* Grab everything */
+ xpathObj = xpath_search(local_cib, xpath);
+ register_fencing_topology(xpathObj);
+
+ freeXpathObject(xpathObj);
+}
+
+static void
+remove_cib_device(xmlXPathObjectPtr xpathObj)
+{
+ int max = numXpathResults(xpathObj), lpc = 0;
+
+ for (lpc = 0; lpc < max; lpc++) {
+ const char *rsc_id = NULL;
+ const char *standard = NULL;
+ xmlNode *match = getXpathResult(xpathObj, lpc);
+
+ CRM_LOG_ASSERT(match != NULL);
+ if(match != NULL) {
+ standard = crm_element_value(match, XML_AGENT_ATTR_CLASS);
+ }
+
+ if (!pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
+ continue;
+ }
+
+ rsc_id = crm_element_value(match, XML_ATTR_ID);
+
+ stonith_device_remove(rsc_id, true);
+ }
+}
+
+static void
+update_stonith_watchdog_timeout_ms(xmlNode *cib)
+{
+ long timeout_ms = 0;
+ xmlNode *stonith_watchdog_xml = NULL;
+ const char *value = NULL;
+
+ stonith_watchdog_xml = get_xpath_object("//nvpair[@name='stonith-watchdog-timeout']",
+ cib, LOG_NEVER);
+ if (stonith_watchdog_xml) {
+ value = crm_element_value(stonith_watchdog_xml, XML_NVPAIR_ATTR_VALUE);
+ }
+ if (value) {
+ timeout_ms = crm_get_msec(value);
+ }
+
+ if (timeout_ms < 0) {
+ timeout_ms = pcmk__auto_watchdog_timeout();
+ }
+
+ stonith_watchdog_timeout_ms = timeout_ms;
+}
+
+/*!
+ * \internal
+ * \brief Update all STONITH device definitions based on current CIB
+ */
+static void
+cib_devices_update(void)
+{
+ GHashTableIter iter;
+ stonith_device_t *device = NULL;
+
+ crm_info("Updating devices to version %s.%s.%s",
+ crm_element_value(local_cib, XML_ATTR_GENERATION_ADMIN),
+ crm_element_value(local_cib, XML_ATTR_GENERATION),
+ crm_element_value(local_cib, XML_ATTR_NUMUPDATES));
+
+ g_hash_table_iter_init(&iter, device_list);
+ while (g_hash_table_iter_next(&iter, NULL, (void **)&device)) {
+ if (device->cib_registered) {
+ device->dirty = TRUE;
+ }
+ }
+
+ /* Have the list repopulated if the CIB has a watchdog-fencing resource.
+ TODO: Keep a cached list for queries that happen while we are refreshing.
+ */
+ g_list_free_full(stonith_watchdog_targets, free);
+ stonith_watchdog_targets = NULL;
+
+ fenced_scheduler_run(local_cib);
+
+ g_hash_table_iter_init(&iter, device_list);
+ while (g_hash_table_iter_next(&iter, NULL, (void **)&device)) {
+ if (device->dirty) {
+ g_hash_table_iter_remove(&iter);
+ }
+ }
+}
+
+static void
+update_cib_stonith_devices_v1(const char *event, xmlNode * msg)
+{
+ const char *reason = "none";
+ gboolean needs_update = FALSE;
+ xmlXPathObjectPtr xpath_obj = NULL;
+
+ /* process new constraints */
+ xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_CONS_TAG_RSC_LOCATION);
+ if (numXpathResults(xpath_obj) > 0) {
+ int max = numXpathResults(xpath_obj), lpc = 0;
+
+ /* Safest and simplest to always recompute */
+ needs_update = TRUE;
+ reason = "new location constraint";
+
+ for (lpc = 0; lpc < max; lpc++) {
+ xmlNode *match = getXpathResult(xpath_obj, lpc);
+
+ crm_log_xml_trace(match, "new constraint");
+ }
+ }
+ freeXpathObject(xpath_obj);
+
+ /* process deletions */
+ xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_CIB_TAG_RESOURCE);
+ if (numXpathResults(xpath_obj) > 0) {
+ remove_cib_device(xpath_obj);
+ }
+ freeXpathObject(xpath_obj);
+
+ /* process additions */
+ xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_CIB_TAG_RESOURCE);
+ if (numXpathResults(xpath_obj) > 0) {
+ int max = numXpathResults(xpath_obj), lpc = 0;
+
+ for (lpc = 0; lpc < max; lpc++) {
+ const char *rsc_id = NULL;
+ const char *standard = NULL;
+ xmlNode *match = getXpathResult(xpath_obj, lpc);
+
+ rsc_id = crm_element_value(match, XML_ATTR_ID);
+ standard = crm_element_value(match, XML_AGENT_ATTR_CLASS);
+
+ if (!pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
+ continue;
+ }
+
+ crm_trace("Fencing resource %s was added or modified", rsc_id);
+ reason = "new resource";
+ needs_update = TRUE;
+ }
+ }
+ freeXpathObject(xpath_obj);
+
+ if(needs_update) {
+ crm_info("Updating device list from CIB: %s", reason);
+ cib_devices_update();
+ }
+}
+
+static void
+update_cib_stonith_devices_v2(const char *event, xmlNode * msg)
+{
+ xmlNode *change = NULL;
+ char *reason = NULL;
+ bool needs_update = FALSE;
+ xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
+
+ for (change = pcmk__xml_first_child(patchset); change != NULL;
+ change = pcmk__xml_next(change)) {
+ const char *op = crm_element_value(change, XML_DIFF_OP);
+ const char *xpath = crm_element_value(change, XML_DIFF_PATH);
+ const char *shortpath = NULL;
+
+ if ((op == NULL) ||
+ (strcmp(op, "move") == 0) ||
+ strstr(xpath, "/"XML_CIB_TAG_STATUS)) {
+ continue;
+ } else if (pcmk__str_eq(op, "delete", pcmk__str_casei) && strstr(xpath, "/"XML_CIB_TAG_RESOURCE)) {
+ const char *rsc_id = NULL;
+ char *search = NULL;
+ char *mutable = NULL;
+
+ if (strstr(xpath, XML_TAG_ATTR_SETS) ||
+ strstr(xpath, XML_TAG_META_SETS)) {
+ needs_update = TRUE;
+ pcmk__str_update(&reason,
+ "(meta) attribute deleted from resource");
+ break;
+ }
+ pcmk__str_update(&mutable, xpath);
+ rsc_id = strstr(mutable, "primitive[@" XML_ATTR_ID "=\'");
+ if (rsc_id != NULL) {
+ rsc_id += strlen("primitive[@" XML_ATTR_ID "=\'");
+ search = strchr(rsc_id, '\'');
+ }
+ if (search != NULL) {
+ *search = 0;
+ stonith_device_remove(rsc_id, true);
+ /* watchdog_device_update() is called afterwards
+ to fall back to the implicit definition if needed */
+ } else {
+ crm_warn("Ignoring malformed CIB update (resource deletion)");
+ }
+ free(mutable);
+
+ } else if (strstr(xpath, "/"XML_CIB_TAG_RESOURCES) ||
+ strstr(xpath, "/"XML_CIB_TAG_CONSTRAINTS) ||
+ strstr(xpath, "/"XML_CIB_TAG_RSCCONFIG)) {
+ shortpath = strrchr(xpath, '/'); CRM_ASSERT(shortpath);
+ reason = crm_strdup_printf("%s %s", op, shortpath+1);
+ needs_update = TRUE;
+ break;
+ }
+ }
+
+ if(needs_update) {
+ crm_info("Updating device list from CIB: %s", reason);
+ cib_devices_update();
+ } else {
+ crm_trace("No updates for device list found in CIB");
+ }
+ free(reason);
+}
+
+static void
+update_cib_stonith_devices(const char *event, xmlNode * msg)
+{
+ int format = 1;
+ xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
+
+ CRM_ASSERT(patchset);
+ crm_element_value_int(patchset, PCMK_XA_FORMAT, &format);
+ switch(format) {
+ case 1:
+ update_cib_stonith_devices_v1(event, msg);
+ break;
+ case 2:
+ update_cib_stonith_devices_v2(event, msg);
+ break;
+ default:
+ crm_warn("Unknown patch format: %d", format);
+ }
+}
+
+static void
+watchdog_device_update(void)
+{
+ if (stonith_watchdog_timeout_ms > 0) {
+ if (!g_hash_table_lookup(device_list, STONITH_WATCHDOG_ID) &&
+ !stonith_watchdog_targets) {
+ /* If we get here, watchdog fencing is enabled, no such device exists yet,
+ and it is not stonith_watchdog_targets that is preventing its creation
+ */
+ int rc;
+ xmlNode *xml;
+
+ xml = create_device_registration_xml(
+ STONITH_WATCHDOG_ID,
+ st_namespace_internal,
+ STONITH_WATCHDOG_AGENT,
+ NULL, /* stonith_device_register will add our
+ own name as PCMK_STONITH_HOST_LIST param
+ so we can skip that here
+ */
+ NULL);
+ rc = stonith_device_register(xml, TRUE);
+ free_xml(xml);
+ if (rc != pcmk_ok) {
+ rc = pcmk_legacy2rc(rc);
+ exit_code = CRM_EX_FATAL;
+ crm_crit("Cannot register watchdog pseudo fence agent: %s",
+ pcmk_rc_str(rc));
+ stonith_shutdown(0);
+ }
+ }
+
+ } else if (g_hash_table_lookup(device_list, STONITH_WATCHDOG_ID) != NULL) {
+ /* Be silent if there is no device (TODO: add a parameter to stonith_device_remove) */
+ stonith_device_remove(STONITH_WATCHDOG_ID, true);
+ }
+}
+
+/*!
+ * \internal
+ * \brief Query the full CIB
+ *
+ * \return Standard Pacemaker return code
+ */
+static int
+fenced_query_cib(void)
+{
+ int rc = pcmk_ok;
+
+ crm_trace("Re-requesting full CIB");
+ rc = cib_api->cmds->query(cib_api, NULL, &local_cib,
+ cib_scope_local|cib_sync_call);
+ rc = pcmk_legacy2rc(rc);
+ if (rc == pcmk_rc_ok) {
+ CRM_ASSERT(local_cib != NULL);
+ } else {
+ crm_err("Couldn't retrieve the CIB: %s " CRM_XS " rc=%d",
+ pcmk_rc_str(rc), rc);
+ }
+ return rc;
+}
+
+static void
+remove_fencing_topology(xmlXPathObjectPtr xpathObj)
+{
+ int max = numXpathResults(xpathObj), lpc = 0;
+
+ for (lpc = 0; lpc < max; lpc++) {
+ xmlNode *match = getXpathResult(xpathObj, lpc);
+
+ CRM_LOG_ASSERT(match != NULL);
+ if (match && crm_element_value(match, XML_DIFF_MARKER)) {
+ /* Deletion */
+ int index = 0;
+ char *target = stonith_level_key(match, fenced_target_by_unknown);
+
+ crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index);
+ if (target == NULL) {
+ crm_err("Invalid fencing target in element %s", ID(match));
+
+ } else if (index <= 0) {
+ crm_err("Invalid level for %s in element %s", target, ID(match));
+
+ } else {
+ topology_remove_helper(target, index);
+ }
+ /* } else { Deal with modifications during the 'addition' stage */
+ }
+ }
+}
+
+static void
+update_fencing_topology(const char *event, xmlNode * msg)
+{
+ int format = 1;
+ const char *xpath;
+ xmlXPathObjectPtr xpathObj = NULL;
+ xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
+
+ CRM_ASSERT(patchset);
+ crm_element_value_int(patchset, PCMK_XA_FORMAT, &format);
+
+ if(format == 1) {
+ /* Process deletions (only) */
+ xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_TAG_FENCING_LEVEL;
+ xpathObj = xpath_search(msg, xpath);
+
+ remove_fencing_topology(xpathObj);
+ freeXpathObject(xpathObj);
+
+ /* Process additions and changes */
+ xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_TAG_FENCING_LEVEL;
+ xpathObj = xpath_search(msg, xpath);
+
+ register_fencing_topology(xpathObj);
+ freeXpathObject(xpathObj);
+
+ } else if(format == 2) {
+ xmlNode *change = NULL;
+ int add[] = { 0, 0, 0 };
+ int del[] = { 0, 0, 0 };
+
+ xml_patch_versions(patchset, add, del);
+
+ for (change = pcmk__xml_first_child(patchset); change != NULL;
+ change = pcmk__xml_next(change)) {
+ const char *op = crm_element_value(change, XML_DIFF_OP);
+ const char *xpath = crm_element_value(change, XML_DIFF_PATH);
+
+ if(op == NULL) {
+ continue;
+
+ } else if(strstr(xpath, "/" XML_TAG_FENCING_LEVEL) != NULL) {
+ /* Change to a specific entry */
+
+ crm_trace("Handling %s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath);
+ if(strcmp(op, "move") == 0) {
+ continue;
+
+ } else if(strcmp(op, "create") == 0) {
+ add_topology_level(change->children);
+
+ } else if(strcmp(op, "modify") == 0) {
+ xmlNode *match = first_named_child(change, XML_DIFF_RESULT);
+
+ if(match) {
+ remove_topology_level(match->children);
+ add_topology_level(match->children);
+ }
+
+ } else if(strcmp(op, "delete") == 0) {
+ /* Nuclear option, all we have is the path and an id... not enough to remove a specific entry */
+ crm_info("Re-initializing fencing topology after %s operation %d.%d.%d for %s",
+ op, add[0], add[1], add[2], xpath);
+ fencing_topology_init();
+ return;
+ }
+
+ } else if (strstr(xpath, "/" XML_TAG_FENCING_TOPOLOGY) != NULL) {
+ /* Change to the topology in general */
+ crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s",
+ op, add[0], add[1], add[2], xpath);
+ fencing_topology_init();
+ return;
+
+ } else if (strstr(xpath, "/" XML_CIB_TAG_CONFIGURATION)) {
+ /* Changes to the whole config section, possibly including the topology as a whole */
+ if(first_named_child(change, XML_TAG_FENCING_TOPOLOGY) == NULL) {
+ crm_trace("Nothing for us in %s operation %d.%d.%d for %s.",
+ op, add[0], add[1], add[2], xpath);
+
+ } else if(strcmp(op, "delete") == 0 || strcmp(op, "create") == 0) {
+ crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s.",
+ op, add[0], add[1], add[2], xpath);
+ fencing_topology_init();
+ return;
+ }
+
+ } else {
+ crm_trace("Nothing for us in %s operation %d.%d.%d for %s",
+ op, add[0], add[1], add[2], xpath);
+ }
+ }
+
+ } else {
+ crm_warn("Unknown patch format: %d", format);
+ }
+}
+
+static void
+update_cib_cache_cb(const char *event, xmlNode * msg)
+{
+ long timeout_ms_saved = stonith_watchdog_timeout_ms;
+ bool need_full_refresh = false;
+
+ if(!have_cib_devices) {
+ crm_trace("Skipping updates until we get a full dump");
+ return;
+
+ } else if(msg == NULL) {
+ crm_trace("Missing %s update", event);
+ return;
+ }
+
+ /* Maintain a local copy of the CIB so that we have full access
+ * to device definitions, location constraints, and node attributes
+ */
+ if (local_cib != NULL) {
+ int rc = pcmk_ok;
+ xmlNode *patchset = NULL;
+
+ crm_element_value_int(msg, F_CIB_RC, &rc);
+ if (rc != pcmk_ok) {
+ return;
+ }
+
+ patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
+ rc = xml_apply_patchset(local_cib, patchset, TRUE);
+ switch (rc) {
+ case pcmk_ok:
+ case -pcmk_err_old_data:
+ break;
+ case -pcmk_err_diff_resync:
+ case -pcmk_err_diff_failed:
+ crm_notice("[%s] Patch aborted: %s (%d)", event, pcmk_strerror(rc), rc);
+ free_xml(local_cib);
+ local_cib = NULL;
+ break;
+ default:
+ crm_warn("[%s] ABORTED: %s (%d)", event, pcmk_strerror(rc), rc);
+ free_xml(local_cib);
+ local_cib = NULL;
+ }
+ }
+
+ if (local_cib == NULL) {
+ if (fenced_query_cib() != pcmk_rc_ok) {
+ return;
+ }
+ need_full_refresh = true;
+ }
+
+ pcmk__refresh_node_caches_from_cib(local_cib);
+ update_stonith_watchdog_timeout_ms(local_cib);
+
+ if (timeout_ms_saved != stonith_watchdog_timeout_ms) {
+ need_full_refresh = true;
+ }
+
+ if (need_full_refresh) {
+ fencing_topology_init();
+ cib_devices_update();
+ } else {
+ // Partial refresh
+ update_fencing_topology(event, msg);
+ update_cib_stonith_devices(event, msg);
+ }
+
+ watchdog_device_update();
+}
+
+static void
+init_cib_cache_cb(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
+{
+ crm_info("Updating device list from CIB");
+ have_cib_devices = TRUE;
+ local_cib = copy_xml(output);
+
+ pcmk__refresh_node_caches_from_cib(local_cib);
+ update_stonith_watchdog_timeout_ms(local_cib);
+
+ fencing_topology_init();
+ cib_devices_update();
+ watchdog_device_update();
+}
+
+static void
+cib_connection_destroy(gpointer user_data)
+{
+ if (stonith_shutdown_flag) {
+ crm_info("Connection to the CIB manager closed");
+ return;
+ } else {
+ crm_crit("Lost connection to the CIB manager, shutting down");
+ }
+ if (cib_api) {
+ cib_api->cmds->signoff(cib_api);
+ }
+ stonith_shutdown(0);
+}
+
+/*!
+ * \internal
+ * \brief Disconnect from CIB manager
+ */
+void
+fenced_cib_cleanup(void)
+{
+ if (cib_api != NULL) {
+ cib_api->cmds->del_notify_callback(cib_api, T_CIB_DIFF_NOTIFY,
+ update_cib_cache_cb);
+ cib__clean_up_connection(&cib_api);
+ }
+ free_xml(local_cib);
+ local_cib = NULL;
+}
+
+void
+setup_cib(void)
+{
+ int rc, retries = 0;
+
+ cib_api = cib_new();
+ if (cib_api == NULL) {
+ crm_err("No connection to the CIB manager");
+ return;
+ }
+
+ do {
+ sleep(retries);
+ rc = cib_api->cmds->signon(cib_api, CRM_SYSTEM_STONITHD, cib_command);
+ } while (rc == -ENOTCONN && ++retries < 5);
+
+ if (rc != pcmk_ok) {
+ crm_err("Could not connect to the CIB manager: %s (%d)", pcmk_strerror(rc), rc);
+
+ } else if (pcmk_ok !=
+ cib_api->cmds->add_notify_callback(cib_api, T_CIB_DIFF_NOTIFY, update_cib_cache_cb)) {
+ crm_err("Could not set CIB notification callback");
+
+ } else {
+ rc = cib_api->cmds->query(cib_api, NULL, NULL, cib_scope_local);
+ cib_api->cmds->register_callback(cib_api, rc, 120, FALSE, NULL, "init_cib_cache_cb",
+ init_cib_cache_cb);
+ cib_api->cmds->set_connection_dnotify(cib_api, cib_connection_destroy);
+ crm_info("Watching for fencing topology changes");
+ }
+}
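Within the new fenced_cib.c, cib_devices_update() refreshes device registrations with a mark-and-sweep: devices previously registered from the CIB are marked dirty, fenced_scheduler_run() re-registers whatever the current CIB still defines (clearing the flag), and anything left dirty afterwards is removed. A hedged sketch of that pattern, reusing the file's stonith_device_t fields but with an illustrative callback standing in for the scheduler run:

static void
refresh_registrations(GHashTable *devices, void (*reregister_all)(void))
{
    GHashTableIter iter;
    stonith_device_t *device = NULL;

    // Mark: assume every CIB-registered device is gone until re-registered
    g_hash_table_iter_init(&iter, devices);
    while (g_hash_table_iter_next(&iter, NULL, (void **) &device)) {
        if (device->cib_registered) {
            device->dirty = TRUE;
        }
    }

    reregister_all();   // e.g. fenced_scheduler_run(local_cib) clears the flag

    // Sweep: anything still dirty is no longer defined in the CIB
    g_hash_table_iter_init(&iter, devices);
    while (g_hash_table_iter_next(&iter, NULL, (void **) &device)) {
        if (device->dirty) {
            g_hash_table_iter_remove(&iter);
        }
    }
}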
diff --git a/daemons/fenced/fenced_commands.c b/daemons/fenced/fenced_commands.c
index ba63cf8..7a62ed6 100644
--- a/daemons/fenced/fenced_commands.c
+++ b/daemons/fenced/fenced_commands.c
@@ -68,8 +68,6 @@ struct device_search_s {
static gboolean stonith_device_dispatch(gpointer user_data);
static void st_child_done(int pid, const pcmk__action_result_t *result,
void *user_data);
-static void stonith_send_reply(xmlNode * reply, int call_options, const char *remote_peer,
- pcmk__client_t *client);
static void search_devices_record_result(struct device_search_s *search, const char *device,
gboolean can_fence);
@@ -124,7 +122,7 @@ static gboolean
is_action_required(const char *action, const stonith_device_t *device)
{
return (device != NULL) && device->automatic_unfencing
- && pcmk__str_eq(action, "on", pcmk__str_none);
+ && pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none);
}
static int
@@ -223,11 +221,11 @@ get_action_timeout(const stonith_device_t *device, const char *action,
/* If "reboot" was requested but the device does not support it,
* we will remap to "off", so check timeout for "off" instead
*/
- if (pcmk__str_eq(action, "reboot", pcmk__str_none)
+ if (pcmk__str_eq(action, PCMK_ACTION_REBOOT, pcmk__str_none)
&& !pcmk_is_set(device->flags, st_device_supports_reboot)) {
crm_trace("%s doesn't support reboot, using timeout for off instead",
device->id);
- action = "off";
+ action = PCMK_ACTION_OFF;
}
/* If the device config specified an action-specific timeout, use it */
@@ -277,7 +275,7 @@ fenced_device_reboot_action(const char *device_id)
action = g_hash_table_lookup(device->params, "pcmk_reboot_action");
}
}
- return pcmk__s(action, "reboot");
+ return pcmk__s(action, PCMK_ACTION_REBOOT);
}
/*!
@@ -554,7 +552,7 @@ stonith_device_execute(stonith_device_t * device)
#if SUPPORT_CIBSECRETS
exec_rc = pcmk__substitute_secrets(device->id, device->params);
if (exec_rc != pcmk_rc_ok) {
- if (pcmk__str_eq(cmd->action, "stop", pcmk__str_none)) {
+ if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_none)) {
crm_info("Proceeding with stop operation for %s "
"despite being unable to load CIB secrets (%s)",
device->id, pcmk_rc_str(exec_rc));
@@ -570,14 +568,14 @@ stonith_device_execute(stonith_device_t * device)
#endif
action_str = cmd->action;
- if (pcmk__str_eq(cmd->action, "reboot", pcmk__str_none)
+ if (pcmk__str_eq(cmd->action, PCMK_ACTION_REBOOT, pcmk__str_none)
&& !pcmk_is_set(device->flags, st_device_supports_reboot)) {
crm_notice("Remapping 'reboot' action%s%s using %s to 'off' "
"because agent '%s' does not support reboot",
((cmd->target == NULL)? "" : " targeting "),
pcmk__s(cmd->target, ""), device->id, device->agent);
- action_str = "off";
+ action_str = PCMK_ACTION_OFF;
}
if (pcmk_is_set(device->flags, st_device_supports_parameter_port)) {
@@ -691,7 +689,7 @@ schedule_stonith_command(async_command_t * cmd, stonith_device_t * device)
delay_base = delay_max;
}
if (delay_max > 0) {
- // coverity[dont_call] We're not using rand() for security
+ // coverity[dontcall] It doesn't matter here if rand() is predictable
cmd->start_delay +=
((delay_max != delay_base)?(rand() % (delay_max - delay_base)):0)
+ delay_base;
@@ -948,16 +946,16 @@ read_action_metadata(stonith_device_t *device)
action = crm_element_value(match, "name");
- if (pcmk__str_eq(action, "list", pcmk__str_none)) {
+ if (pcmk__str_eq(action, PCMK_ACTION_LIST, pcmk__str_none)) {
stonith__set_device_flags(device->flags, device->id,
st_device_supports_list);
- } else if (pcmk__str_eq(action, "status", pcmk__str_none)) {
+ } else if (pcmk__str_eq(action, PCMK_ACTION_STATUS, pcmk__str_none)) {
stonith__set_device_flags(device->flags, device->id,
st_device_supports_status);
- } else if (pcmk__str_eq(action, "reboot", pcmk__str_none)) {
+ } else if (pcmk__str_eq(action, PCMK_ACTION_REBOOT, pcmk__str_none)) {
stonith__set_device_flags(device->flags, device->id,
st_device_supports_reboot);
- } else if (pcmk__str_eq(action, "on", pcmk__str_none)) {
+ } else if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none)) {
/* "automatic" means the cluster will unfence node when it joins */
/* "required" is a deprecated synonym for "automatic" */
if (pcmk__xe_attr_is_true(match, "automatic") || pcmk__xe_attr_is_true(match, "required")) {
@@ -1024,16 +1022,16 @@ xml2device_params(const char *name, const xmlNode *dev)
if (*value == '\0') {
crm_warn("Ignoring empty '%s' parameter", STONITH_ATTR_ACTION_OP);
- } else if (strcmp(value, "reboot") == 0) {
+ } else if (strcmp(value, PCMK_ACTION_REBOOT) == 0) {
crm_warn("Ignoring %s='reboot' (see stonith-action cluster property instead)",
STONITH_ATTR_ACTION_OP);
- } else if (strcmp(value, "off") == 0) {
- map_action(params, "reboot", value);
+ } else if (strcmp(value, PCMK_ACTION_OFF) == 0) {
+ map_action(params, PCMK_ACTION_REBOOT, value);
} else {
- map_action(params, "off", value);
- map_action(params, "reboot", value);
+ map_action(params, PCMK_ACTION_OFF, value);
+ map_action(params, PCMK_ACTION_REBOOT, value);
}
g_hash_table_remove(params, STONITH_ATTR_ACTION_OP);
@@ -1132,7 +1130,7 @@ build_device_from_xml(xmlNode *dev)
device->automatic_unfencing = TRUE;
}
- if (is_action_required("on", device)) {
+ if (is_action_required(PCMK_ACTION_ON, device)) {
crm_info("Fencing device '%s' requires unfencing", device->id);
}
@@ -1672,8 +1670,7 @@ unpack_level_request(xmlNode *xml, enum fenced_target_by *mode, char **target,
* search by xpath, because it might give multiple hits if the XML is the
* CIB.
*/
- if ((xml != NULL)
- && !pcmk__str_eq(TYPE(xml), XML_TAG_FENCING_LEVEL, pcmk__str_none)) {
+ if ((xml != NULL) && !pcmk__xe_is(xml, XML_TAG_FENCING_LEVEL)) {
xml = get_xpath_object("//" XML_TAG_FENCING_LEVEL, xml, LOG_WARNING);
}
@@ -1972,7 +1969,7 @@ execute_agent_action(xmlNode *msg, pcmk__action_result_t *result)
"Watchdog fence device not configured");
return;
- } else if (pcmk__str_eq(action, "list", pcmk__str_none)) {
+ } else if (pcmk__str_eq(action, PCMK_ACTION_LIST, pcmk__str_none)) {
pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
pcmk__set_result_output(result,
list_to_string(stonith_watchdog_targets,
@@ -1980,7 +1977,7 @@ execute_agent_action(xmlNode *msg, pcmk__action_result_t *result)
NULL);
return;
- } else if (pcmk__str_eq(action, "monitor", pcmk__str_none)) {
+ } else if (pcmk__str_eq(action, PCMK_ACTION_MONITOR, pcmk__str_none)) {
pcmk__set_result(result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
return;
}
@@ -1994,7 +1991,8 @@ execute_agent_action(xmlNode *msg, pcmk__action_result_t *result)
"'%s' not found", id);
return;
- } else if (!device->api_registered && !strcmp(action, "monitor")) {
+ } else if (!device->api_registered
+ && (strcmp(action, PCMK_ACTION_MONITOR) == 0)) {
// Monitors may run only on "started" (API-registered) devices
crm_info("Ignoring API '%s' action request because device %s not active",
action, id);
@@ -2104,14 +2102,14 @@ localhost_is_eligible_with_remap(const stonith_device_t *device,
// Check potential remaps
- if (pcmk__str_eq(action, "reboot", pcmk__str_none)) {
+ if (pcmk__str_eq(action, PCMK_ACTION_REBOOT, pcmk__str_none)) {
/* "reboot" might get remapped to "off" then "on", so even if reboot is
* disallowed, return true if either of those is allowed. We'll report
* the disallowed actions with the results. We never allow self-fencing
* for remapped "on" actions because the target is off at that point.
*/
- if (localhost_is_eligible(device, "off", target, allow_self)
- || localhost_is_eligible(device, "on", target, FALSE)) {
+ if (localhost_is_eligible(device, PCMK_ACTION_OFF, target, allow_self)
+ || localhost_is_eligible(device, PCMK_ACTION_ON, target, FALSE)) {
return true;
}
}
@@ -2146,7 +2144,7 @@ can_fence_host_with_device(stonith_device_t *dev,
/* Answer immediately if the device does not support the action
* or the local node is not allowed to perform it
*/
- if (pcmk__str_eq(action, "on", pcmk__str_none)
+ if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none)
&& !pcmk_is_set(dev->flags, st_device_supports_on)) {
check_type = "Agent does not support 'on'";
goto search_report_results;
@@ -2175,7 +2173,8 @@ can_fence_host_with_device(stonith_device_t *dev,
time_t now = time(NULL);
if (dev->targets == NULL || dev->targets_age + 60 < now) {
- int device_timeout = get_action_timeout(dev, "list", search->per_device_timeout);
+ int device_timeout = get_action_timeout(dev, PCMK_ACTION_LIST,
+ search->per_device_timeout);
if (device_timeout > search->per_device_timeout) {
crm_notice("Since the pcmk_list_timeout(%ds) parameter of %s is larger than stonith-timeout(%ds), timeout may occur",
@@ -2185,7 +2184,7 @@ can_fence_host_with_device(stonith_device_t *dev,
crm_trace("Running '%s' to check whether %s is eligible to fence %s (%s)",
check_type, dev_id, target, action);
- schedule_internal_command(__func__, dev, "list", NULL,
+ schedule_internal_command(__func__, dev, PCMK_ACTION_LIST, NULL,
search->per_device_timeout, search, dynamic_list_search_cb);
/* we'll respond to this search request async in the cb */
@@ -2207,7 +2206,7 @@ can_fence_host_with_device(stonith_device_t *dev,
crm_trace("Running '%s' to check whether %s is eligible to fence %s (%s)",
check_type, dev_id, target, action);
- schedule_internal_command(__func__, dev, "status", target,
+ schedule_internal_command(__func__, dev, PCMK_ACTION_STATUS, target,
search->per_device_timeout, search, status_search_cb);
/* we'll respond to this search request async in the cb */
return;
@@ -2384,6 +2383,30 @@ add_action_reply(xmlNode *xml, const char *action,
add_disallowed(child, action, device, target, allow_suicide);
}
+/*!
+ * \internal
+ * \brief Send a reply to a CPG peer or IPC client
+ *
+ * \param[in] reply XML reply to send
+ * \param[in] call_options Send synchronously if st_opt_sync_call is set
+ * \param[in] remote_peer If not NULL, name of peer node to send CPG reply
+ * \param[in,out] client If not NULL, client to send IPC reply
+ */
+static void
+stonith_send_reply(const xmlNode *reply, int call_options,
+ const char *remote_peer, pcmk__client_t *client)
+{
+ CRM_CHECK((reply != NULL) && ((remote_peer != NULL) || (client != NULL)),
+ return);
+
+ if (remote_peer == NULL) {
+ do_local_reply(reply, client, call_options);
+ } else {
+ send_cluster_message(crm_get_peer(0, remote_peer), crm_msg_stonith_ng,
+ reply, FALSE);
+ }
+}
+
static void
stonith_query_capable_device_cb(GList * devices, void *user_data)
{
@@ -2429,15 +2452,16 @@ stonith_query_capable_device_cb(GList * devices, void *user_data)
* capable device that doesn't support "reboot", remap to "off" instead.
*/
if (!pcmk_is_set(device->flags, st_device_supports_reboot)
- && pcmk__str_eq(query->action, "reboot", pcmk__str_none)) {
+ && pcmk__str_eq(query->action, PCMK_ACTION_REBOOT,
+ pcmk__str_none)) {
crm_trace("%s doesn't support reboot, using values for off instead",
device->id);
- action = "off";
+ action = PCMK_ACTION_OFF;
}
/* Add action-specific values if available */
add_action_specific_attributes(dev, action, device, query->target);
- if (pcmk__str_eq(query->action, "reboot", pcmk__str_none)) {
+ if (pcmk__str_eq(query->action, PCMK_ACTION_REBOOT, pcmk__str_none)) {
/* A "reboot" *might* get remapped to "off" then "on", so after
* sending the "reboot"-specific values in the main element, we add
* sub-elements for "off" and "on" values.
@@ -2451,9 +2475,9 @@ stonith_query_capable_device_cb(GList * devices, void *user_data)
*/
add_disallowed(dev, action, device, query->target,
pcmk_is_set(query->call_options, st_opt_allow_suicide));
- add_action_reply(dev, "off", device, query->target,
+ add_action_reply(dev, PCMK_ACTION_OFF, device, query->target,
pcmk_is_set(query->call_options, st_opt_allow_suicide));
- add_action_reply(dev, "on", device, query->target, FALSE);
+ add_action_reply(dev, PCMK_ACTION_ON, device, query->target, FALSE);
}
/* A query without a target wants device parameters */
@@ -2765,8 +2789,10 @@ st_child_done(int pid, const pcmk__action_result_t *result, void *user_data)
/* The device is ready to do something else now */
if (device) {
- if (!device->verified && pcmk__result_ok(result) &&
- (pcmk__strcase_any_of(cmd->action, "list", "monitor", "status", NULL))) {
+ if (!device->verified && pcmk__result_ok(result)
+ && pcmk__strcase_any_of(cmd->action, PCMK_ACTION_LIST,
+ PCMK_ACTION_MONITOR, PCMK_ACTION_STATUS,
+ NULL)) {
device->verified = TRUE;
}
@@ -3052,30 +3078,6 @@ check_alternate_host(const char *target)
return NULL;
}
-/*!
- * \internal
- * \brief Send a reply to a CPG peer or IPC client
- *
- * \param[in] reply XML reply to send
- * \param[in] call_options Send synchronously if st_opt_sync_call is set
- * \param[in] remote_peer If not NULL, name of peer node to send CPG reply
- * \param[in,out] client If not NULL, client to send IPC reply
- */
-static void
-stonith_send_reply(xmlNode *reply, int call_options, const char *remote_peer,
- pcmk__client_t *client)
-{
- CRM_CHECK((reply != NULL) && ((remote_peer != NULL) || (client != NULL)),
- return);
-
- if (remote_peer == NULL) {
- do_local_reply(reply, client, call_options);
- } else {
- send_cluster_message(crm_get_peer(0, remote_peer), crm_msg_stonith_ng,
- reply, FALSE);
- }
-}
-
static void
remove_relay_op(xmlNode * request)
{
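The fenced_commands.c hunks above replace hard-coded action strings with the shared PCMK_ACTION_* constants and move stonith_send_reply() earlier in the file, with its reply argument now const-qualified. A minimal caller sketch, assuming a request and result are already in hand — fenced_construct_reply() is declared in pacemaker-fenced.h, and the variable names here are illustrative only:

    /* Sketch: route a fencing reply back to whoever asked.
     * remote_peer != NULL -> CPG reply to that peer
     * remote_peer == NULL -> IPC reply to the local client
     */
    xmlNode *reply = fenced_construct_reply(request, NULL, &result);

    stonith_send_reply(reply, call_options, remote_peer, client);
    free_xml(reply);
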
diff --git a/daemons/fenced/fenced_remote.c b/daemons/fenced/fenced_remote.c
index dc67947..843b3d4 100644
--- a/daemons/fenced/fenced_remote.c
+++ b/daemons/fenced/fenced_remote.c
@@ -292,7 +292,7 @@ init_stonith_remote_op_hash_table(GHashTable **table)
static const char *
op_requested_action(const remote_fencing_op_t *op)
{
- return ((op->phase > st_phase_requested)? "reboot" : op->action);
+ return ((op->phase > st_phase_requested)? PCMK_ACTION_REBOOT : op->action);
}
/*!
@@ -311,7 +311,7 @@ op_phase_off(remote_fencing_op_t *op)
/* Happily, "off" and "on" are shorter than "reboot", so we can reuse the
* memory allocation at each phase.
*/
- strcpy(op->action, "off");
+ strcpy(op->action, PCMK_ACTION_OFF);
}
/*!
@@ -329,7 +329,7 @@ op_phase_on(remote_fencing_op_t *op)
"remapping to 'on' for %s " CRM_XS " id=%.8s",
op->target, op->client_name, op->id);
op->phase = st_phase_on;
- strcpy(op->action, "on");
+ strcpy(op->action, PCMK_ACTION_ON);
/* Skip devices with automatic unfencing, because the cluster will handle it
* when the node rejoins.
@@ -362,7 +362,7 @@ undo_op_remap(remote_fencing_op_t *op)
crm_info("Undoing remap of reboot targeting %s for %s "
CRM_XS " id=%.8s", op->target, op->client_name, op->id);
op->phase = st_phase_requested;
- strcpy(op->action, "reboot");
+ strcpy(op->action, PCMK_ACTION_REBOOT);
}
}
@@ -673,8 +673,8 @@ remote_op_timeout_one(gpointer userdata)
"Peer did not return fence result within timeout");
// The requested delay has been applied for the first device
- if (op->delay > 0) {
- op->delay = 0;
+ if (op->client_delay > 0) {
+ op->client_delay = 0;
crm_trace("Try another device for '%s' action targeting %s "
"for client %s without delay " CRM_XS " id=%.8s",
op->action, op->target, op->client_name, op->id);
@@ -961,12 +961,12 @@ advance_topology_level(remote_fencing_op_t *op, bool empty_ok)
set_op_device_list(op, tp->levels[op->level]);
// The requested delay has been applied for the first fencing level
- if (op->level > 1 && op->delay > 0) {
- op->delay = 0;
+ if ((op->level > 1) && (op->client_delay > 0)) {
+ op->client_delay = 0;
}
if ((g_list_next(op->devices_list) != NULL)
- && pcmk__str_eq(op->action, "reboot", pcmk__str_none)) {
+ && pcmk__str_eq(op->action, PCMK_ACTION_REBOOT, pcmk__str_none)) {
/* A reboot has been requested for a topology level with multiple
* devices. Instead of rebooting the devices sequentially, we will
* turn them all off, then turn them all on again. (Think about
@@ -1163,7 +1163,7 @@ create_remote_stonith_op(const char *client, xmlNode *request, gboolean peer)
crm_element_value_int(request, F_STONITH_TIMEOUT, &(op->base_timeout));
// Value -1 means disable any static/random fencing delays
- crm_element_value_int(request, F_STONITH_DELAY, &(op->delay));
+ crm_element_value_int(request, F_STONITH_DELAY, &(op->client_delay));
if (peer && dev) {
op->id = crm_element_value_copy(dev, F_STONITH_REMOTE_OP_ID);
@@ -1474,8 +1474,8 @@ get_device_timeout(const remote_fencing_op_t *op,
return op->base_timeout;
}
- // op->delay < 0 means disable any static/random fencing delays
- if (with_delay && op->delay >= 0) {
+ // op->client_delay < 0 means disable any static/random fencing delays
+ if (with_delay && (op->client_delay >= 0)) {
// delay_base is eventually limited by delay_max
delay = (props->delay_max[op->phase] > 0 ?
props->delay_max[op->phase] : props->delay_base[op->phase]);
@@ -1541,7 +1541,7 @@ get_op_total_timeout(const remote_fencing_op_t *op,
GList *iter = NULL;
GList *auto_list = NULL;
- if (pcmk__str_eq(op->action, "on", pcmk__str_none)
+ if (pcmk__str_eq(op->action, PCMK_ACTION_ON, pcmk__str_none)
&& (op->automatic_list != NULL)) {
auto_list = g_list_copy(op->automatic_list);
}
@@ -1620,7 +1620,7 @@ get_op_total_timeout(const remote_fencing_op_t *op,
* up the total timeout.
*/
return ((total_timeout ? total_timeout : op->base_timeout)
- + (op->delay > 0 ? op->delay : 0));
+ + ((op->client_delay > 0)? op->client_delay : 0));
}
static void
@@ -1695,7 +1695,7 @@ advance_topology_device_in_level(remote_fencing_op_t *op, const char *device,
/* Handle automatic unfencing if an "on" action was requested */
if ((op->phase == st_phase_requested)
- && pcmk__str_eq(op->action, "on", pcmk__str_none)) {
+ && pcmk__str_eq(op->action, PCMK_ACTION_ON, pcmk__str_none)) {
/* If the device we just executed was required, it's not anymore */
remove_required_device(op, device);
@@ -1724,8 +1724,8 @@ advance_topology_device_in_level(remote_fencing_op_t *op, const char *device,
op->target, op->client_name, op->originator);
// The requested delay has been applied for the first device
- if (op->delay > 0) {
- op->delay = 0;
+ if (op->client_delay > 0) {
+ op->client_delay = 0;
}
request_peer_fencing(op, NULL);
@@ -1794,7 +1794,7 @@ request_peer_fencing(remote_fencing_op_t *op, peer_device_info_t *peer)
* node back on when we should.
*/
device = op->devices->data;
- if (pcmk__str_eq(fenced_device_reboot_action(device), "off",
+ if (pcmk__str_eq(fenced_device_reboot_action(device), PCMK_ACTION_OFF,
pcmk__str_none)) {
crm_info("Not turning %s back on using %s because the device is "
"configured to stay off (pcmk_reboot_action='off')",
@@ -1844,13 +1844,16 @@ request_peer_fencing(remote_fencing_op_t *op, peer_device_info_t *peer)
}
if (peer) {
- /* Take any requested fencing delay into account to prevent it from eating
- * up the timeout.
- */
- int timeout_one = (op->delay > 0 ?
- TIMEOUT_MULTIPLY_FACTOR * op->delay : 0);
+ int timeout_one = 0;
xmlNode *remote_op = stonith_create_op(op->client_callid, op->id, STONITH_OP_FENCE, NULL, 0);
+ if (op->client_delay > 0) {
+ /* Take requested fencing delay into account to prevent it from
+ * eating up the timeout.
+ */
+ timeout_one = TIMEOUT_MULTIPLY_FACTOR * op->client_delay;
+ }
+
crm_xml_add(remote_op, F_STONITH_REMOTE_OP_ID, op->id);
crm_xml_add(remote_op, F_STONITH_TARGET, op->target);
crm_xml_add(remote_op, F_STONITH_ACTION, op->action);
@@ -1859,7 +1862,7 @@ request_peer_fencing(remote_fencing_op_t *op, peer_device_info_t *peer)
crm_xml_add(remote_op, F_STONITH_CLIENTNAME, op->client_name);
crm_xml_add_int(remote_op, F_STONITH_TIMEOUT, timeout);
crm_xml_add_int(remote_op, F_STONITH_CALLOPTS, op->call_options);
- crm_xml_add_int(remote_op, F_STONITH_DELAY, op->delay);
+ crm_xml_add_int(remote_op, F_STONITH_DELAY, op->client_delay);
if (device) {
timeout_one += TIMEOUT_MULTIPLY_FACTOR *
@@ -2097,7 +2100,7 @@ parse_action_specific(const xmlNode *xml, const char *peer, const char *device,
}
/* Handle devices with automatic unfencing */
- if (pcmk__str_eq(action, "on", pcmk__str_none)) {
+ if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none)) {
int required = 0;
crm_element_value_int(xml, F_STONITH_DEVICE_REQUIRED, &required);
@@ -2160,11 +2163,11 @@ add_device_properties(const xmlNode *xml, remote_fencing_op_t *op,
* values for "off" and "on" in child elements, just in case the reboot
* winds up getting remapped.
*/
- if (pcmk__str_eq(ID(child), "off", pcmk__str_none)) {
- parse_action_specific(child, peer->host, device, "off",
+ if (pcmk__str_eq(ID(child), PCMK_ACTION_OFF, pcmk__str_none)) {
+ parse_action_specific(child, peer->host, device, PCMK_ACTION_OFF,
op, st_phase_off, props);
- } else if (pcmk__str_eq(ID(child), "on", pcmk__str_none)) {
- parse_action_specific(child, peer->host, device, "on",
+ } else if (pcmk__str_eq(ID(child), PCMK_ACTION_ON, pcmk__str_none)) {
+ parse_action_specific(child, peer->host, device, PCMK_ACTION_ON,
op, st_phase_on, props);
}
}
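In fenced_remote.c the client-requested delay moves from op->delay to op->client_delay and is cleared once the first device or topology level has honored it; request_peer_fencing() now folds the delay into the per-device timeout only when a positive delay was actually requested. A compressed sketch of that calculation (TIMEOUT_MULTIPLY_FACTOR and the per-device timeout lookup are taken on faith from the surrounding code):

    int timeout_one = 0;

    if (op->client_delay > 0) {
        /* keep the requested delay from eating into the fencing timeout */
        timeout_one = TIMEOUT_MULTIPLY_FACTOR * op->client_delay;
    }
    /* ... the device's own timeout is then added on top, as in the hunk above ... */
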
diff --git a/daemons/fenced/fenced_scheduler.c b/daemons/fenced/fenced_scheduler.c
new file mode 100644
index 0000000..27d990f
--- /dev/null
+++ b/daemons/fenced/fenced_scheduler.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright 2009-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+*/
+
+#include <crm_internal.h>
+
+#include <stdio.h>
+#include <errno.h>
+#include <glib.h>
+
+#include <crm/pengine/status.h>
+#include <crm/pengine/internal.h>
+
+#include <pacemaker-internal.h>
+#include <pacemaker-fenced.h>
+
+static pcmk_scheduler_t *scheduler = NULL;
+
+/*!
+ * \internal
+ * \brief Initialize scheduler data for fencer purposes
+ *
+ * \return Standard Pacemaker return code
+ */
+int
+fenced_scheduler_init(void)
+{
+ pcmk__output_t *logger = NULL;
+ int rc = pcmk__log_output_new(&logger);
+
+ if (rc != pcmk_rc_ok) {
+ return rc;
+ }
+
+ scheduler = pe_new_working_set();
+ if (scheduler == NULL) {
+ pcmk__output_free(logger);
+ return ENOMEM;
+ }
+
+ pe__register_messages(logger);
+ pcmk__register_lib_messages(logger);
+ pcmk__output_set_log_level(logger, LOG_TRACE);
+ scheduler->priv = logger;
+
+ return pcmk_rc_ok;
+}
+
+/*!
+ * \internal
+ * \brief Free all scheduler-related resources
+ */
+void
+fenced_scheduler_cleanup(void)
+{
+ if (scheduler != NULL) {
+ pcmk__output_t *logger = scheduler->priv;
+
+ if (logger != NULL) {
+ logger->finish(logger, CRM_EX_OK, true, NULL);
+ pcmk__output_free(logger);
+ scheduler->priv = NULL;
+ }
+ pe_free_working_set(scheduler);
+ scheduler = NULL;
+ }
+}
+
+/*!
+ * \internal
+ * \brief Check whether the local node is in a resource's allowed node list
+ *
+ * \param[in] rsc Resource to check
+ *
+ * \return Pointer to node if found, otherwise NULL
+ */
+static pcmk_node_t *
+local_node_allowed_for(const pcmk_resource_t *rsc)
+{
+ if ((rsc != NULL) && (stonith_our_uname != NULL)) {
+ GHashTableIter iter;
+ pcmk_node_t *node = NULL;
+
+ g_hash_table_iter_init(&iter, rsc->allowed_nodes);
+ while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
+ if (pcmk__str_eq(node->details->uname, stonith_our_uname,
+ pcmk__str_casei)) {
+ return node;
+ }
+ }
+ }
+ return NULL;
+}
+
+/*!
+ * \internal
+ * \brief If a given resource or any of its children are fencing devices,
+ * register the devices
+ *
+ * \param[in,out] data Resource to check
+ * \param[in,out] user_data Ignored
+ */
+static void
+register_if_fencing_device(gpointer data, gpointer user_data)
+{
+ pcmk_resource_t *rsc = data;
+
+ xmlNode *xml = NULL;
+ GHashTableIter hash_iter;
+ pcmk_node_t *node = NULL;
+ const char *name = NULL;
+ const char *value = NULL;
+ const char *rclass = NULL;
+ const char *agent = NULL;
+ const char *rsc_provides = NULL;
+ stonith_key_value_t *params = NULL;
+
+ // If this is a collective resource, check children instead
+ if (rsc->children != NULL) {
+ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ register_if_fencing_device(iter->data, NULL);
+ if (pe_rsc_is_clone(rsc)) {
+ return; // Only one instance needs to be checked for clones
+ }
+ }
+ return;
+ }
+
+ rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
+ if (!pcmk__str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
+ return; // Not a fencing device
+ }
+
+ if (pe__resource_is_disabled(rsc)) {
+ crm_info("Ignoring fencing device %s because it is disabled", rsc->id);
+ return;
+ }
+
+ if ((stonith_watchdog_timeout_ms <= 0) &&
+ pcmk__str_eq(rsc->id, STONITH_WATCHDOG_ID, pcmk__str_none)) {
+ crm_info("Ignoring fencing device %s "
+ "because watchdog fencing is disabled", rsc->id);
+ return;
+ }
+
+ // Check whether local node is allowed to run resource
+ node = local_node_allowed_for(rsc);
+ if (node == NULL) {
+ crm_info("Ignoring fencing device %s "
+ "because local node is not allowed to run it", rsc->id);
+ return;
+ }
+ if (node->weight < 0) {
+ crm_info("Ignoring fencing device %s "
+ "because local node has preference %s for it",
+ rsc->id, pcmk_readable_score(node->weight));
+ return;
+ }
+
+ // If device is in a group, check whether local node is allowed for group
+ if ((rsc->parent != NULL)
+ && (rsc->parent->variant == pcmk_rsc_variant_group)) {
+ pcmk_node_t *group_node = local_node_allowed_for(rsc->parent);
+
+ if ((group_node != NULL) && (group_node->weight < 0)) {
+ crm_info("Ignoring fencing device %s "
+ "because local node has preference %s for its group",
+ rsc->id, pcmk_readable_score(group_node->weight));
+ return;
+ }
+ }
+
+ crm_debug("Reloading configuration of fencing device %s", rsc->id);
+
+ agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE);
+
+ get_meta_attributes(rsc->meta, rsc, node, scheduler);
+ rsc_provides = g_hash_table_lookup(rsc->meta, PCMK_STONITH_PROVIDES);
+
+ g_hash_table_iter_init(&hash_iter, pe_rsc_params(rsc, node, scheduler));
+ while (g_hash_table_iter_next(&hash_iter, (gpointer *) &name,
+ (gpointer *) &value)) {
+ if ((name == NULL) || (value == NULL)) {
+ continue;
+ }
+ params = stonith_key_value_add(params, name, value);
+ }
+
+ xml = create_device_registration_xml(pcmk__s(rsc->clone_name, rsc->id),
+ st_namespace_any, agent, params,
+ rsc_provides);
+ stonith_key_value_freeall(params, 1, 1);
+ CRM_ASSERT(stonith_device_register(xml, TRUE) == pcmk_ok);
+ free_xml(xml);
+}
+
+/*!
+ * \internal
+ * \brief Run the scheduler for fencer purposes
+ *
+ * \param[in] cib Cluster's current CIB
+ */
+void
+fenced_scheduler_run(xmlNode *cib)
+{
+ CRM_CHECK((cib != NULL) && (scheduler != NULL), return);
+
+ if (scheduler->now != NULL) {
+ crm_time_free(scheduler->now);
+ scheduler->now = NULL;
+ }
+ scheduler->localhost = stonith_our_uname;
+ pcmk__schedule_actions(cib, pcmk_sched_location_only
+ |pcmk_sched_no_compat
+ |pcmk_sched_no_counts, scheduler);
+ g_list_foreach(scheduler->resources, register_if_fencing_device, NULL);
+
+ scheduler->input = NULL; // Wasn't a copy, so don't let API free it
+ pe_reset_working_set(scheduler);
+}
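The new fenced_scheduler.c gives the fencer a private scheduler working set: fenced_scheduler_init() wires a trace-level log output into it, fenced_scheduler_run() performs a location-only scheduling pass over the current CIB and registers every fencing device the local node is allowed to run, and fenced_scheduler_cleanup() releases everything. A rough sequencing sketch, assuming the caller already holds the fencer's CIB copy (current_cib is an illustrative name):

    if (fenced_scheduler_init() != pcmk_rc_ok) {
        crm_exit(CRM_EX_FATAL);           /* cannot work without scheduler data */
    }

    fenced_scheduler_run(current_cib);    /* (re)registers eligible fencing devices */

    /* ... at shutdown ... */
    fenced_scheduler_cleanup();
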
diff --git a/daemons/fenced/pacemaker-fenced.c b/daemons/fenced/pacemaker-fenced.c
index 4edda6c..7c69fb8 100644
--- a/daemons/fenced/pacemaker-fenced.c
+++ b/daemons/fenced/pacemaker-fenced.c
@@ -27,7 +27,6 @@
#include <crm/common/ipc.h>
#include <crm/common/ipc_internal.h>
#include <crm/common/output_internal.h>
-#include <crm/cluster/internal.h>
#include <crm/stonith-ng.h>
#include <crm/fencing/internal.h>
@@ -37,8 +36,6 @@
#include <crm/common/mainloop.h>
#include <crm/cib/internal.h>
-#include <crm/pengine/status.h>
-#include <pacemaker-internal.h>
#include <pacemaker-fenced.h>
@@ -51,18 +48,9 @@ GList *stonith_watchdog_targets = NULL;
static GMainLoop *mainloop = NULL;
gboolean stand_alone = FALSE;
-static gboolean stonith_shutdown_flag = FALSE;
+gboolean stonith_shutdown_flag = FALSE;
static qb_ipcs_service_t *ipcs = NULL;
-static xmlNode *local_cib = NULL;
-static pe_working_set_t *fenced_data_set = NULL;
-static const unsigned long long data_set_flags = pe_flag_quick_location
- | pe_flag_no_compat
- | pe_flag_no_counts;
-
-static cib_t *cib_api = NULL;
-
-static pcmk__output_t *logger_out = NULL;
static pcmk__output_t *out = NULL;
pcmk__supported_format_t formats[] = {
@@ -77,9 +65,8 @@ static struct {
gchar **log_files;
} options;
-static crm_exit_t exit_code = CRM_EX_OK;
+crm_exit_t exit_code = CRM_EX_OK;
-static void stonith_shutdown(int nsig);
static void stonith_cleanup(void);
static int32_t
@@ -241,7 +228,8 @@ stonith_peer_cs_destroy(gpointer user_data)
#endif
void
-do_local_reply(xmlNode *notify_src, pcmk__client_t *client, int call_options)
+do_local_reply(const xmlNode *notify_src, pcmk__client_t *client,
+ int call_options)
{
/* send callback to originating child */
int local_rc = pcmk_rc_ok;
@@ -292,7 +280,7 @@ static void
stonith_notify_client(gpointer key, gpointer value, gpointer user_data)
{
- xmlNode *update_msg = user_data;
+ const xmlNode *update_msg = user_data;
pcmk__client_t *client = value;
const char *type = NULL;
@@ -443,589 +431,6 @@ fenced_send_level_notification(const char *op,
send_config_notification(op, result, desc, g_hash_table_size(topology));
}
-static void
-topology_remove_helper(const char *node, int level)
-{
- char *desc = NULL;
- pcmk__action_result_t result = PCMK__UNKNOWN_RESULT;
- xmlNode *data = create_xml_node(NULL, XML_TAG_FENCING_LEVEL);
-
- crm_xml_add(data, F_STONITH_ORIGIN, __func__);
- crm_xml_add_int(data, XML_ATTR_STONITH_INDEX, level);
- crm_xml_add(data, XML_ATTR_STONITH_TARGET, node);
-
- fenced_unregister_level(data, &desc, &result);
- fenced_send_level_notification(STONITH_OP_LEVEL_DEL, &result, desc);
- pcmk__reset_result(&result);
- free_xml(data);
- free(desc);
-}
-
-static void
-remove_cib_device(xmlXPathObjectPtr xpathObj)
-{
- int max = numXpathResults(xpathObj), lpc = 0;
-
- for (lpc = 0; lpc < max; lpc++) {
- const char *rsc_id = NULL;
- const char *standard = NULL;
- xmlNode *match = getXpathResult(xpathObj, lpc);
-
- CRM_LOG_ASSERT(match != NULL);
- if(match != NULL) {
- standard = crm_element_value(match, XML_AGENT_ATTR_CLASS);
- }
-
- if (!pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
- continue;
- }
-
- rsc_id = crm_element_value(match, XML_ATTR_ID);
-
- stonith_device_remove(rsc_id, true);
- }
-}
-
-static void
-remove_topology_level(xmlNode *match)
-{
- int index = 0;
- char *key = NULL;
-
- CRM_CHECK(match != NULL, return);
-
- key = stonith_level_key(match, fenced_target_by_unknown);
- crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index);
- topology_remove_helper(key, index);
- free(key);
-}
-
-static void
-add_topology_level(xmlNode *match)
-{
- char *desc = NULL;
- pcmk__action_result_t result = PCMK__UNKNOWN_RESULT;
-
- CRM_CHECK(match != NULL, return);
-
- fenced_register_level(match, &desc, &result);
- fenced_send_level_notification(STONITH_OP_LEVEL_ADD, &result, desc);
- pcmk__reset_result(&result);
- free(desc);
-}
-
-static void
-remove_fencing_topology(xmlXPathObjectPtr xpathObj)
-{
- int max = numXpathResults(xpathObj), lpc = 0;
-
- for (lpc = 0; lpc < max; lpc++) {
- xmlNode *match = getXpathResult(xpathObj, lpc);
-
- CRM_LOG_ASSERT(match != NULL);
- if (match && crm_element_value(match, XML_DIFF_MARKER)) {
- /* Deletion */
- int index = 0;
- char *target = stonith_level_key(match, fenced_target_by_unknown);
-
- crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index);
- if (target == NULL) {
- crm_err("Invalid fencing target in element %s", ID(match));
-
- } else if (index <= 0) {
- crm_err("Invalid level for %s in element %s", target, ID(match));
-
- } else {
- topology_remove_helper(target, index);
- }
- /* } else { Deal with modifications during the 'addition' stage */
- }
- }
-}
-
-static void
-register_fencing_topology(xmlXPathObjectPtr xpathObj)
-{
- int max = numXpathResults(xpathObj), lpc = 0;
-
- for (lpc = 0; lpc < max; lpc++) {
- xmlNode *match = getXpathResult(xpathObj, lpc);
-
- remove_topology_level(match);
- add_topology_level(match);
- }
-}
-
-/* Fencing
-<diff crm_feature_set="3.0.6">
- <diff-removed>
- <fencing-topology>
- <fencing-level id="f-p1.1" target="pcmk-1" index="1" devices="poison-pill" __crm_diff_marker__="removed:top"/>
- <fencing-level id="f-p1.2" target="pcmk-1" index="2" devices="power" __crm_diff_marker__="removed:top"/>
- <fencing-level devices="disk,network" id="f-p2.1"/>
- </fencing-topology>
- </diff-removed>
- <diff-added>
- <fencing-topology>
- <fencing-level id="f-p.1" target="pcmk-1" index="1" devices="poison-pill" __crm_diff_marker__="added:top"/>
- <fencing-level id="f-p2.1" target="pcmk-2" index="1" devices="disk,something"/>
- <fencing-level id="f-p3.1" target="pcmk-2" index="2" devices="power" __crm_diff_marker__="added:top"/>
- </fencing-topology>
- </diff-added>
-</diff>
-*/
-
-static void
-fencing_topology_init(void)
-{
- xmlXPathObjectPtr xpathObj = NULL;
- const char *xpath = "//" XML_TAG_FENCING_LEVEL;
-
- crm_trace("Full topology refresh");
- free_topology_list();
- init_topology_list();
-
- /* Grab everything */
- xpathObj = xpath_search(local_cib, xpath);
- register_fencing_topology(xpathObj);
-
- freeXpathObject(xpathObj);
-}
-
-#define rsc_name(x) x->clone_name?x->clone_name:x->id
-
-/*!
- * \internal
- * \brief Check whether our uname is in a resource's allowed node list
- *
- * \param[in] rsc Resource to check
- *
- * \return Pointer to node object if found, NULL otherwise
- */
-static pe_node_t *
-our_node_allowed_for(const pe_resource_t *rsc)
-{
- GHashTableIter iter;
- pe_node_t *node = NULL;
-
- if (rsc && stonith_our_uname) {
- g_hash_table_iter_init(&iter, rsc->allowed_nodes);
- while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
- if (node && strcmp(node->details->uname, stonith_our_uname) == 0) {
- break;
- }
- node = NULL;
- }
- }
- return node;
-}
-
-static void
-watchdog_device_update(void)
-{
- if (stonith_watchdog_timeout_ms > 0) {
- if (!g_hash_table_lookup(device_list, STONITH_WATCHDOG_ID) &&
- !stonith_watchdog_targets) {
- /* getting here watchdog-fencing enabled, no device there yet
- and reason isn't stonith_watchdog_targets preventing that
- */
- int rc;
- xmlNode *xml;
-
- xml = create_device_registration_xml(
- STONITH_WATCHDOG_ID,
- st_namespace_internal,
- STONITH_WATCHDOG_AGENT,
- NULL, /* stonith_device_register will add our
- own name as PCMK_STONITH_HOST_LIST param
- so we can skip that here
- */
- NULL);
- rc = stonith_device_register(xml, TRUE);
- free_xml(xml);
- if (rc != pcmk_ok) {
- rc = pcmk_legacy2rc(rc);
- exit_code = CRM_EX_FATAL;
- crm_crit("Cannot register watchdog pseudo fence agent: %s",
- pcmk_rc_str(rc));
- stonith_shutdown(0);
- }
- }
-
- } else if (g_hash_table_lookup(device_list, STONITH_WATCHDOG_ID) != NULL) {
- /* be silent if no device - todo parameter to stonith_device_remove */
- stonith_device_remove(STONITH_WATCHDOG_ID, true);
- }
-}
-
-static void
-update_stonith_watchdog_timeout_ms(xmlNode *cib)
-{
- long timeout_ms = 0;
- xmlNode *stonith_watchdog_xml = NULL;
- const char *value = NULL;
-
- stonith_watchdog_xml = get_xpath_object("//nvpair[@name='stonith-watchdog-timeout']",
- cib, LOG_NEVER);
- if (stonith_watchdog_xml) {
- value = crm_element_value(stonith_watchdog_xml, XML_NVPAIR_ATTR_VALUE);
- }
- if (value) {
- timeout_ms = crm_get_msec(value);
- }
-
- if (timeout_ms < 0) {
- timeout_ms = pcmk__auto_watchdog_timeout();
- }
-
- stonith_watchdog_timeout_ms = timeout_ms;
-}
-
-/*!
- * \internal
- * \brief If a resource or any of its children are STONITH devices, update their
- * definitions given a cluster working set.
- *
- * \param[in,out] rsc Resource to check
- * \param[in,out] data_set Cluster working set with device information
- */
-static void
-cib_device_update(pe_resource_t *rsc, pe_working_set_t *data_set)
-{
- pe_node_t *node = NULL;
- const char *value = NULL;
- const char *rclass = NULL;
- pe_node_t *parent = NULL;
-
- /* If this is a complex resource, check children rather than this resource itself. */
- if(rsc->children) {
- GList *gIter = NULL;
- for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- cib_device_update(gIter->data, data_set);
- if(pe_rsc_is_clone(rsc)) {
- crm_trace("Only processing one copy of the clone %s", rsc->id);
- break;
- }
- }
- return;
- }
-
- /* We only care about STONITH resources. */
- rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
- if (!pcmk__str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
- return;
- }
-
- /* If this STONITH resource is disabled, remove it. */
- if (pe__resource_is_disabled(rsc)) {
- crm_info("Device %s has been disabled", rsc->id);
- return;
- }
-
- /* if watchdog-fencing is disabled handle any watchdog-fence
- resource as if it was disabled
- */
- if ((stonith_watchdog_timeout_ms <= 0) &&
- pcmk__str_eq(rsc->id, STONITH_WATCHDOG_ID, pcmk__str_none)) {
- crm_info("Watchdog-fencing disabled thus handling "
- "device %s as disabled", rsc->id);
- return;
- }
-
- /* Check whether our node is allowed for this resource (and its parent if in a group) */
- node = our_node_allowed_for(rsc);
- if (rsc->parent && (rsc->parent->variant == pe_group)) {
- parent = our_node_allowed_for(rsc->parent);
- }
-
- if(node == NULL) {
- /* Our node is disallowed, so remove the device */
- GHashTableIter iter;
-
- crm_info("Device %s has been disabled on %s: unknown", rsc->id, stonith_our_uname);
- g_hash_table_iter_init(&iter, rsc->allowed_nodes);
- while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
- crm_trace("Available: %s = %d", pe__node_name(node), node->weight);
- }
-
- return;
-
- } else if(node->weight < 0 || (parent && parent->weight < 0)) {
- /* Our node (or its group) is disallowed by score, so remove the device */
- int score = (node->weight < 0)? node->weight : parent->weight;
-
- crm_info("Device %s has been disabled on %s: score=%s",
- rsc->id, stonith_our_uname, pcmk_readable_score(score));
- return;
-
- } else {
- /* Our node is allowed, so update the device information */
- int rc;
- xmlNode *data;
- GHashTable *rsc_params = NULL;
- GHashTableIter gIter;
- stonith_key_value_t *params = NULL;
-
- const char *name = NULL;
- const char *agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE);
- const char *rsc_provides = NULL;
-
- crm_debug("Device %s is allowed on %s: score=%d", rsc->id, stonith_our_uname, node->weight);
- rsc_params = pe_rsc_params(rsc, node, data_set);
- get_meta_attributes(rsc->meta, rsc, node, data_set);
-
- rsc_provides = g_hash_table_lookup(rsc->meta, PCMK_STONITH_PROVIDES);
-
- g_hash_table_iter_init(&gIter, rsc_params);
- while (g_hash_table_iter_next(&gIter, (gpointer *) & name, (gpointer *) & value)) {
- if (!name || !value) {
- continue;
- }
- params = stonith_key_value_add(params, name, value);
- crm_trace(" %s=%s", name, value);
- }
-
- data = create_device_registration_xml(rsc_name(rsc), st_namespace_any,
- agent, params, rsc_provides);
- stonith_key_value_freeall(params, 1, 1);
- rc = stonith_device_register(data, TRUE);
- CRM_ASSERT(rc == pcmk_ok);
- free_xml(data);
- }
-}
-
-/*!
- * \internal
- * \brief Update all STONITH device definitions based on current CIB
- */
-static void
-cib_devices_update(void)
-{
- GHashTableIter iter;
- stonith_device_t *device = NULL;
-
- crm_info("Updating devices to version %s.%s.%s",
- crm_element_value(local_cib, XML_ATTR_GENERATION_ADMIN),
- crm_element_value(local_cib, XML_ATTR_GENERATION),
- crm_element_value(local_cib, XML_ATTR_NUMUPDATES));
-
- if (fenced_data_set->now != NULL) {
- crm_time_free(fenced_data_set->now);
- fenced_data_set->now = NULL;
- }
- fenced_data_set->localhost = stonith_our_uname;
- pcmk__schedule_actions(local_cib, data_set_flags, fenced_data_set);
-
- g_hash_table_iter_init(&iter, device_list);
- while (g_hash_table_iter_next(&iter, NULL, (void **)&device)) {
- if (device->cib_registered) {
- device->dirty = TRUE;
- }
- }
-
- /* have list repopulated if cib has a watchdog-fencing-resource
- TODO: keep a cached list for queries happening while we are refreshing
- */
- g_list_free_full(stonith_watchdog_targets, free);
- stonith_watchdog_targets = NULL;
- g_list_foreach(fenced_data_set->resources, (GFunc) cib_device_update, fenced_data_set);
-
- g_hash_table_iter_init(&iter, device_list);
- while (g_hash_table_iter_next(&iter, NULL, (void **)&device)) {
- if (device->dirty) {
- g_hash_table_iter_remove(&iter);
- }
- }
-
- fenced_data_set->input = NULL; // Wasn't a copy, so don't let API free it
- pe_reset_working_set(fenced_data_set);
-}
-
-static void
-update_cib_stonith_devices_v2(const char *event, xmlNode * msg)
-{
- xmlNode *change = NULL;
- char *reason = NULL;
- bool needs_update = FALSE;
- xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
-
- for (change = pcmk__xml_first_child(patchset); change != NULL;
- change = pcmk__xml_next(change)) {
- const char *op = crm_element_value(change, XML_DIFF_OP);
- const char *xpath = crm_element_value(change, XML_DIFF_PATH);
- const char *shortpath = NULL;
-
- if ((op == NULL) ||
- (strcmp(op, "move") == 0) ||
- strstr(xpath, "/"XML_CIB_TAG_STATUS)) {
- continue;
- } else if (pcmk__str_eq(op, "delete", pcmk__str_casei) && strstr(xpath, "/"XML_CIB_TAG_RESOURCE)) {
- const char *rsc_id = NULL;
- char *search = NULL;
- char *mutable = NULL;
-
- if (strstr(xpath, XML_TAG_ATTR_SETS) ||
- strstr(xpath, XML_TAG_META_SETS)) {
- needs_update = TRUE;
- pcmk__str_update(&reason,
- "(meta) attribute deleted from resource");
- break;
- }
- pcmk__str_update(&mutable, xpath);
- rsc_id = strstr(mutable, "primitive[@" XML_ATTR_ID "=\'");
- if (rsc_id != NULL) {
- rsc_id += strlen("primitive[@" XML_ATTR_ID "=\'");
- search = strchr(rsc_id, '\'');
- }
- if (search != NULL) {
- *search = 0;
- stonith_device_remove(rsc_id, true);
- /* watchdog_device_update called afterwards
- to fall back to implicit definition if needed */
- } else {
- crm_warn("Ignoring malformed CIB update (resource deletion)");
- }
- free(mutable);
-
- } else if (strstr(xpath, "/"XML_CIB_TAG_RESOURCES) ||
- strstr(xpath, "/"XML_CIB_TAG_CONSTRAINTS) ||
- strstr(xpath, "/"XML_CIB_TAG_RSCCONFIG)) {
- shortpath = strrchr(xpath, '/'); CRM_ASSERT(shortpath);
- reason = crm_strdup_printf("%s %s", op, shortpath+1);
- needs_update = TRUE;
- break;
- }
- }
-
- if(needs_update) {
- crm_info("Updating device list from CIB: %s", reason);
- cib_devices_update();
- } else {
- crm_trace("No updates for device list found in CIB");
- }
- free(reason);
-}
-
-
-static void
-update_cib_stonith_devices_v1(const char *event, xmlNode * msg)
-{
- const char *reason = "none";
- gboolean needs_update = FALSE;
- xmlXPathObjectPtr xpath_obj = NULL;
-
- /* process new constraints */
- xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_CONS_TAG_RSC_LOCATION);
- if (numXpathResults(xpath_obj) > 0) {
- int max = numXpathResults(xpath_obj), lpc = 0;
-
- /* Safest and simplest to always recompute */
- needs_update = TRUE;
- reason = "new location constraint";
-
- for (lpc = 0; lpc < max; lpc++) {
- xmlNode *match = getXpathResult(xpath_obj, lpc);
-
- crm_log_xml_trace(match, "new constraint");
- }
- }
- freeXpathObject(xpath_obj);
-
- /* process deletions */
- xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_CIB_TAG_RESOURCE);
- if (numXpathResults(xpath_obj) > 0) {
- remove_cib_device(xpath_obj);
- }
- freeXpathObject(xpath_obj);
-
- /* process additions */
- xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_CIB_TAG_RESOURCE);
- if (numXpathResults(xpath_obj) > 0) {
- int max = numXpathResults(xpath_obj), lpc = 0;
-
- for (lpc = 0; lpc < max; lpc++) {
- const char *rsc_id = NULL;
- const char *standard = NULL;
- xmlNode *match = getXpathResult(xpath_obj, lpc);
-
- rsc_id = crm_element_value(match, XML_ATTR_ID);
- standard = crm_element_value(match, XML_AGENT_ATTR_CLASS);
-
- if (!pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
- continue;
- }
-
- crm_trace("Fencing resource %s was added or modified", rsc_id);
- reason = "new resource";
- needs_update = TRUE;
- }
- }
- freeXpathObject(xpath_obj);
-
- if(needs_update) {
- crm_info("Updating device list from CIB: %s", reason);
- cib_devices_update();
- }
-}
-
-static void
-update_cib_stonith_devices(const char *event, xmlNode * msg)
-{
- int format = 1;
- xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
-
- CRM_ASSERT(patchset);
- crm_element_value_int(patchset, "format", &format);
- switch(format) {
- case 1:
- update_cib_stonith_devices_v1(event, msg);
- break;
- case 2:
- update_cib_stonith_devices_v2(event, msg);
- break;
- default:
- crm_warn("Unknown patch format: %d", format);
- }
-}
-
-/*!
- * \internal
- * \brief Check whether a node has a specific attribute name/value
- *
- * \param[in] node Name of node to check
- * \param[in] name Name of an attribute to look for
- * \param[in] value The value the named attribute needs to be set to in order to be considered a match
- *
- * \return TRUE if the locally cached CIB has the specified node attribute
- */
-gboolean
-node_has_attr(const char *node, const char *name, const char *value)
-{
- GString *xpath = NULL;
- xmlNode *match;
-
- CRM_CHECK((local_cib != NULL) && (node != NULL) && (name != NULL)
- && (value != NULL), return FALSE);
-
- /* Search for the node's attributes in the CIB. While the schema allows
- * multiple sets of instance attributes, and allows instance attributes to
- * use id-ref to reference values elsewhere, that is intended for resources,
- * so we ignore that here.
- */
- xpath = g_string_sized_new(256);
- pcmk__g_strcat(xpath,
- "//" XML_CIB_TAG_NODES "/" XML_CIB_TAG_NODE
- "[@" XML_ATTR_UNAME "='", node, "']/" XML_TAG_ATTR_SETS
- "/" XML_CIB_TAG_NVPAIR
- "[@" XML_NVPAIR_ATTR_NAME "='", name, "' "
- "and @" XML_NVPAIR_ATTR_VALUE "='", value, "']", NULL);
-
- match = get_xpath_object((const char *) xpath->str, local_cib, LOG_NEVER);
-
- g_string_free(xpath, TRUE);
- return (match != NULL);
-}
-
/*!
* \internal
* \brief Check whether a node does watchdog-fencing
@@ -1043,201 +448,7 @@ node_does_watchdog_fencing(const char *node)
pcmk__str_in_list(node, stonith_watchdog_targets, pcmk__str_casei));
}
-
-static void
-update_fencing_topology(const char *event, xmlNode * msg)
-{
- int format = 1;
- const char *xpath;
- xmlXPathObjectPtr xpathObj = NULL;
- xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
-
- CRM_ASSERT(patchset);
- crm_element_value_int(patchset, "format", &format);
-
- if(format == 1) {
- /* Process deletions (only) */
- xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_TAG_FENCING_LEVEL;
- xpathObj = xpath_search(msg, xpath);
-
- remove_fencing_topology(xpathObj);
- freeXpathObject(xpathObj);
-
- /* Process additions and changes */
- xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_TAG_FENCING_LEVEL;
- xpathObj = xpath_search(msg, xpath);
-
- register_fencing_topology(xpathObj);
- freeXpathObject(xpathObj);
-
- } else if(format == 2) {
- xmlNode *change = NULL;
- int add[] = { 0, 0, 0 };
- int del[] = { 0, 0, 0 };
-
- xml_patch_versions(patchset, add, del);
-
- for (change = pcmk__xml_first_child(patchset); change != NULL;
- change = pcmk__xml_next(change)) {
- const char *op = crm_element_value(change, XML_DIFF_OP);
- const char *xpath = crm_element_value(change, XML_DIFF_PATH);
-
- if(op == NULL) {
- continue;
-
- } else if(strstr(xpath, "/" XML_TAG_FENCING_LEVEL) != NULL) {
- /* Change to a specific entry */
-
- crm_trace("Handling %s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath);
- if(strcmp(op, "move") == 0) {
- continue;
-
- } else if(strcmp(op, "create") == 0) {
- add_topology_level(change->children);
-
- } else if(strcmp(op, "modify") == 0) {
- xmlNode *match = first_named_child(change, XML_DIFF_RESULT);
-
- if(match) {
- remove_topology_level(match->children);
- add_topology_level(match->children);
- }
-
- } else if(strcmp(op, "delete") == 0) {
- /* Nuclear option, all we have is the path and an id... not enough to remove a specific entry */
- crm_info("Re-initializing fencing topology after %s operation %d.%d.%d for %s",
- op, add[0], add[1], add[2], xpath);
- fencing_topology_init();
- return;
- }
-
- } else if (strstr(xpath, "/" XML_TAG_FENCING_TOPOLOGY) != NULL) {
- /* Change to the topology in general */
- crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s",
- op, add[0], add[1], add[2], xpath);
- fencing_topology_init();
- return;
-
- } else if (strstr(xpath, "/" XML_CIB_TAG_CONFIGURATION)) {
-                /* Changes to the whole config section, possibly including the topology as a whole */
- if(first_named_child(change, XML_TAG_FENCING_TOPOLOGY) == NULL) {
- crm_trace("Nothing for us in %s operation %d.%d.%d for %s.",
- op, add[0], add[1], add[2], xpath);
-
- } else if(strcmp(op, "delete") == 0 || strcmp(op, "create") == 0) {
- crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s.",
- op, add[0], add[1], add[2], xpath);
- fencing_topology_init();
- return;
- }
-
- } else {
- crm_trace("Nothing for us in %s operation %d.%d.%d for %s",
- op, add[0], add[1], add[2], xpath);
- }
- }
-
- } else {
- crm_warn("Unknown patch format: %d", format);
- }
-}
-static bool have_cib_devices = FALSE;
-
-static void
-update_cib_cache_cb(const char *event, xmlNode * msg)
-{
- int rc = pcmk_ok;
- long timeout_ms_saved = stonith_watchdog_timeout_ms;
- bool need_full_refresh = false;
-
- if(!have_cib_devices) {
- crm_trace("Skipping updates until we get a full dump");
- return;
-
- } else if(msg == NULL) {
- crm_trace("Missing %s update", event);
- return;
- }
-
- /* Maintain a local copy of the CIB so that we have full access
- * to device definitions, location constraints, and node attributes
- */
- if (local_cib != NULL) {
- int rc = pcmk_ok;
- xmlNode *patchset = NULL;
-
- crm_element_value_int(msg, F_CIB_RC, &rc);
- if (rc != pcmk_ok) {
- return;
- }
-
- patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
- pcmk__output_set_log_level(logger_out, LOG_TRACE);
- out->message(out, "xml-patchset", patchset);
- rc = xml_apply_patchset(local_cib, patchset, TRUE);
- switch (rc) {
- case pcmk_ok:
- case -pcmk_err_old_data:
- break;
- case -pcmk_err_diff_resync:
- case -pcmk_err_diff_failed:
- crm_notice("[%s] Patch aborted: %s (%d)", event, pcmk_strerror(rc), rc);
- free_xml(local_cib);
- local_cib = NULL;
- break;
- default:
- crm_warn("[%s] ABORTED: %s (%d)", event, pcmk_strerror(rc), rc);
- free_xml(local_cib);
- local_cib = NULL;
- }
- }
-
- if (local_cib == NULL) {
- crm_trace("Re-requesting full CIB");
- rc = cib_api->cmds->query(cib_api, NULL, &local_cib, cib_scope_local | cib_sync_call);
- if(rc != pcmk_ok) {
- crm_err("Couldn't retrieve the CIB: %s (%d)", pcmk_strerror(rc), rc);
- return;
- }
- CRM_ASSERT(local_cib != NULL);
- need_full_refresh = true;
- }
-
- pcmk__refresh_node_caches_from_cib(local_cib);
- update_stonith_watchdog_timeout_ms(local_cib);
-
- if (timeout_ms_saved != stonith_watchdog_timeout_ms) {
- need_full_refresh = true;
- }
-
- if (need_full_refresh) {
- fencing_topology_init();
- cib_devices_update();
- } else {
- // Partial refresh
- update_fencing_topology(event, msg);
- update_cib_stonith_devices(event, msg);
- }
-
- watchdog_device_update();
-}
-
-static void
-init_cib_cache_cb(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
-{
- crm_info("Updating device list from CIB");
- have_cib_devices = TRUE;
- local_cib = copy_xml(output);
-
- pcmk__refresh_node_caches_from_cib(local_cib);
- update_stonith_watchdog_timeout_ms(local_cib);
-
- fencing_topology_init();
- cib_devices_update();
- watchdog_device_update();
-}
-
-static void
+void
stonith_shutdown(int nsig)
{
crm_info("Terminating with %d clients", pcmk__ipc_client_count());
@@ -1248,28 +459,9 @@ stonith_shutdown(int nsig)
}
static void
-cib_connection_destroy(gpointer user_data)
-{
- if (stonith_shutdown_flag) {
- crm_info("Connection to the CIB manager closed");
- return;
- } else {
- crm_crit("Lost connection to the CIB manager, shutting down");
- }
- if (cib_api) {
- cib_api->cmds->signoff(cib_api);
- }
- stonith_shutdown(0);
-}
-
-static void
stonith_cleanup(void)
{
- if (cib_api) {
- cib_api->cmds->del_notify_callback(cib_api, T_CIB_DIFF_NOTIFY, update_cib_cache_cb);
- cib_api->cmds->signoff(cib_api);
- }
-
+ fenced_cib_cleanup();
if (ipcs) {
qb_ipcs_destroy(ipcs);
}
@@ -1284,9 +476,6 @@ stonith_cleanup(void)
free(stonith_our_uname);
stonith_our_uname = NULL;
-
- free_xml(local_cib);
- local_cib = NULL;
}
static gboolean
@@ -1298,38 +487,6 @@ stand_alone_cpg_cb(const gchar *option_name, const gchar *optarg, gpointer data,
return TRUE;
}
-static void
-setup_cib(void)
-{
- int rc, retries = 0;
-
- cib_api = cib_new();
- if (cib_api == NULL) {
- crm_err("No connection to the CIB manager");
- return;
- }
-
- do {
- sleep(retries);
- rc = cib_api->cmds->signon(cib_api, CRM_SYSTEM_STONITHD, cib_command);
- } while (rc == -ENOTCONN && ++retries < 5);
-
- if (rc != pcmk_ok) {
- crm_err("Could not connect to the CIB manager: %s (%d)", pcmk_strerror(rc), rc);
-
- } else if (pcmk_ok !=
- cib_api->cmds->add_notify_callback(cib_api, T_CIB_DIFF_NOTIFY, update_cib_cache_cb)) {
- crm_err("Could not set CIB notification callback");
-
- } else {
- rc = cib_api->cmds->query(cib_api, NULL, NULL, cib_scope_local);
- cib_api->cmds->register_callback(cib_api, rc, 120, FALSE, NULL, "init_cib_cache_cb",
- init_cib_cache_cb);
- cib_api->cmds->set_connection_dnotify(cib_api, cib_connection_destroy);
- crm_info("Watching for fencing topology changes");
- }
-}
-
struct qb_ipcs_service_handlers ipc_callbacks = {
.connection_accept = st_ipc_accept,
.connection_created = NULL,
@@ -1435,10 +592,11 @@ static pcmk__cluster_option_t fencer_options[] = {
"Then use this to specify the maximum number of actions can be performed in parallel on this device. -1 is unlimited.")
},
{
- "pcmk_reboot_action",NULL, "string", NULL, "reboot", NULL,
- N_("Advanced use only: An alternate command to run instead of 'reboot'"),
+ "pcmk_reboot_action", NULL, "string", NULL,
+ PCMK_ACTION_REBOOT, NULL,
+ N_("Advanced use only: An alternate command to run instead of 'reboot'"),
N_("Some devices do not support the standard commands or may provide additional ones.\n"
- "Use this to specify an alternate, device-specific, command that implements the \'reboot\' action.")
+ "Use this to specify an alternate, device-specific, command that implements the \'reboot\' action.")
},
{
"pcmk_reboot_timeout",NULL, "time", NULL, "60s", NULL,
@@ -1454,10 +612,11 @@ static pcmk__cluster_option_t fencer_options[] = {
" Use this option to alter the number of times Pacemaker retries \'reboot\' actions before giving up.")
},
{
- "pcmk_off_action",NULL, "string", NULL, "off", NULL,
- N_("Advanced use only: An alternate command to run instead of \'off\'"),
+ "pcmk_off_action", NULL, "string", NULL,
+ PCMK_ACTION_OFF, NULL,
+ N_("Advanced use only: An alternate command to run instead of \'off\'"),
N_("Some devices do not support the standard commands or may provide additional ones."
- "Use this to specify an alternate, device-specific, command that implements the \'off\' action.")
+ "Use this to specify an alternate, device-specific, command that implements the \'off\' action.")
},
{
"pcmk_off_timeout",NULL, "time", NULL, "60s", NULL,
@@ -1473,10 +632,11 @@ static pcmk__cluster_option_t fencer_options[] = {
" Use this option to alter the number of times Pacemaker retries \'off\' actions before giving up.")
},
{
- "pcmk_on_action",NULL, "string", NULL, "on", NULL,
- N_("Advanced use only: An alternate command to run instead of 'on'"),
+ "pcmk_on_action", NULL, "string", NULL,
+ PCMK_ACTION_ON, NULL,
+ N_("Advanced use only: An alternate command to run instead of 'on'"),
N_("Some devices do not support the standard commands or may provide additional ones."
- "Use this to specify an alternate, device-specific, command that implements the \'on\' action.")
+ "Use this to specify an alternate, device-specific, command that implements the \'on\' action.")
},
{
"pcmk_on_timeout",NULL, "time", NULL, "60s", NULL,
@@ -1492,10 +652,11 @@ static pcmk__cluster_option_t fencer_options[] = {
" Use this option to alter the number of times Pacemaker retries \'on\' actions before giving up.")
},
{
- "pcmk_list_action",NULL, "string", NULL, "list", NULL,
- N_("Advanced use only: An alternate command to run instead of \'list\'"),
+ "pcmk_list_action",NULL, "string", NULL,
+ PCMK_ACTION_LIST, NULL,
+ N_("Advanced use only: An alternate command to run instead of \'list\'"),
N_("Some devices do not support the standard commands or may provide additional ones."
- "Use this to specify an alternate, device-specific, command that implements the \'list\' action.")
+ "Use this to specify an alternate, device-specific, command that implements the \'list\' action.")
},
{
"pcmk_list_timeout",NULL, "time", NULL, "60s", NULL,
@@ -1511,7 +672,8 @@ static pcmk__cluster_option_t fencer_options[] = {
" Use this option to alter the number of times Pacemaker retries \'list\' actions before giving up.")
},
{
- "pcmk_monitor_action",NULL, "string", NULL, "monitor", NULL,
+ "pcmk_monitor_action", NULL, "string", NULL,
+ PCMK_ACTION_MONITOR, NULL,
N_("Advanced use only: An alternate command to run instead of \'monitor\'"),
N_("Some devices do not support the standard commands or may provide additional ones."
"Use this to specify an alternate, device-specific, command that implements the \'monitor\' action.")
@@ -1530,10 +692,11 @@ static pcmk__cluster_option_t fencer_options[] = {
" Use this option to alter the number of times Pacemaker retries \'monitor\' actions before giving up.")
},
{
- "pcmk_status_action",NULL, "string", NULL, "status", NULL,
- N_("Advanced use only: An alternate command to run instead of \'status\'"),
+ "pcmk_status_action", NULL, "string", NULL,
+ PCMK_ACTION_STATUS, NULL,
+ N_("Advanced use only: An alternate command to run instead of \'status\'"),
N_("Some devices do not support the standard commands or may provide additional ones."
- "Use this to specify an alternate, device-specific, command that implements the \'status\' action.")
+ "Use this to specify an alternate, device-specific, command that implements the \'status\' action.")
},
{
"pcmk_status_timeout",NULL, "time", NULL, "60s", NULL,
@@ -1568,13 +731,13 @@ fencer_metadata(void)
static GOptionEntry entries[] = {
{ "stand-alone", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &stand_alone,
- "Deprecated (will be removed in a future release)", NULL },
+ N_("Deprecated (will be removed in a future release)"), NULL },
{ "stand-alone-w-cpg", 'c', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
- stand_alone_cpg_cb, "Intended for use in regression testing only", NULL },
+ stand_alone_cpg_cb, N_("Intended for use in regression testing only"), NULL },
{ "logfile", 'l', G_OPTION_FLAG_NONE, G_OPTION_ARG_FILENAME_ARRAY,
- &options.log_files, "Send logs to the additional named logfile", NULL },
+ &options.log_files, N_("Send logs to the additional named logfile"), NULL },
{ NULL }
};
@@ -1649,7 +812,7 @@ main(int argc, char **argv)
goto done;
}
- if (crm_ipc_connect(old_instance)) {
+ if (pcmk__connect_generic_ipc(old_instance) == pcmk_rc_ok) {
// IPC endpoint already up
crm_ipc_close(old_instance);
crm_ipc_destroy(old_instance);
@@ -1665,26 +828,15 @@ main(int argc, char **argv)
crm_peer_init();
- fenced_data_set = pe_new_working_set();
- CRM_ASSERT(fenced_data_set != NULL);
-
- cluster = pcmk_cluster_new();
-
- /* Initialize the logger prior to setup_cib(). update_cib_cache_cb() may
- * call the "xml-patchset" message function, which needs the logger, after
- * setup_cib() has run.
- */
- rc = pcmk__log_output_new(&logger_out) != pcmk_rc_ok;
+ rc = fenced_scheduler_init();
if (rc != pcmk_rc_ok) {
exit_code = CRM_EX_FATAL;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
- "Error creating output format log: %s", pcmk_rc_str(rc));
+ "Error initializing scheduler data: %s", pcmk_rc_str(rc));
goto done;
}
- pe__register_messages(logger_out);
- pcmk__register_lib_messages(logger_out);
- pcmk__output_set_log_level(logger_out, LOG_TRACE);
- fenced_data_set->priv = logger_out;
+
+ cluster = pcmk_cluster_new();
if (!stand_alone) {
#if SUPPORT_COROSYNC
@@ -1732,15 +884,10 @@ done:
stonith_cleanup();
pcmk_cluster_free(cluster);
- pe_free_working_set(fenced_data_set);
+ fenced_scheduler_cleanup();
pcmk__output_and_clear_error(&error, out);
- if (logger_out != NULL) {
- logger_out->finish(logger_out, exit_code, true, NULL);
- pcmk__output_free(logger_out);
- }
-
if (out != NULL) {
out->finish(out, exit_code, true, NULL);
pcmk__output_free(out);
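pacemaker-fenced.c sheds its embedded CIB machinery — setup_cib(), the diff-notification callbacks, topology refresh, and per-device CIB updates — along with the private working set and logger; those duties now sit behind the setup_cib()/fenced_cib_cleanup() and fenced_scheduler_* entry points declared in the header below. The resulting teardown order, as a sketch assembled from the hunks above:

    static void
    stonith_cleanup(void)
    {
        fenced_cib_cleanup();    /* replaces del_notify_callback/signoff/free_xml(local_cib) */
        if (ipcs) {
            qb_ipcs_destroy(ipcs);
        }
        /* ... remaining cleanup unchanged ... */
    }
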
diff --git a/daemons/fenced/pacemaker-fenced.h b/daemons/fenced/pacemaker-fenced.h
index a3d2e17..220978a 100644
--- a/daemons/fenced/pacemaker-fenced.h
+++ b/daemons/fenced/pacemaker-fenced.h
@@ -6,7 +6,12 @@
*/
#include <stdint.h> // uint32_t, uint64_t
+#include <libxml/tree.h> // xmlNode
+
#include <crm/common/mainloop.h>
+#include <crm/cluster.h>
+#include <crm/stonith-ng.h>
+#include <crm/fencing/internal.h>
/*!
* \internal
@@ -104,9 +109,12 @@ typedef struct remote_fencing_op_s {
* values associated with the devices this fencing operation may call */
gint total_timeout;
- /*! Requested fencing delay.
- * Value -1 means disable any static/random fencing delays. */
- int delay;
+ /*!
+ * Fencing delay (in seconds) requested by API client (used by controller to
+ * implement priority-fencing-delay). A value of -1 means disable all
+ * configured delays.
+ */
+ int client_delay;
/*! Delegate is the node being asked to perform a fencing action
* on behalf of the node that owns the remote operation. Some operations
@@ -205,6 +213,8 @@ typedef struct stonith_topology_s {
} stonith_topology_t;
+void stonith_shutdown(int nsig);
+
void init_device_list(void);
void free_device_list(void);
void init_topology_list(void);
@@ -231,7 +241,7 @@ void fenced_unregister_level(xmlNode *msg, char **desc,
stonith_topology_t *find_topology_for_host(const char *host);
-void do_local_reply(xmlNode *notify_src, pcmk__client_t *client,
+void do_local_reply(const xmlNode *notify_src, pcmk__client_t *client,
int call_options);
xmlNode *fenced_construct_reply(const xmlNode *request, xmlNode *data,
@@ -280,6 +290,14 @@ gboolean node_has_attr(const char *node, const char *name, const char *value);
gboolean node_does_watchdog_fencing(const char *node);
+void fencing_topology_init(void);
+void setup_cib(void);
+void fenced_cib_cleanup(void);
+
+int fenced_scheduler_init(void);
+void fenced_scheduler_cleanup(void);
+void fenced_scheduler_run(xmlNode *cib);
+
static inline void
fenced_set_protocol_error(pcmk__action_result_t *result)
{
@@ -299,7 +317,7 @@ fenced_set_protocol_error(pcmk__action_result_t *result)
static inline uint32_t
fenced_support_flag(const char *action)
{
- if (pcmk__str_eq(action, "on", pcmk__str_none)) {
+ if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none)) {
return st_device_supports_on;
}
return st_device_supports_none;
@@ -311,5 +329,6 @@ extern GHashTable *device_list;
extern GHashTable *topology;
extern long stonith_watchdog_timeout_ms;
extern GList *stonith_watchdog_targets;
-
extern GHashTable *stonith_remote_op_list;
+extern crm_exit_t exit_code;
+extern gboolean stonith_shutdown_flag;
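The header now pulls in the XML, cluster, and fencing includes the relocated code needs, renames the delay field to client_delay, and exports stonith_shutdown(), exit_code, and stonith_shutdown_flag alongside prototypes for the CIB (setup_cib, fenced_cib_cleanup, fencing_topology_init) and scheduler (fenced_scheduler_*) entry points. The inline support-flag helper also adopts the shared constant; for example:

    /* Sketch: same behavior as before, just using PCMK_ACTION_ON instead of "on" */
    uint32_t flags = fenced_support_flag(PCMK_ACTION_ON);   /* yields st_device_supports_on */
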
diff --git a/daemons/pacemakerd/Makefile.am b/daemons/pacemakerd/Makefile.am
index fc0e014..78e7c37 100644
--- a/daemons/pacemakerd/Makefile.am
+++ b/daemons/pacemakerd/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2004-2021 the Pacemaker project contributors
+# Copyright 2004-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -25,8 +25,10 @@ noinst_HEADERS = pacemakerd.h
pacemakerd_CFLAGS = $(CFLAGS_HARDENED_EXE)
pacemakerd_LDFLAGS = $(LDFLAGS_HARDENED_EXE)
-pacemakerd_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la $(top_builddir)/lib/common/libcrmcommon.la
-pacemakerd_LDADD += $(CLUSTERLIBS)
+pacemakerd_LDADD = $(top_builddir)/lib/cluster/libcrmcluster.la
+pacemakerd_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
+pacemakerd_LDADD += $(CLUSTERLIBS)
+
pacemakerd_SOURCES = pacemakerd.c
if BUILD_CS_SUPPORT
pacemakerd_SOURCES += pcmkd_corosync.c
diff --git a/daemons/pacemakerd/pacemakerd.c b/daemons/pacemakerd/pacemakerd.c
index 9f77ccc..365b743 100644
--- a/daemons/pacemakerd/pacemakerd.c
+++ b/daemons/pacemakerd/pacemakerd.c
@@ -92,7 +92,7 @@ pid_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **er
static gboolean
standby_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **err) {
options.standby = TRUE;
- pcmk__set_env_option("node_start_state", "standby");
+ pcmk__set_env_option(PCMK__ENV_NODE_START_STATE, "standby", false);
return TRUE;
}
@@ -129,7 +129,7 @@ pcmk_sigquit(int nsig)
}
static void
-mcp_chown(const char *path, uid_t uid, gid_t gid)
+pacemakerd_chown(const char *path, uid_t uid, gid_t gid)
{
int rc = chown(path, uid, gid);
@@ -166,7 +166,7 @@ create_pcmk_dirs(void)
crm_warn("Could not create directory " CRM_STATE_DIR ": %s",
pcmk_rc_str(errno));
} else {
- mcp_chown(CRM_STATE_DIR, pcmk_uid, pcmk_gid);
+ pacemakerd_chown(CRM_STATE_DIR, pcmk_uid, pcmk_gid);
}
for (int i = 0; dirs[i] != NULL; ++i) {
@@ -176,7 +176,7 @@ create_pcmk_dirs(void)
crm_warn("Could not create directory %s: %s",
dirs[i], pcmk_rc_str(rc));
} else {
- mcp_chown(dirs[i], pcmk_uid, pcmk_gid);
+ pacemakerd_chown(dirs[i], pcmk_uid, pcmk_gid);
}
}
}
@@ -312,7 +312,8 @@ main(int argc, char **argv)
goto done;
}
- pcmk__set_env_option("mcp", "true");
+ // @COMPAT Drop at 3.0.0; likely last used in 1.1.24
+ pcmk__set_env_option(PCMK__ENV_MCP, "true", true);
if (options.shutdown) {
pcmk__cli_init_logging("pacemakerd", args->verbosity);
@@ -330,7 +331,11 @@ main(int argc, char **argv)
}
pcmk_register_ipc_callback(old_instance, pacemakerd_event_cb, NULL);
- rc = pcmk_connect_ipc(old_instance, pcmk_ipc_dispatch_sync);
+ rc = pcmk__connect_ipc(old_instance, pcmk_ipc_dispatch_sync, 2);
+ if (rc != pcmk_rc_ok) {
+ crm_debug("No existing %s instance found: %s",
+ pcmk_ipc_name(old_instance, true), pcmk_rc_str(rc));
+ }
old_instance_connected = pcmk_ipc_is_connected(old_instance);
if (options.shutdown) {
@@ -388,7 +393,7 @@ main(int argc, char **argv)
}
#ifdef SUPPORT_COROSYNC
- if (mcp_read_config() == FALSE) {
+ if (pacemakerd_read_config() == FALSE) {
crm_exit(CRM_EX_UNAVAILABLE);
}
#endif
@@ -399,7 +404,7 @@ main(int argc, char **argv)
if (!pcmk__str_eq(facility, PCMK__VALUE_NONE,
pcmk__str_casei|pcmk__str_null_matches)) {
- setenv("HA_LOGFACILITY", facility, 1);
+ pcmk__set_env_option("LOGFACILITY", facility, true);
}
}
@@ -409,7 +414,7 @@ main(int argc, char **argv)
remove_core_file_limit();
create_pcmk_dirs();
- pcmk__serve_pacemakerd_ipc(&ipcs, &mcp_ipc_callbacks);
+ pcmk__serve_pacemakerd_ipc(&ipcs, &pacemakerd_ipc_callbacks);
#ifdef SUPPORT_COROSYNC
/* Allows us to block shutdown */
@@ -420,10 +425,7 @@ main(int argc, char **argv)
#endif
if (pcmk__locate_sbd() > 0) {
- setenv("PCMK_watchdog", "true", 1);
running_with_sbd = TRUE;
- } else {
- setenv("PCMK_watchdog", "false", 1);
}
switch (find_and_track_existing_processes()) {
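pacemakerd.c retires the old "mcp" naming (pacemakerd_chown(), pacemakerd_read_config(), pacemakerd_ipc_callbacks), checks the return code of pcmk__connect_ipc(), and routes environment settings through pcmk__set_env_option(), which now takes a third argument. Judging by the @COMPAT notes that boolean appears to control whether a legacy-style variant of the variable is also written, but that is an inference — the patch itself does not say. The new calling convention, copied from the hunks above:

    pcmk__set_env_option(PCMK__ENV_NODE_START_STATE, "standby", false);
    pcmk__set_env_option(PCMK__ENV_MCP, "true", true);   /* @COMPAT: drop at 3.0.0 */
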
diff --git a/daemons/pacemakerd/pacemakerd.h b/daemons/pacemakerd/pacemakerd.h
index b2a6864..ee6facf 100644
--- a/daemons/pacemakerd/pacemakerd.h
+++ b/daemons/pacemakerd/pacemakerd.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2010-2022 the Pacemaker project contributors
+ * Copyright 2010-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -14,7 +14,7 @@
#define MAX_RESPAWN 100
extern GMainLoop *mainloop;
-extern struct qb_ipcs_service_handlers mcp_ipc_callbacks;
+extern struct qb_ipcs_service_handlers pacemakerd_ipc_callbacks;
extern const char *pacemakerd_state;
extern gboolean running_with_sbd;
extern unsigned int shutdown_complete_state_reported_to;
@@ -23,7 +23,7 @@ extern crm_trigger_t *shutdown_trigger;
extern crm_trigger_t *startup_trigger;
extern time_t subdaemon_check_progress;
-gboolean mcp_read_config(void);
+gboolean pacemakerd_read_config(void);
gboolean cluster_connect_cfg(void);
void cluster_disconnect_cfg(void);
diff --git a/daemons/pacemakerd/pcmkd_corosync.c b/daemons/pacemakerd/pcmkd_corosync.c
index 2648756..8a1a867 100644
--- a/daemons/pacemakerd/pcmkd_corosync.c
+++ b/daemons/pacemakerd/pcmkd_corosync.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2010-2022 the Pacemaker project contributors
+ * Copyright 2010-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -82,7 +82,7 @@ cluster_reconnect_cb(gpointer data)
mainloop_timer_del(reconnect_timer);
reconnect_timer = NULL;
crm_notice("Cluster reconnect succeeded");
- mcp_read_config();
+ pacemakerd_read_config();
restart_cluster_subdaemons();
return G_SOURCE_REMOVE;
} else {
@@ -260,7 +260,7 @@ get_config_opt(uint64_t unused, cmap_handle_t object_handle, const char *key, ch
}
gboolean
-mcp_read_config(void)
+pacemakerd_read_config(void)
{
cs_error_t rc = CS_OK;
int retries = 0;
@@ -327,8 +327,10 @@ mcp_read_config(void)
crm_info("Reading configuration for %s stack",
name_for_cluster_type(stack));
- pcmk__set_env_option(PCMK__ENV_CLUSTER_TYPE, "corosync");
- pcmk__set_env_option(PCMK__ENV_QUORUM_TYPE, "corosync");
+ pcmk__set_env_option(PCMK__ENV_CLUSTER_TYPE, "corosync", true);
+
+ // @COMPAT Drop at 3.0.0; added unused in 1.1.9
+ pcmk__set_env_option(PCMK__ENV_QUORUM_TYPE, "corosync", true);
// If debug logging is not configured, check whether corosync has it
if (pcmk__env_option(PCMK__ENV_DEBUG) == NULL) {
@@ -337,13 +339,13 @@ mcp_read_config(void)
get_config_opt(config, local_handle, "logging.debug", &debug_enabled, "off");
if (crm_is_true(debug_enabled)) {
- pcmk__set_env_option(PCMK__ENV_DEBUG, "1");
+ pcmk__set_env_option(PCMK__ENV_DEBUG, "1", true);
if (get_crm_log_level() < LOG_DEBUG) {
set_crm_log_level(LOG_DEBUG);
}
} else {
- pcmk__set_env_option(PCMK__ENV_DEBUG, "0");
+ pcmk__set_env_option(PCMK__ENV_DEBUG, "0", true);
}
free(debug_enabled);
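The cluster_reconnect_cb() hunk above follows the standard GLib timer-callback
contract: return G_SOURCE_REMOVE once the reconnect (and the renamed
pacemakerd_read_config()) has succeeded, otherwise keep the timer armed. A minimal,
self-contained illustration of that contract, not Pacemaker code (the daemon uses
its own mainloop_timer_*() wrappers rather than g_timeout_add_seconds()):

    #include <glib.h>

    static int attempts = 0;

    static gboolean
    try_reconnect(void)
    {
        /* Stand-in for the real corosync CPG reconnection; pretend the
         * third attempt succeeds. */
        return (++attempts >= 3);
    }

    static gboolean
    reconnect_cb(gpointer user_data)
    {
        if (try_reconnect()) {
            g_message("Cluster reconnect succeeded");
            return G_SOURCE_REMOVE;     /* done: disarm the timer */
        }
        return G_SOURCE_CONTINUE;       /* retry on the next interval */
    }

    /* Armed with, e.g.: g_timeout_add_seconds(5, reconnect_cb, NULL); */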
diff --git a/daemons/pacemakerd/pcmkd_messages.c b/daemons/pacemakerd/pcmkd_messages.c
index 7ed9899..4e6f822 100644
--- a/daemons/pacemakerd/pcmkd_messages.c
+++ b/daemons/pacemakerd/pcmkd_messages.c
@@ -269,7 +269,7 @@ pcmk_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size)
return 0;
}
-struct qb_ipcs_service_handlers mcp_ipc_callbacks = {
+struct qb_ipcs_service_handlers pacemakerd_ipc_callbacks = {
.connection_accept = pcmk_ipc_accept,
.connection_created = NULL,
.msg_process = pcmk_ipc_dispatch,
diff --git a/daemons/pacemakerd/pcmkd_subdaemons.c b/daemons/pacemakerd/pcmkd_subdaemons.c
index 3b08ecc..21e432e 100644
--- a/daemons/pacemakerd/pcmkd_subdaemons.c
+++ b/daemons/pacemakerd/pcmkd_subdaemons.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2010-2022 the Pacemaker project contributors
+ * Copyright 2010-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -307,7 +307,7 @@ pcmk_process_exit(pcmk_child_t * child)
} else if (!child->respawn) {
/* nothing to do */
- } else if (crm_is_true(getenv("PCMK_fail_fast"))) {
+ } else if (crm_is_true(pcmk__env_option(PCMK__ENV_FAIL_FAST))) {
crm_err("Rebooting system because of %s", child->name);
pcmk__panic(__func__);
@@ -353,8 +353,8 @@ pcmk_shutdown_worker(gpointer user_data)
" if it vitally depends on some other daemons"
" going down in a controlled way already,"
" or locate and kill the correct %s process"
- " on your own; set PCMK_fail_fast=1 to avoid"
- " this altogether next time around",
+ " on your own; set PCMK_" PCMK__ENV_FAIL_FAST "=1"
+ " to avoid this altogether next time around",
child->name, (long) SHUTDOWN_ESCALATION_PERIOD,
child->command);
}
@@ -389,6 +389,7 @@ pcmk_shutdown_worker(gpointer user_data)
return TRUE;
}
+ // @COMPAT Drop shutdown delay at 3.0.0
{
const char *delay = pcmk__env_option(PCMK__ENV_SHUTDOWN_DELAY);
if(delay) {
@@ -423,8 +424,8 @@ start_child(pcmk_child_t * child)
gid_t gid = 0;
gboolean use_valgrind = FALSE;
gboolean use_callgrind = FALSE;
- const char *env_valgrind = getenv("PCMK_valgrind_enabled");
- const char *env_callgrind = getenv("PCMK_callgrind_enabled");
+ const char *env_valgrind = pcmk__env_option(PCMK__ENV_VALGRIND_ENABLED);
+ const char *env_callgrind = pcmk__env_option(PCMK__ENV_CALLGRIND_ENABLED);
child->active_before_startup = false;
child->check_count = 0;
@@ -712,14 +713,16 @@ find_and_track_existing_processes(void)
continue;
}
+ // @TODO Functionize more of this to reduce nesting
pcmk_children[i].respawn_count = rounds;
switch (rc) {
case pcmk_rc_ok:
if (pcmk_children[i].pid == PCMK__SPECIAL_PID) {
- if (crm_is_true(getenv("PCMK_fail_fast"))) {
+ if (crm_is_true(pcmk__env_option(PCMK__ENV_FAIL_FAST))) {
crm_crit("Cannot reliably track pre-existing"
" authentic process behind %s IPC on this"
- " platform and PCMK_fail_fast requested",
+ " platform and PCMK_" PCMK__ENV_FAIL_FAST
+ " requested",
pcmk_children[i].endpoint);
return EOPNOTSUPP;
} else if (pcmk_children[i].respawn_count == WAIT_TRIES) {
@@ -727,9 +730,9 @@ find_and_track_existing_processes(void)
" on this platform untrackable, process"
" behind %s IPC is stable (was in %d"
" previous samples) so rather than"
- " bailing out (PCMK_fail_fast not"
- " requested), we just switch to a less"
- " optimal IPC liveness monitoring"
+ " bailing out (PCMK_" PCMK__ENV_FAIL_FAST
+ " not requested), we just switch to a"
+ " less optimal IPC liveness monitoring"
" (not very suitable for heavy load)",
pcmk_children[i].name, WAIT_TRIES - 1);
crm_warn("The process behind %s IPC cannot be"
@@ -822,7 +825,7 @@ init_children_processes(void *user_data)
*
* This may be useful for the daemons to know
*/
- setenv("PCMK_respawned", "true", 1);
+ pcmk__set_env_option(PCMK__ENV_RESPAWNED, "true", false);
pacemakerd_state = XML_PING_ATTR_PACEMAKERDSTATE_RUNNING;
return TRUE;
}
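Several hunks above replace getenv("PCMK_fail_fast") and friends with
pcmk__env_option(PCMK__ENV_FAIL_FAST), so both the PCMK_ and legacy HA_ spellings
of an option are resolved through one code path. A hypothetical helper showing the
resulting pattern (pcmk__env_option() and crm_is_true() are the calls used in the
hunks; the wrapper function itself is made up for illustration):

    #include <crm_internal.h>   /* pcmk__env_option(), PCMK__ENV_FAIL_FAST */
    #include <stdbool.h>

    static bool
    fail_fast_requested(void)
    {
        const char *value = pcmk__env_option(PCMK__ENV_FAIL_FAST);

        /* crm_is_true() accepts the usual boolean spellings; an unset
         * option yields NULL and therefore false. */
        return (value != NULL) && crm_is_true(value);
    }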
diff --git a/daemons/schedulerd/Makefile.am b/daemons/schedulerd/Makefile.am
index 57e819b..fab8e1a 100644
--- a/daemons/schedulerd/Makefile.am
+++ b/daemons/schedulerd/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2004-2021 the Pacemaker project contributors
+# Copyright 2004-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -10,7 +10,8 @@
include $(top_srcdir)/mk/common.mk
include $(top_srcdir)/mk/man.mk
-AM_CPPFLAGS += -I$(top_builddir) -I$(top_srcdir)
+AM_CPPFLAGS += -I$(top_builddir) \
+ -I$(top_srcdir)
halibdir = $(CRM_DAEMON_DIR)
@@ -26,27 +27,34 @@ endif
noinst_HEADERS = pacemaker-schedulerd.h
-pacemaker_schedulerd_CFLAGS = $(CFLAGS_HARDENED_EXE)
+pacemaker_schedulerd_CFLAGS = $(CFLAGS_HARDENED_EXE)
pacemaker_schedulerd_LDFLAGS = $(LDFLAGS_HARDENED_EXE)
-pacemaker_schedulerd_LDADD = $(top_builddir)/lib/common/libcrmcommon.la \
- $(top_builddir)/lib/pengine/libpe_status.la \
- $(top_builddir)/lib/pacemaker/libpacemaker.la
+pacemaker_schedulerd_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la
+pacemaker_schedulerd_LDADD += $(top_builddir)/lib/pengine/libpe_status.la
+pacemaker_schedulerd_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
+
# libcib for get_object_root()
pacemaker_schedulerd_SOURCES = pacemaker-schedulerd.c
pacemaker_schedulerd_SOURCES += schedulerd_messages.c
+.PHONY: install-exec-local
install-exec-local:
$(INSTALL) -d -m 750 $(DESTDIR)/$(PE_STATE_DIR)
-chown $(CRM_DAEMON_USER):$(CRM_DAEMON_GROUP) $(DESTDIR)/$(PE_STATE_DIR)
-if BUILD_LEGACY_LINKS
+.PHONY: install-exec-hook
install-exec-hook:
+if BUILD_LEGACY_LINKS
cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f pengine && $(LN_S) pacemaker-schedulerd pengine
+endif
+.PHONY: uninstall-hook
uninstall-hook:
+if BUILD_LEGACY_LINKS
cd $(DESTDIR)$(CRM_DAEMON_DIR) && rm -f pengine
endif
+.PHONY: uninstall-local
uninstall-local:
-rmdir $(DESTDIR)/$(PE_STATE_DIR)
diff --git a/daemons/schedulerd/pacemaker-schedulerd.h b/daemons/schedulerd/pacemaker-schedulerd.h
index cbb07e1..75b7d38 100644
--- a/daemons/schedulerd/pacemaker-schedulerd.h
+++ b/daemons/schedulerd/pacemaker-schedulerd.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -11,7 +11,7 @@
#define PCMK__PACEMAKER_SCHEDULERD__H
#include <crm_internal.h>
-#include <crm/pengine/pe_types.h>
+#include <crm/common/scheduler.h>
extern pcmk__output_t *logger_out;
extern pcmk__output_t *out;
diff --git a/daemons/schedulerd/schedulerd_messages.c b/daemons/schedulerd/schedulerd_messages.c
index 1c124d2..5a97365 100644
--- a/daemons/schedulerd/schedulerd_messages.c
+++ b/daemons/schedulerd/schedulerd_messages.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -22,12 +22,12 @@
static GHashTable *schedulerd_handlers = NULL;
-static pe_working_set_t *
+static pcmk_scheduler_t *
init_working_set(void)
{
- pe_working_set_t *data_set = pe_new_working_set();
+ pcmk_scheduler_t *scheduler = pe_new_working_set();
- CRM_ASSERT(data_set != NULL);
+ CRM_ASSERT(scheduler != NULL);
crm_config_error = FALSE;
crm_config_warning = FALSE;
@@ -35,8 +35,8 @@ init_working_set(void)
was_processing_error = FALSE;
was_processing_warning = FALSE;
- data_set->priv = logger_out;
- return data_set;
+ scheduler->priv = logger_out;
+ return scheduler;
}
static xmlNode *
@@ -72,7 +72,7 @@ handle_pecalc_request(pcmk__request_t *request)
xmlNode *reply = NULL;
bool is_repoke = false;
bool process = true;
- pe_working_set_t *data_set = init_working_set();
+ pcmk_scheduler_t *scheduler = init_working_set();
pcmk__ipc_send_ack(request->ipc_client, request->ipc_id, request->ipc_flags,
"ack", NULL, CRM_EX_INDETERMINATE);
@@ -81,9 +81,9 @@ handle_pecalc_request(pcmk__request_t *request)
CRM_FEATURE_SET);
converted = copy_xml(xml_data);
if (!cli_config_update(&converted, NULL, TRUE)) {
- data_set->graph = create_xml_node(NULL, XML_TAG_GRAPH);
- crm_xml_add_int(data_set->graph, "transition_id", 0);
- crm_xml_add_int(data_set->graph, "cluster-delay", 0);
+ scheduler->graph = create_xml_node(NULL, XML_TAG_GRAPH);
+ crm_xml_add_int(scheduler->graph, "transition_id", 0);
+ crm_xml_add_int(scheduler->graph, "cluster-delay", 0);
process = false;
free(digest);
@@ -98,9 +98,9 @@ handle_pecalc_request(pcmk__request_t *request)
if (process) {
pcmk__schedule_actions(converted,
- pe_flag_no_counts
- |pe_flag_no_compat
- |pe_flag_show_utilization, data_set);
+ pcmk_sched_no_counts
+ |pcmk_sched_no_compat
+ |pcmk_sched_show_utilization, scheduler);
}
// Get appropriate index into series[] array
@@ -112,7 +112,7 @@ handle_pecalc_request(pcmk__request_t *request)
series_id = 2;
}
- value = pe_pref(data_set->config_hash, series[series_id].param);
+ value = pe_pref(scheduler->config_hash, series[series_id].param);
if ((value == NULL)
|| (pcmk__scan_min_int(value, &series_wrap, -1) != pcmk_rc_ok)) {
series_wrap = series[series_id].wrap;
@@ -126,8 +126,8 @@ handle_pecalc_request(pcmk__request_t *request)
crm_trace("Series %s: wrap=%d, seq=%u, pref=%s",
series[series_id].name, series_wrap, seq, value);
- data_set->input = NULL;
- reply = create_reply(msg, data_set->graph);
+ scheduler->input = NULL;
+ reply = create_reply(msg, scheduler->graph);
if (reply == NULL) {
pcmk__format_result(&request->result, CRM_EX_ERROR, PCMK_EXEC_ERROR,
@@ -172,7 +172,7 @@ handle_pecalc_request(pcmk__request_t *request)
done:
free_xml(converted);
- pe_free_working_set(data_set);
+ pe_free_working_set(scheduler);
return reply;
}
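The schedulerd_messages.c changes above are mostly a type and variable rename
(pe_working_set_t/data_set to pcmk_scheduler_t/scheduler) plus the new pcmk_sched_*
flag names. Condensed into one hypothetical function, the lifecycle the handler
implements looks roughly like this (error handling and the IPC reply plumbing are
omitted; the API names are taken from the hunk):

    static void
    run_scheduler_once(xmlNode *cib_xml, pcmk__output_t *logger)
    {
        pcmk_scheduler_t *scheduler = pe_new_working_set();

        CRM_ASSERT(scheduler != NULL);
        scheduler->priv = logger;       /* route scheduler log output */

        pcmk__schedule_actions(cib_xml,
                               pcmk_sched_no_counts
                               |pcmk_sched_no_compat
                               |pcmk_sched_show_utilization, scheduler);

        /* scheduler->graph now holds the transition graph XML; the daemon
         * copies it into the IPC reply before tearing everything down. */

        scheduler->input = NULL;        /* caller keeps ownership of cib_xml */
        pe_free_working_set(scheduler);
    }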
diff --git a/devel/Makefile.am b/devel/Makefile.am
index 94581e1..b50f097 100644
--- a/devel/Makefile.am
+++ b/devel/Makefile.am
@@ -35,10 +35,12 @@ COCCI_FILES ?= coccinelle/string-any-of.cocci \
dist_noinst_SCRIPTS = coccinelle/test/testrunner.sh
-EXTRA_DIST = README gdbhelpers $(COCCI_FILES) \
- coccinelle/ref-passed-variables-inited.cocci \
- coccinelle/rename-fn.cocci \
- coccinelle/test/ref-passed-variables-inited.input.c \
+EXTRA_DIST = README \
+ gdbhelpers \
+ $(COCCI_FILES) \
+ coccinelle/ref-passed-variables-inited.cocci \
+ coccinelle/rename-fn.cocci \
+ coccinelle/test/ref-passed-variables-inited.input.c \
coccinelle/test/ref-passed-variables-inited.output
# Any file in this list is allowed to use any of the pcmk__ internal functions.
@@ -51,6 +53,7 @@ MAY_USE_INTERNAL_FILES = $(shell find .. -path "../lib/*.c" -o -path "../lib/*pr
# may be applied.
OTHER_FILES = $(shell find ../include -name '*h' -a \! -name '*internal.h' -a \! -path '../include/pcmki/*')
+.PHONY: cocci
cocci:
-for cf in $(COCCI_FILES); do \
for f in $(MAY_USE_INTERNAL_FILES); do \
@@ -61,9 +64,11 @@ cocci:
done ; \
done
+.PHONY: cocci-inplace
cocci-inplace:
$(MAKE) $(AM_MAKEFLAGS) _SPATCH_FLAGS=--in-place cocci
+.PHONY: cocci-test
cocci-test:
for f in coccinelle/test/*.c; do \
coccinelle/test/testrunner.sh $$f; \
@@ -78,6 +83,7 @@ cocci-test:
# See scan-build(1) for possible checkers (leave empty to use default set)
CLANG_checkers ?=
+.PHONY: clang
clang:
OUT=$$(cd $(top_builddir) \
&& scan-build $(CLANG_checkers:%=-enable-checker %) \
@@ -158,6 +164,8 @@ coverity-clean:
## cppcheck
+GLIB_CFLAGS ?= $(shell pkg-config --cflags glib-2.0)
+
# Use CPPCHECK_ARGS to pass extra cppcheck options, e.g.:
# --enable={warning,style,performance,portability,information,all}
# --inconclusive --std=posix
@@ -167,6 +175,7 @@ CPPCHECK_ARGS ?=
CPPCHECK_DIRS = replace lib daemons tools
CPPCHECK_OUT = $(abs_top_builddir)/cppcheck.out
+.PHONY: cppcheck
cppcheck:
cppcheck $(CPPCHECK_ARGS) -I $(top_srcdir)/include \
--output-file=$(CPPCHECK_OUT) \
@@ -191,21 +200,26 @@ COVERAGE_DIR = $(top_builddir)/coverage
.PHONY: coverage
coverage: coverage-partial-clean
cd $(top_builddir) \
- && $(MAKE) $(AM_MAKEFLAGS) core \
+ && $(MAKE) $(AM_MAKEFLAGS) \
&& lcov --no-external --exclude='*_test.c' -c -i -d . \
-o pacemaker_base.info \
&& $(MAKE) $(AM_MAKEFLAGS) check \
&& lcov --no-external --exclude='*_test.c' -c -d . \
-o pacemaker_test.info \
&& lcov -a pacemaker_base.info -a pacemaker_test.info \
- -o pacemaker_total.info
- genhtml $(top_builddir)/pacemaker_total.info -o $(COVERAGE_DIR) -s
+ -o pacemaker_total.info \
+ && lcov --remove pacemaker_total.info -o pacemaker_filtered.info\
+ "$(abs_top_builddir)/tools/*" \
+ "$(abs_top_builddir)/daemons/*/*" \
+ "$(abs_top_builddir)/replace/*" \
+ "$(abs_top_builddir)/lib/gnu/*"
+ genhtml $(top_builddir)/pacemaker_filtered.info -o $(COVERAGE_DIR) -s -t "Pacemaker code coverage"
# Check coverage of CLI regression tests
.PHONY: coverage-cts
coverage-cts: coverage-partial-clean
cd $(top_builddir) \
- && $(MAKE) $(AM_MAKEFLAGS) core \
+ && $(MAKE) $(AM_MAKEFLAGS) \
&& lcov --no-external -c -i -d tools -o pacemaker_base.info \
&& cts/cts-cli \
&& lcov --no-external -c -d tools -o pacemaker_test.info \
@@ -277,6 +291,7 @@ INDENT_PACEMAKER_STYLE = --blank-lines-after-declarations \
--swallow-optional-blank-lines \
--tab-size8
+.PHONY: indent
indent:
VERSION_CONTROL=none \
find $(INDENT_DIRS) -type f -name "*.[ch]" \
@@ -284,6 +299,30 @@ indent:
-exec indent $(INDENT_PACEMAKER_STYLE) $(INDENT_OPTS) \{\} \;
#
+# Check whether copyrights have been updated appropriately
+# (Set COMMIT to desired commit or commit range to check, defaulting to HEAD,
+# or set it empty to check uncommitted changes)
+#
+YEAR = $(shell date +%Y)
+MODIFIED_FILES = $(shell case "$(COMMIT)" in \
+ [0-9a-f]*$(rparen) \
+ git diff-tree --no-commit-id \
+ --name-only "$(COMMIT)" -r ;; \
+ *$(rparen) \
+ cd "$(top_srcdir)"; \
+ git ls-files --modified ;; \
+ esac)
+
+.PHONY: copyright
+copyright:
+ @cd "$(top_srcdir)" && for file in $(MODIFIED_FILES); do \
+ if ! grep 'opyright .*$(YEAR).* Pacemaker' "$$file" \
+ >/dev/null 2>&1; then \
+ echo "$$file"; \
+ fi; \
+ done
+
+#
# Scratch file for ad-hoc testing
#
@@ -291,5 +330,6 @@ EXTRA_PROGRAMS = scratch
nodist_scratch_SOURCES = scratch.c
scratch_LDADD = $(top_builddir)/lib/common/libcrmcommon.la
+.PHONY: clean-local
clean-local: coverage-clean coverity-clean cppcheck-clean
-rm -f $(EXTRA_PROGRAMS)
diff --git a/doc/Makefile.am b/doc/Makefile.am
index 1400145..a40ddfe 100644
--- a/doc/Makefile.am
+++ b/doc/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2003-2021 the Pacemaker project contributors
+# Copyright 2003-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -13,7 +13,10 @@ include $(top_srcdir)/mk/release.mk
# What formats to use for book uploads (i.e. "make www";
# use BOOK_FORMATS in sphinx subdirectory to change local builds)
-BOOK_FORMATS ?= html singlehtml pdf epub
+BOOK_FORMATS ?= html \
+ singlehtml \
+ pdf \
+ epub
# SNMP MIB
mibdir = $(datadir)/snmp/mibs
@@ -25,7 +28,8 @@ DEPRECATED_GENERATED =
if BUILD_ASCIIDOC
DEPRECATED_GENERATED += $(DEPRECATED_ORIGINAL:%.txt=%.html)
endif
-DEPRECATED_ALL = $(DEPRECATED_ORIGINAL) $(DEPRECATED_GENERATED)
+DEPRECATED_ALL = $(DEPRECATED_ORIGINAL) \
+ $(DEPRECATED_GENERATED)
doc_DATA = $(DEPRECATED_ALL)
noinst_SCRIPTS = abi-check
@@ -73,14 +77,17 @@ deprecated-clean:
# Annotated source code as HTML
# Cleaning first ensures we don't index unrelated stuff like RPM sources
+.PHONY: global
global:
$(MAKE) $(AM_MAKEFLAGS) -C .. clean-generic
$(MAKE) $(AM_MAKEFLAGS) -C ../rpm rpm-clean
cd .. && gtags -q && htags -sanhIT doc
+.PHONY: global-upload
global-upload: global
rsync $(RSYNC_OPTS) HTML/ "$(RSYNC_DEST)/$(PACKAGE)/global/$(TAG)/"
+.PHONY: global-clean
global-clean:
-rm -rf HTML
@@ -93,43 +100,53 @@ global-clean:
%.7.html: %.7
groff -mandoc `man -w ./$<` -T html > $@
+.PHONY: manhtml
manhtml:
$(MAKE) $(AM_MAKEFLAGS) -C .. all
find .. -name "[a-z]*.[78]" -exec $(MAKE) $(AM_MAKEFLAGS) \{\}.html \;
+.PHONY: manhtml-upload
manhtml-upload: manhtml
find .. -name "[a-z]*.[78].html" -exec \
rsync $(RSYNC_OPTS) \{\} "$(RSYNC_DEST)/$(PACKAGE)/man/" \;
+.PHONY: manhtml-clean
manhtml-clean:
-find .. -name "[a-z]*.[78].html" -exec rm \{\} \;
# API documentation as HTML
+.PHONY: doxygen
doxygen: Doxyfile
doxygen Doxyfile
+.PHONY: doxygen-upload
doxygen-upload: doxygen
rsync $(RSYNC_OPTS) api/html/ "$(RSYNC_DEST)/$(PACKAGE)/doxygen/$(TAG)/"
+.PHONY: doxygen-clean
doxygen-clean:
-rm -rf api
# ABI compatibility report as HTML
+.PHONY: abi
abi: abi-check
./abi-check $(PACKAGE) $(LAST_RELEASE) $(TAG)
+.PHONY: abi-www
abi-www:
export RSYNC_DEST=$(RSYNC_DEST); ./abi-check -u $(PACKAGE) $(LAST_RELEASE) $(TAG)
+.PHONY: abi-clean
abi-clean:
-rm -rf abi_dumps compat_reports
# The main documentation books (which are actually in the sphinx subdirectory)
+.PHONY: books-upload
books-upload:
$(MAKE) $(AM_MAKEFLAGS) -C sphinx clean
$(MAKE) $(AM_MAKEFLAGS) -C sphinx \
@@ -142,11 +159,13 @@ books-upload:
.PHONY: www
www: clean-local deprecated-upload manhtml-upload global-upload doxygen-upload books-upload
+.PHONY: clean-local
clean-local: global-clean manhtml-clean doxygen-clean abi-clean deprecated-clean
# "make check" will cause "make all" to be run, which means docs will get built
# as a part of running tests if they haven't already. That seems unnecessary, so
# override the default check-recursive rule with this one that just returns. If
# we ever need to add tests to this directory, this rule will have to come out.
+.PHONY: check-recursive
check-recursive:
@true
diff --git a/doc/abi-check.in b/doc/abi-check.in
index 5a5e253..6b6a8d3 100755
--- a/doc/abi-check.in
+++ b/doc/abi-check.in
@@ -1,6 +1,6 @@
#!@BASH_PATH@
#
-# Copyright 2011-2022 the Pacemaker project contributors
+# Copyright 2011-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -29,6 +29,12 @@ tag() {
fi
}
+sed_in_place() {
+ cp -p "$1" "$1.$$"
+ sed -e "$2" "$1" > "$1.$$"
+ mv "$1.$$" "$1"
+}
+
# Strip anything up to and including a dash from the argument
version() {
echo "$1" | sed s:.*-::
@@ -103,7 +109,7 @@ extract_one() {
# Remove "doc" from SUBDIRS in Makefile (but why?)
BUILD_ROOT="$(pwd)/$BUILD_ROOT"
- sed -i.sed 's: doc::' "$BUILD_ROOT/Makefile.am"
+ sed_in_place "$BUILD_ROOT/Makefile.am" 's: doc::'
# Run ABI dump
abi_config "$PACKAGE" "$VERSION" "$BUILD_ROOT" "$DESC"
diff --git a/doc/sphinx/Clusters_from_Scratch/apache.rst b/doc/sphinx/Clusters_from_Scratch/apache.rst
index e4eddff..c5c155e 100644
--- a/doc/sphinx/Clusters_from_Scratch/apache.rst
+++ b/doc/sphinx/Clusters_from_Scratch/apache.rst
@@ -316,7 +316,7 @@ have to worry about whether you can handle the load after a failover.
To do this, we create a location constraint.
In the location constraint below, we are saying the ``WebSite`` resource
-prefers the node ``pcmk-1`` with a score of ``50``. Here, the score indicates
+prefers the node ``pcmk-2`` with a score of ``50``. Here, the score indicates
how strongly we'd like the resource to run at this location.
.. code-block:: console
diff --git a/doc/sphinx/Clusters_from_Scratch/cluster-setup.rst b/doc/sphinx/Clusters_from_Scratch/cluster-setup.rst
index 0a7a7a5..437b5f8 100644
--- a/doc/sphinx/Clusters_from_Scratch/cluster-setup.rst
+++ b/doc/sphinx/Clusters_from_Scratch/cluster-setup.rst
@@ -114,14 +114,14 @@ Start and enable the daemon by issuing the following commands on each node:
# systemctl enable pcsd.service
Created symlink from /etc/systemd/system/multi-user.target.wants/pcsd.service to /usr/lib/systemd/system/pcsd.service.
-The installed packages will create an ``hacluster`` user with a disabled password.
-While this is fine for running ``pcs`` commands locally,
+The installed packages will create an |CRM_DAEMON_USER| user with a disabled
+password. While this is fine for running ``pcs`` commands locally,
the account needs a login password in order to perform such tasks as syncing
the Corosync configuration, or starting and stopping the cluster on other nodes.
This tutorial will make use of such commands,
-so now we will set a password for the ``hacluster`` user, using the same password
-on both nodes:
+so now we will set a password for the |CRM_DAEMON_USER| user, using the same
+password on both nodes:
.. code-block:: console
diff --git a/doc/sphinx/Makefile.am b/doc/sphinx/Makefile.am
index c4ade5c..e48e19a 100644
--- a/doc/sphinx/Makefile.am
+++ b/doc/sphinx/Makefile.am
@@ -55,7 +55,8 @@ DOTS = $(wildcard shared/images/*.dot)
# Vector sources for generated PNGs (including SVG equivalents of DOTS, created
# manually using dot)
-SVGS = $(wildcard shared/images/pcmk-*.svg) $(DOTS:%.dot=%.svg)
+SVGS = $(wildcard shared/images/pcmk-*.svg) \
+ $(DOTS:%.dot=%.svg)
# PNG images generated from SVGS
#
@@ -71,28 +72,33 @@ PNGS_Pacemaker_Remote = $(wildcard Pacemaker_Remote/images/*.png)
STATIC_FILES = $(wildcard _static/*.css)
-EXTRA_DIST = $(wildcard */*.rst) $(DOTS) $(SVGS) \
- $(PNGS_Clusters_from_Scratch) \
- $(PNGS_Pacemaker_Explained) \
- $(PNGS_Pacemaker_Remote) \
- $(wildcard Pacemaker_Python_API/_templates/*rst) \
- $(STATIC_FILES) \
+EXTRA_DIST = $(wildcard */*.rst) $(DOTS) $(SVGS) \
+ $(PNGS_Clusters_from_Scratch) \
+ $(PNGS_Pacemaker_Explained) \
+ $(PNGS_Pacemaker_Remote) \
+ $(wildcard Pacemaker_Python_API/_templates/*rst) \
+ $(STATIC_FILES) \
conf.py.in
# recursive, preserve symlinks/permissions/times, verbose, compress,
# don't cross filesystems, sparse, show progress
RSYNC_OPTS = -rlptvzxS --progress
+PACKAGE_SERIES = $(shell echo "$(VERSION)" | awk -F. '{ print $$1"."$$2 }')
+
BOOK_RSYNC_DEST = $(RSYNC_DEST)/$(PACKAGE)/doc/$(PACKAGE_SERIES)
BOOK = none
-DEPS_intro = shared/pacemaker-intro.rst $(PNGS_GENERATED)
+DEPS_intro = shared/pacemaker-intro.rst \
+ $(PNGS_GENERATED)
-DEPS_Clusters_from_Scratch = $(DEPS_intro) $(PNGS_Clusters_from_Scratch)
+DEPS_Clusters_from_Scratch = $(DEPS_intro) \
+ $(PNGS_Clusters_from_Scratch)
DEPS_Pacemaker_Administration = $(DEPS_intro)
DEPS_Pacemaker_Development =
-DEPS_Pacemaker_Explained = $(DEPS_intro) $(PNGS_Pacemaker_Explained)
+DEPS_Pacemaker_Explained = $(DEPS_intro) \
+ $(PNGS_Pacemaker_Explained)
DEPS_Pacemaker_Python_API = ../../python
DEPS_Pacemaker_Remote = $(PNGS_Pacemaker_Remote)
@@ -120,6 +126,14 @@ $(BOOKS:%=%/conf.py): conf.py.in
-e 's/%BOOK_TITLE%/$(subst _, ,$(@:%/conf.py=%))/g' \
-e 's#%SRC_DIR%#$(abs_srcdir)#g' \
-e 's#%ABS_TOP_SRCDIR%#$(abs_top_srcdir)#g' \
+ -e 's#%CONFIGDIR%#@CONFIGDIR@#g' \
+ -e 's#%CRM_BLACKBOX_DIR%#@CRM_BLACKBOX_DIR@#g' \
+ -e 's#%CRM_DAEMON_GROUP%#@CRM_DAEMON_GROUP@#g' \
+ -e 's#%CRM_DAEMON_USER%#@CRM_DAEMON_USER@#g' \
+ -e 's#%CRM_LOG_DIR%#@CRM_LOG_DIR@#g' \
+ -e 's#%CRM_SCHEMA_DIRECTORY%#@CRM_SCHEMA_DIRECTORY@#g' \
+ -e 's#%PACEMAKER_CONFIG_DIR%#@PACEMAKER_CONFIG_DIR@#g' \
+ -e 's#%PCMK_GNUTLS_PRIORITIES%#@PCMK_GNUTLS_PRIORITIES@#g' \
$(<) > "$@"
$(BOOK)/_build: $(STATIC_FILES) $(BOOK)/conf.py $(DEPS_$(BOOK)) $(wildcard $(srcdir)/$(BOOK)/*.rst)
@@ -160,15 +174,21 @@ if BUILD_SPHINX_DOCS
done
@rsync $(RSYNC_OPTS) "$(builddir)/build-$(PACKAGE_SERIES).txt" \
"$(RSYNC_DEST)/$(PACKAGE)/doc"
+endif
+.PHONY: all-local
all-local:
+if BUILD_SPHINX_DOCS
@for book in $(BOOKS); do \
$(MAKE) $(AM_MAKEFLAGS) BOOK=$$book \
PAPER="$(PAPER)" SPHINXFLAGS="$(SPHINXFLAGS)" \
BOOK_FORMATS="$(BOOK_FORMATS)" $$book/_build; \
done
+endif
+.PHONY: install-data-local
install-data-local: all-local
+if BUILD_SPHINX_DOCS
$(AM_V_at)for book in $(BOOKS); do \
for format in $(BOOK_FORMATS); do \
formatdir="$$book/_build/$$format"; \
@@ -183,13 +203,17 @@ install-data-local: all-local
done; \
done; \
done
+endif
+.PHONY: uninstall-local
uninstall-local:
+if BUILD_SPHINX_DOCS
$(AM_V_at)for book in $(BOOKS); do \
rm -rf "$(DESTDIR)/$(docdir)/$$book"; \
done
endif
+.PHONY: clean-local
clean-local:
$(AM_V_at)-rm -rf \
$(BOOKS:%="$(builddir)/%/_build") \
diff --git a/doc/sphinx/Pacemaker_Administration/administrative.rst b/doc/sphinx/Pacemaker_Administration/administrative.rst
new file mode 100644
index 0000000..7c8b346
--- /dev/null
+++ b/doc/sphinx/Pacemaker_Administration/administrative.rst
@@ -0,0 +1,150 @@
+.. index::
+ single: administrative mode
+
+Administrative Modes
+--------------------
+
+Intrusive administration can be performed on a Pacemaker cluster without
+causing resource failures, recovery, and fencing, by putting the cluster or a
+subset of it into an administrative mode.
+
+Pacemaker supports several administrative modes:
+
+* Maintenance mode for the entire cluster, specific nodes, or specific
+ resources
+* Unmanaged resources
+* Disabled configuration items
+* Standby mode for specific nodes
+
+Rules may be used to automatically set any of these modes for specific times or
+other conditions.
+
+
+.. index::
+ pair: administrative mode; maintenance mode
+
+.. _maintenance_mode:
+
+Maintenance Mode
+################
+
+In maintenance mode, the cluster will not start or stop resources. Recurring
+monitors for affected resources will be paused, except those specifying
+``role`` as ``Stopped``.
+
+To put a specific resource into maintenance mode, set the resource's
+``maintenance`` meta-attribute to ``true``.
+
+To put all active resources on a specific node into maintenance mode, set the
+node's ``maintenance`` node attribute to ``true``. When enabled, this overrides
+resource-specific maintenance mode.
+
+.. warning::
+
+ Restarting Pacemaker on a node that is in single-node maintenance mode will
+ likely lead to undesirable effects. If ``maintenance`` is set as a transient
+ attribute, it will be erased when Pacemaker is stopped, which will
+ immediately take the node out of maintenance mode and likely get it fenced.
+ If set as a permanent attribute, any resources active on the node will have
+ their local history erased when Pacemaker is restarted, so the cluster will
+ no longer consider them running on the node and thus will consider them
+ managed again, allowing them to be started elsewhere.
+
+To put all resources in the cluster into maintenance mode, set the
+``maintenance-mode`` cluster option to ``true``. When enabled, this overrides
+node- or resource-specific maintenance mode.
+
+Maintenance mode, at any level, overrides other administrative modes.
+
+
+.. index::
+ pair: administrative mode; unmanaged resources
+
+.. _unmanaged_resources:
+
+Unmanaged Resources
+###################
+
+An unmanaged resource will not be started or stopped by the cluster. A resource
+may become unmanaged in several ways:
+
+* The administrator may set the ``is-managed`` resource meta-attribute to
+ ``false`` (whether for a specific resource, or all resources without an
+ explicit setting via ``rsc_defaults``)
+* :ref:`Maintenance mode <maintenance_mode>` causes affected resources to
+ become unmanaged (and overrides any ``is-managed`` setting)
+* Certain types of failure cause affected resources to become unmanaged. These
+ include:
+
+ * Failed stop operations when the ``stonith-enabled`` cluster property is set
+ to ``false``
+ * Failure of an operation that has ``on-fail`` set to ``block``
+ * A resource detected as incorrectly active on more than one node when its
+ ``multiple-active`` meta-attribute is set to ``block``
+ * A resource constrained by a revoked ``rsc_ticket`` with ``loss-policy`` set
+ to ``freeze``
+ * Resources with ``requires`` set (or defaulting) to anything other than
+ ``nothing`` in a partition that loses quorum when the ``no-quorum-policy``
+ cluster option is set to ``freeze``
+
+Recurring actions are not affected by unmanaging a resource.
+
+.. warning::
+
+ Manually starting an unmanaged resource on a different node is strongly
+ discouraged. It will at least cause the cluster to consider the resource
+ failed, and may require the resource's ``target-role`` to be set to
+ ``Stopped`` then ``Started`` in order for recovery to succeed.
+
+
+.. index::
+ pair: administrative mode; disabled configuration
+
+.. _disabled_configuration:
+
+Disabled Configuration
+######################
+
+Some configuration elements disable particular behaviors:
+
+* The ``stonith-enabled`` cluster option, when set to ``false``, disables node
+ fencing. This is highly discouraged, as it can lead to data unavailability,
+ loss, or corruption.
+
+* The ``stop-all-resources`` cluster option, when set to ``true``, causes all
+ resources to be stopped.
+
+* Certain elements support an ``enabled`` meta-attribute, which if set to
+ ``false``, causes the cluster to act as if the specific element is not
+ configured. These include ``op``, ``alert`` *(since 2.1.6)*, and
+ ``recipient`` *(since 2.1.6)*. ``enabled`` may be set for specific ``op``
+ elements, or all operations without an explicit setting via ``op_defaults``.
+
+
+.. index::
+ pair: administrative mode; standby
+
+.. _standby:
+
+Standby Mode
+############
+
+When a node is put into standby, all resources will be moved away from the
+node, and all recurring operations will be stopped on the node, except those
+specifying ``role`` as ``Stopped`` (which will be newly initiated if
+appropriate).
+
+A node may be put into standby mode by setting its ``standby`` node attribute
+to ``true``. The attribute may be queried and set using the ``crm_standby``
+tool.
+
+
+.. index::
+ pair: administrative mode; rules
+
+Rules
+#####
+
+Rules may be used to set administrative mode options automatically according to
+various criteria such as date and time. See the "Rules" chapter of the
+*Pacemaker Explained* document for details.
diff --git a/doc/sphinx/Pacemaker_Administration/alerts.rst b/doc/sphinx/Pacemaker_Administration/alerts.rst
index c0f54c6..42efc8d 100644
--- a/doc/sphinx/Pacemaker_Administration/alerts.rst
+++ b/doc/sphinx/Pacemaker_Administration/alerts.rst
@@ -287,7 +287,7 @@ Special concerns when writing alert agents:
this into consideration, for example by queueing resource-intensive actions
into some other instance, instead of directly executing them.
-* Alert agents are run as the ``hacluster`` user, which has a minimal set
+* Alert agents are run as the |CRM_DAEMON_USER| user, which has a minimal set
of permissions. If an agent requires additional privileges, it is
recommended to configure ``sudo`` to allow the agent to run the necessary
commands as another user with the appropriate privileges.
@@ -297,7 +297,7 @@ Special concerns when writing alert agents:
user-configured ``timestamp-format``), ``CRM_alert_recipient,`` and all
instance attributes. Mostly this is needed simply to protect against
configuration errors, but if some user can modify the CIB without having
- ``hacluster``-level access to the cluster nodes, it is a potential security
+ |CRM_DAEMON_USER| access to the cluster nodes, it is a potential security
concern as well, to avoid the possibility of code injection.
.. note:: **ocf:pacemaker:ClusterMon compatibility**
@@ -308,4 +308,4 @@ Special concerns when writing alert agents:
passed to alert agents are available prepended with ``CRM_notify_``
as well as ``CRM_alert_``. One break in compatibility is that ``ClusterMon``
ran external scripts as the ``root`` user, while alert agents are run as the
- ``hacluster`` user.
+ |CRM_DAEMON_USER| user.
diff --git a/doc/sphinx/Pacemaker_Administration/configuring.rst b/doc/sphinx/Pacemaker_Administration/configuring.rst
index 415dd81..295c96a 100644
--- a/doc/sphinx/Pacemaker_Administration/configuring.rst
+++ b/doc/sphinx/Pacemaker_Administration/configuring.rst
@@ -189,48 +189,53 @@ cluster even if the machine itself is not in the same cluster. To do this, one
simply sets up a number of environment variables and runs the same commands as
when working on a cluster node.
-.. table:: **Environment Variables Used to Connect to Remote Instances of the CIB**
-
- +----------------------+-----------+------------------------------------------------+
- | Environment Variable | Default | Description |
- +======================+===========+================================================+
- | CIB_user | $USER | .. index:: |
- | | | single: CIB_user |
- | | | single: environment variable; CIB_user |
- | | | |
- | | | The user to connect as. Needs to be |
- | | | part of the ``haclient`` group on |
- | | | the target host. |
- +----------------------+-----------+------------------------------------------------+
- | CIB_passwd | | .. index:: |
- | | | single: CIB_passwd |
- | | | single: environment variable; CIB_passwd |
- | | | |
- | | | The user's password. Read from the |
- | | | command line if unset. |
- +----------------------+-----------+------------------------------------------------+
- | CIB_server | localhost | .. index:: |
- | | | single: CIB_server |
- | | | single: environment variable; CIB_server |
- | | | |
- | | | The host to contact |
- +----------------------+-----------+------------------------------------------------+
- | CIB_port | | .. index:: |
- | | | single: CIB_port |
- | | | single: environment variable; CIB_port |
- | | | |
- | | | The port on which to contact the server; |
- | | | required. |
- +----------------------+-----------+------------------------------------------------+
- | CIB_encrypted | TRUE | .. index:: |
- | | | single: CIB_encrypted |
- | | | single: environment variable; CIB_encrypted |
- | | | |
- | | | Whether to encrypt network traffic |
- +----------------------+-----------+------------------------------------------------+
+.. list-table:: **Environment Variables Used to Connect to Remote Instances of the CIB**
+ :class: longtable
+ :widths: 2 2 5
+ :header-rows: 1
+
+ * - Environment Variable
+ - Default
+ - Description
+ * - .. index::
+ single: CIB_user
+ single: environment variable; CIB_user
+
+ CIB_user
+ - |CRM_DAEMON_USER_RAW|
+ - The user to connect as. Needs to be part of the |CRM_DAEMON_GROUP| group
+ on the target host.
+ * - .. index::
+ single: CIB_passwd
+ single: environment variable; CIB_passwd
+
+ CIB_passwd
+ -
+ - The user's password. Read from the command line if unset.
+ * - .. index::
+ single: CIB_server
+ single: environment variable; CIB_server
+
+ CIB_server
+ - localhost
+ - The host to contact
+ * - .. index::
+ single: CIB_port
+ single: environment variable; CIB_port
+
+ CIB_port
+ -
+ - The port on which to contact the server; required
+ * - .. index::
+ single: CIB_encrypted
+ single: environment variable; CIB_encrypted
+
+ CIB_encrypted
+ - true
+ - Whether to encrypt network traffic
So, if **c001n01** is an active cluster node and is listening on port 1234
-for connections, and **someuser** is a member of the **haclient** group,
+for connections, and **someuser** is a member of the |CRM_DAEMON_GROUP| group,
then the following would prompt for **someuser**'s password and return
the cluster's current configuration:
@@ -243,27 +248,9 @@ For security reasons, the cluster does not listen for remote connections by
default. If you wish to allow remote access, you need to set the
``remote-tls-port`` (encrypted) or ``remote-clear-port`` (unencrypted) CIB
properties (i.e., those kept in the ``cib`` tag, like ``num_updates`` and
-``epoch``).
-
-.. table:: **Extra top-level CIB properties for remote access**
-
- +----------------------+-----------+------------------------------------------------------+
- | CIB Property | Default | Description |
- +======================+===========+======================================================+
- | remote-tls-port | | .. index:: |
- | | | single: remote-tls-port |
- | | | single: CIB property; remote-tls-port |
- | | | |
- | | | Listen for encrypted remote connections |
- | | | on this port. |
- +----------------------+-----------+------------------------------------------------------+
- | remote-clear-port | | .. index:: |
- | | | single: remote-clear-port |
- | | | single: CIB property; remote-clear-port |
- | | | |
- | | | Listen for plaintext remote connections |
- | | | on this port. |
- +----------------------+-----------+------------------------------------------------------+
+``epoch``). Encrypted communication is keyless, which makes it subject to
+man-in-the-middle attacks, and thus either option should be used only on
+protected networks.
.. important::
diff --git a/doc/sphinx/Pacemaker_Administration/index.rst b/doc/sphinx/Pacemaker_Administration/index.rst
index 327ad31..af89380 100644
--- a/doc/sphinx/Pacemaker_Administration/index.rst
+++ b/doc/sphinx/Pacemaker_Administration/index.rst
@@ -22,6 +22,8 @@ Table of Contents
cluster
configuring
tools
+ administrative
+ moving
troubleshooting
upgrading
alerts
diff --git a/doc/sphinx/Pacemaker_Explained/advanced-options.rst b/doc/sphinx/Pacemaker_Administration/moving.rst
index 20ab79e..3d6a92a 100644
--- a/doc/sphinx/Pacemaker_Explained/advanced-options.rst
+++ b/doc/sphinx/Pacemaker_Administration/moving.rst
@@ -1,171 +1,11 @@
-Advanced Configuration
-----------------------
-
-.. index::
- single: start-delay; operation attribute
- single: interval-origin; operation attribute
- single: interval; interval-origin
- single: operation; interval-origin
- single: operation; start-delay
-
-Specifying When Recurring Actions are Performed
-###############################################
-
-By default, recurring actions are scheduled relative to when the resource
-started. In some cases, you might prefer that a recurring action start relative
-to a specific date and time. For example, you might schedule an in-depth
-monitor to run once every 24 hours, and want it to run outside business hours.
-
-To do this, set the operation's ``interval-origin``. The cluster uses this point
-to calculate the correct ``start-delay`` such that the operation will occur
-at ``interval-origin`` plus a multiple of the operation interval.
-
-For example, if the recurring operation's interval is 24h, its
-``interval-origin`` is set to 02:00, and it is currently 14:32, then the
-cluster would initiate the operation after 11 hours and 28 minutes.
-
-The value specified for ``interval`` and ``interval-origin`` can be any
-date/time conforming to the
-`ISO8601 standard <https://en.wikipedia.org/wiki/ISO_8601>`_. By way of
-example, to specify an operation that would run on the first Monday of
-2021 and every Monday after that, you would add:
-
-.. topic:: Example recurring action that runs relative to base date/time
-
- .. code-block:: xml
-
- <op id="intensive-monitor" name="monitor" interval="P7D" interval-origin="2021-W01-1"/>
-
-.. index::
- single: resource; failure recovery
- single: operation; failure recovery
-
-.. _failure-handling:
-
-Handling Resource Failure
-#########################
-
-By default, Pacemaker will attempt to recover failed resources by restarting
-them. However, failure recovery is highly configurable.
-
-.. index::
- single: resource; failure count
- single: operation; failure count
-
-Failure Counts
-______________
-
-Pacemaker tracks resource failures for each combination of node, resource, and
-operation (start, stop, monitor, etc.).
-
-You can query the fail count for a particular node, resource, and/or operation
-using the ``crm_failcount`` command. For example, to see how many times the
-10-second monitor for ``myrsc`` has failed on ``node1``, run:
-
-.. code-block:: none
-
- # crm_failcount --query -r myrsc -N node1 -n monitor -I 10s
-
-If you omit the node, ``crm_failcount`` will use the local node. If you omit
-the operation and interval, ``crm_failcount`` will display the sum of the fail
-counts for all operations on the resource.
-
-You can use ``crm_resource --cleanup`` or ``crm_failcount --delete`` to clear
-fail counts. For example, to clear the above monitor failures, run:
-
-.. code-block:: none
-
- # crm_resource --cleanup -r myrsc -N node1 -n monitor -I 10s
-
-If you omit the resource, ``crm_resource --cleanup`` will clear failures for
-all resources. If you omit the node, it will clear failures on all nodes. If
-you omit the operation and interval, it will clear the failures for all
-operations on the resource.
-
-.. note::
-
- Even when cleaning up only a single operation, all failed operations will
- disappear from the status display. This allows us to trigger a re-check of
- the resource's current status.
-
-Higher-level tools may provide other commands for querying and clearing
-fail counts.
-
-The ``crm_mon`` tool shows the current cluster status, including any failed
-operations. To see the current fail counts for any failed resources, call
-``crm_mon`` with the ``--failcounts`` option. This shows the fail counts per
-resource (that is, the sum of any operation fail counts for the resource).
-
-.. index::
- single: migration-threshold; resource meta-attribute
- single: resource; migration-threshold
-
-Failure Response
-________________
-
-Normally, if a running resource fails, pacemaker will try to stop it and start
-it again. Pacemaker will choose the best location to start it each time, which
-may be the same node that it failed on.
-
-However, if a resource fails repeatedly, it is possible that there is an
-underlying problem on that node, and you might desire trying a different node
-in such a case. Pacemaker allows you to set your preference via the
-``migration-threshold`` resource meta-attribute. [#]_
-
-If you define ``migration-threshold`` to *N* for a resource, it will be banned
-from the original node after *N* failures there.
-
-.. note::
-
- The ``migration-threshold`` is per *resource*, even though fail counts are
- tracked per *operation*. The operation fail counts are added together
- to compare against the ``migration-threshold``.
-
-By default, fail counts remain until manually cleared by an administrator
-using ``crm_resource --cleanup`` or ``crm_failcount --delete`` (hopefully after
-first fixing the failure's cause). It is possible to have fail counts expire
-automatically by setting the ``failure-timeout`` resource meta-attribute.
-
-.. important::
-
- A successful operation does not clear past failures. If a recurring monitor
- operation fails once, succeeds many times, then fails again days later, its
- fail count is 2. Fail counts are cleared only by manual intervention or
- failure timeout.
-
-For example, setting ``migration-threshold`` to 2 and ``failure-timeout`` to
-``60s`` would cause the resource to move to a new node after 2 failures, and
-allow it to move back (depending on stickiness and constraint scores) after one
-minute.
-
-.. note::
-
- ``failure-timeout`` is measured since the most recent failure. That is, older
- failures do not individually time out and lower the fail count. Instead, all
- failures are timed out simultaneously (and the fail count is reset to 0) if
- there is no new failure for the timeout period.
-
-There are two exceptions to the migration threshold: when a resource either
-fails to start or fails to stop.
-
-If the cluster property ``start-failure-is-fatal`` is set to ``true`` (which is
-the default), start failures cause the fail count to be set to ``INFINITY`` and
-thus always cause the resource to move immediately.
-
-Stop failures are slightly different and crucial. If a resource fails to stop
-and fencing is enabled, then the cluster will fence the node in order to be
-able to start the resource elsewhere. If fencing is disabled, then the cluster
-has no way to continue and will not try to start the resource elsewhere, but
-will try to stop it again after any failure timeout or clearing.
+Moving Resources
+----------------
.. index::
single: resource; move
-Moving Resources
-################
-
Moving Resources Manually
-_________________________
+#########################
There are primarily two occasions when you would want to move a resource from
its current location: when the whole node is under maintenance, and when a
@@ -176,7 +16,7 @@ single resource needs to be moved.
single: node; standby mode
Standby Mode
-~~~~~~~~~~~~
+____________
Since everything eventually comes down to a score, you could create constraints
for every resource to prevent them from running on one node. While Pacemaker
@@ -215,7 +55,7 @@ A cluster node in standby mode will not run resources, but still contributes to
quorum, and may fence or be fenced by nodes.
Moving One Resource
-~~~~~~~~~~~~~~~~~~~
+___________________
When only one resource is required to move, we could do this by creating
location constraints. However, once again we provide a user-friendly shortcut
@@ -281,9 +121,10 @@ constraint will prevent the resource from running on that node until
cluster node is no longer available!
In some cases, such as when ``resource-stickiness`` is set to ``INFINITY``, it
-is possible that you will end up with the problem described in
-:ref:`node-score-equal`. The tool can detect some of these cases and deals with
-them by creating both positive and negative constraints. For example:
+is possible that you will end up with nodes with the same score, forcing the
+cluster to choose one (which may not be the one you want). The tool can detect
+some of these cases and deal with them by creating both positive and negative
+constraints. For example:
.. code-block:: xml
@@ -293,7 +134,7 @@ them by creating both positive and negative constraints. For example:
which has the same long-term consequences as discussed earlier.
Moving Resources Due to Connectivity Changes
-____________________________________________
+############################################
You can configure the cluster to move resources when external connectivity is
lost in two steps.
@@ -303,7 +144,7 @@ lost in two steps.
single: ping resource
Tell Pacemaker to Monitor Connectivity
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+______________________________________
First, add an ``ocf:pacemaker:ping`` resource to the cluster. The ``ping``
resource uses the system utility of the same name to test whether a list of
@@ -372,12 +213,12 @@ with a description of the most interesting parameters.
deal with the connectivity status that ``ocf:pacemaker:ping`` is recording.
Tell Pacemaker How to Interpret the Connectivity Data
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+_____________________________________________________
.. important::
- Before attempting the following, make sure you understand
- :ref:`rules`.
+ Before attempting the following, make sure you understand rules. See the
+ "Rules" chapter of the *Pacemaker Explained* document for details.
There are a number of ways to use the connectivity data.
@@ -462,125 +303,3 @@ nodes provided they have connectivity to at least three (again assuming that
<expression id="ping-prefer" attribute="pingd" operation="defined"/>
</rule>
</rsc_location>
-
-
-.. _live-migration:
-
-Migrating Resources
-___________________
-
-Normally, when the cluster needs to move a resource, it fully restarts the
-resource (that is, it stops the resource on the current node and starts it on
-the new node).
-
-However, some types of resources, such as many virtual machines, are able to
-move to another location without loss of state (often referred to as live
-migration or hot migration). In pacemaker, this is called resource migration.
-Pacemaker can be configured to migrate a resource when moving it, rather than
-restarting it.
-
-Not all resources are able to migrate; see the
-:ref:`migration checklist <migration_checklist>` below. Even those that can,
-won't do so in all situations. Conceptually, there are two requirements from
-which the other prerequisites follow:
-
-* The resource must be active and healthy at the old location; and
-* everything required for the resource to run must be available on both the old
- and new locations.
-
-The cluster is able to accommodate both *push* and *pull* migration models by
-requiring the resource agent to support two special actions: ``migrate_to``
-(performed on the current location) and ``migrate_from`` (performed on the
-destination).
-
-In push migration, the process on the current location transfers the resource
-to the new location where is it later activated. In this scenario, most of the
-work would be done in the ``migrate_to`` action and, if anything, the
-activation would occur during ``migrate_from``.
-
-Conversely for pull, the ``migrate_to`` action is practically empty and
-``migrate_from`` does most of the work, extracting the relevant resource state
-from the old location and activating it.
-
-There is no wrong or right way for a resource agent to implement migration, as
-long as it works.
-
-.. _migration_checklist:
-
-.. topic:: Migration Checklist
-
- * The resource may not be a clone.
- * The resource agent standard must be OCF.
- * The resource must not be in a failed or degraded state.
- * The resource agent must support ``migrate_to`` and ``migrate_from``
- actions, and advertise them in its meta-data.
- * The resource must have the ``allow-migrate`` meta-attribute set to
- ``true`` (which is not the default).
-
-If an otherwise migratable resource depends on another resource via an ordering
-constraint, there are special situations in which it will be restarted rather
-than migrated.
-
-For example, if the resource depends on a clone, and at the time the resource
-needs to be moved, the clone has instances that are stopping and instances that
-are starting, then the resource will be restarted. The scheduler is not yet
-able to model this situation correctly and so takes the safer (if less optimal)
-path.
-
-Also, if a migratable resource depends on a non-migratable resource, and both
-need to be moved, the migratable resource will be restarted.
-
-
-.. index::
- single: reload
- single: reload-agent
-
-Reloading an Agent After a Definition Change
-############################################
-
-The cluster automatically detects changes to the configuration of active
-resources. The cluster's normal response is to stop the service (using the old
-definition) and start it again (with the new definition). This works, but some
-resource agents are smarter and can be told to use a new set of options without
-restarting.
-
-To take advantage of this capability, the resource agent must:
-
-* Implement the ``reload-agent`` action. What it should do depends completely
- on your application!
-
- .. note::
-
- Resource agents may also implement a ``reload`` action to make the managed
- service reload its own *native* configuration. This is different from
- ``reload-agent``, which makes effective changes in the resource's
- *Pacemaker* configuration (specifically, the values of the agent's
- reloadable parameters).
-
-* Advertise the ``reload-agent`` operation in the ``actions`` section of its
- meta-data.
-
-* Set the ``reloadable`` attribute to 1 in the ``parameters`` section of
- its meta-data for any parameters eligible to be reloaded after a change.
-
-Once these requirements are satisfied, the cluster will automatically know to
-reload the resource (instead of restarting) when a reloadable parameter
-changes.
-
-.. note::
-
- Metadata will not be re-read unless the resource needs to be started. If you
- edit the agent of an already active resource to set a parameter reloadable,
- the resource may restart the first time the parameter value changes.
-
-.. note::
-
- If both a reloadable and non-reloadable parameter are changed
- simultaneously, the resource will be restarted.
-
-.. rubric:: Footnotes
-
-.. [#] The naming of this option was perhaps unfortunate as it is easily
- confused with live migration, the process of moving a resource from one
- node to another without stopping it. Xen virtual guests are the most
- common example of resources that can be migrated in this manner.
diff --git a/doc/sphinx/Pacemaker_Administration/pcs-crmsh.rst b/doc/sphinx/Pacemaker_Administration/pcs-crmsh.rst
index 61ab4e6..3eda60a 100644
--- a/doc/sphinx/Pacemaker_Administration/pcs-crmsh.rst
+++ b/doc/sphinx/Pacemaker_Administration/pcs-crmsh.rst
@@ -118,14 +118,11 @@ Manage Resources
.. topic:: Create a Resource
.. code-block:: none
-
- crmsh # crm configure primitive ClusterIP ocf:heartbeat:IPaddr2 \
- params ip=192.168.122.120 cidr_netmask=24 \
- op monitor interval=30s
+ crmsh # crm configure primitive ClusterIP IPaddr2 params ip=192.168.122.120 cidr_netmask=24
pcs # pcs resource create ClusterIP IPaddr2 ip=192.168.122.120 cidr_netmask=24
-pcs determines the standard and provider (``ocf:heartbeat``) automatically
-since ``IPaddr2`` is unique, and automatically creates operations (including
+Both crmsh and pcs determine the standard and provider (``ocf:heartbeat``) automatically
+since ``IPaddr2`` is unique, and automatically create operations (including
monitor) based on the agent's meta-data.
.. topic:: Show Configuration of All Resources
@@ -270,6 +267,10 @@ edited and verified before committing to the live configuration:
crmsh # crm configure ms WebDataClone WebData \
meta master-max=1 master-node-max=1 \
clone-max=2 clone-node-max=1 notify=true
+ crmsh # crm configure clone WebDataClone WebData \
+ meta promotable=true \
+ promoted-max=1 promoted-node-max=1 \
+ clone-max=2 clone-node-max=1 notify=true
pcs-0.9 # pcs resource master WebDataClone WebData \
master-max=1 master-node-max=1 \
clone-max=2 clone-node-max=1 notify=true
@@ -277,6 +278,7 @@ edited and verified before committing to the live configuration:
promoted-max=1 promoted-node-max=1 \
clone-max=2 clone-node-max=1 notify=true
+crmsh supports both ways of configuring a promotable clone since crmsh 4.4.0 ('configure ms' is deprecated).
pcs will generate the clone name automatically if it is omitted from the
command line.
diff --git a/doc/sphinx/Pacemaker_Development/c.rst b/doc/sphinx/Pacemaker_Development/c.rst
index 66ce3b2..b03ddae 100644
--- a/doc/sphinx/Pacemaker_Development/c.rst
+++ b/doc/sphinx/Pacemaker_Development/c.rst
@@ -225,8 +225,8 @@ a ``GHashTable *`` member, the argument should be marked as ``[in,out]`` if the
function inserts data into the table, even if the struct members themselves are
not changed. However, an argument is not ``[in,out]`` if something reachable
via the argument is modified via a separate argument. For example, both
-``pe_resource_t`` and ``pe_node_t`` contain pointers to their
-``pe_working_set_t`` and thus indirectly to each other, but if the function
+``pcmk_resource_t`` and ``pcmk_node_t`` contain pointers to their
+``pcmk_scheduler_t`` and thus indirectly to each other, but if the function
modifies the resource via the resource argument, the node argument does not
have to be ``[in,out]``.
@@ -745,10 +745,20 @@ readability and logging consistency.
Functions
#########
+Function Naming
+_______________
+
Function names should be unique across the entire project, to allow for
individual tracing via ``PCMK_trace_functions``, and make it easier to search
code and follow detail logs.
+A common function signature is a comparison function that returns 0 if its
+arguments are equal for sorting purposes, -1 if the first argument should sort
+first, and 1 if the second argument should sort first. Such a function should
+have ``cmp`` in its name, to parallel ``strcmp()``; ``sort`` should only be
+used in the names of functions that sort an entire list (typically using a
+``cmp`` function).
+
Function Definitions
____________________
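As a concrete illustration of the naming guideline added above (hypothetical, not
taken from the tree): a function that compares two items and returns -1/0/1 carries
"cmp" in its name, while "sort" is reserved for functions that order an entire list
using such a comparator:

    #include <string.h>

    static int
    node_name_cmp(const char *name1, const char *name2)
    {
        int rc = strcmp(name1, name2);

        if (rc < 0) {
            return -1;      /* name1 sorts first */
        } else if (rc > 0) {
            return 1;       /* name2 sorts first */
        }
        return 0;           /* equal for sorting purposes */
    }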
diff --git a/doc/sphinx/Pacemaker_Development/components.rst b/doc/sphinx/Pacemaker_Development/components.rst
index e14df26..5086fa8 100644
--- a/doc/sphinx/Pacemaker_Development/components.rst
+++ b/doc/sphinx/Pacemaker_Development/components.rst
@@ -301,7 +301,7 @@ directly. This allows them to run using a ``CIB_file`` without the cluster
needing to be active.
The main entry point for the scheduler code is
-``lib/pacemaker/pcmk_sched_allocate.c:pcmk__schedule_actions()``. It sets
+``lib/pacemaker/pcmk_scheduler.c:pcmk__schedule_actions()``. It sets
defaults and calls a series of functions for the scheduling. Some key steps:
* ``unpack_cib()`` parses most of the CIB XML into data structures, and
@@ -315,7 +315,7 @@ defaults and calls a series of functions for the scheduling. Some key steps:
the CIB status section. This is used to decide whether certain
actions need to be done, such as deleting orphan resources, forcing a restart
when a resource definition changes, etc.
-* ``allocate_resources()`` assigns resources to nodes.
+* ``assign_resources()`` assigns resources to nodes.
* ``schedule_resource_actions()`` schedules resource-specific actions (which
might or might not end up in the final graph).
* ``pcmk__apply_orderings()`` processes ordering constraints in order to modify
@@ -335,7 +335,7 @@ Working with the scheduler is difficult. Challenges include:
* It produces an insane amount of log messages at debug and trace levels.
You can put resource ID(s) in the ``PCMK_trace_tags`` environment variable to
enable trace-level messages only when related to specific resources.
-* Different parts of the main ``pe_working_set_t`` structure are finalized at
+* Different parts of the main ``pcmk_scheduler_t`` structure are finalized at
different points in the scheduling process, so you have to keep in mind
whether information you're using at one point of the code can possibly change
later. For example, data unpacked from the CIB can safely be used anytime
@@ -347,24 +347,24 @@ Working with the scheduler is difficult. Challenges include:
.. index::
- single: pe_working_set_t
+ single: pcmk_scheduler_t
Cluster Working Set
___________________
-The main data object for the scheduler is ``pe_working_set_t``, which contains
+The main data object for the scheduler is ``pcmk_scheduler_t``, which contains
all information needed about nodes, resources, constraints, etc., both as the
raw CIB XML and parsed into more usable data structures, plus the resulting
-transition graph XML. The variable name is usually ``data_set``.
+transition graph XML. The variable name is usually ``scheduler``.
.. index::
- single: pe_resource_t
+ single: pcmk_resource_t
Resources
_________
-``pe_resource_t`` is the data object representing cluster resources. A resource
-has a variant: primitive (a.k.a. native), group, clone, or bundle.
+``pcmk_resource_t`` is the data object representing cluster resources. A
+resource has a variant: primitive (a.k.a. native), group, clone, or bundle.
The resource object has members for two sets of methods,
``resource_object_functions_t`` from the ``libpe_status`` public API, and
@@ -374,45 +374,45 @@ The resource object has members for two sets of methods,
The object functions have basic capabilities such as unpacking the resource
XML, and determining the current or planned location of the resource.
-The allocation functions have more obscure capabilities needed for scheduling,
+The assignment functions have more obscure capabilities needed for scheduling,
such as processing location and ordering constraints. For example,
``pcmk__create_internal_constraints()`` simply calls the
``internal_constraints()`` method for each top-level resource in the cluster.
.. index::
- single: pe_node_t
+ single: pcmk_node_t
Nodes
_____
-Allocation of resources to nodes is done by choosing the node with the highest
+Assignment of resources to nodes is done by choosing the node with the highest
score for a given resource. The scheduler does a bunch of processing to
-generate the scores, then the actual allocation is straightforward.
+generate the scores, then the actual assignment is straightforward.
-Node lists are frequently used. For example, ``pe_working_set_t`` has a
+Node lists are frequently used. For example, ``pcmk_scheduler_t`` has a
``nodes`` member which is a list of all nodes in the cluster, and
-``pe_resource_t`` has a ``running_on`` member which is a list of all nodes on
-which the resource is (or might be) active. These are lists of ``pe_node_t``
+``pcmk_resource_t`` has a ``running_on`` member which is a list of all nodes on
+which the resource is (or might be) active. These are lists of ``pcmk_node_t``
objects.
-The ``pe_node_t`` object contains a ``struct pe_node_shared_s *details`` member
-with all node information that is independent of resource allocation (the node
-name, etc.).
+The ``pcmk_node_t`` object contains a ``struct pe_node_shared_s *details``
+member with all node information that is independent of resource assignment
+(the node name, etc.).
The working set's ``nodes`` member contains the original of this information.
-All other node lists contain copies of ``pe_node_t`` where only the ``details``
-member points to the originals in the working set's ``nodes`` list. In this
-way, the other members of ``pe_node_t`` (such as ``weight``, which is the node
-score) may vary by node list, while the common details are shared.
+All other node lists contain copies of ``pcmk_node_t`` where only the
+``details`` member points to the originals in the working set's ``nodes`` list.
+In this way, the other members of ``pcmk_node_t`` (such as ``weight``, which is
+the node score) may vary by node list, while the common details are shared.
.. index::
- single: pe_action_t
+ single: pcmk_action_t
single: pe_action_flags
Actions
_______
-``pe_action_t`` is the data object representing actions that might need to be
+``pcmk_action_t`` is the data object representing actions that might need to be
taken. These could be resource actions, cluster-wide actions such as fencing a
node, or "pseudo-actions" which are abstractions used as convenient points for
ordering other actions against.
@@ -443,7 +443,7 @@ Colocation constraints come into play in these parts of the scheduler code:
* When choosing roles for promotable clone instances, so colocations involving
a specific role can affect which instances are promoted
-The resource allocation functions have several methods related to colocations:
+The resource assignment functions have several methods related to colocations:
* ``apply_coloc_score():`` This applies a colocation's score to either the
dependent's allowed node scores (if called while resources are being
diff --git a/doc/sphinx/Pacemaker_Development/helpers.rst b/doc/sphinx/Pacemaker_Development/helpers.rst
index 3fcb48d..6bd1926 100644
--- a/doc/sphinx/Pacemaker_Development/helpers.rst
+++ b/doc/sphinx/Pacemaker_Development/helpers.rst
@@ -476,14 +476,13 @@ The Pacemaker build process uses ``lcov`` and special make targets to generate
an HTML coverage report that can be inspected with any web browser.
To start, you'll need to install the ``lcov`` package which is included in most
-distributions. Next, reconfigure and rebuild the source tree:
+distributions. Next, reconfigure the source tree:
.. code-block:: none
$ ./configure --with-coverage
- $ make
-Then simply run ``make coverage``. This will do the same thing as ``make check``,
+Then run ``make -C devel coverage``. This will do the same thing as ``make check``,
but will generate a bunch of intermediate files as part of the compiler's output.
Essentially, the coverage tools run all the unit tests and make a note if a given
line of code is executed as a part of some test program. This will include not
diff --git a/doc/sphinx/Pacemaker_Explained/acls.rst b/doc/sphinx/Pacemaker_Explained/acls.rst
index 67d5d15..c3de39d 100644
--- a/doc/sphinx/Pacemaker_Explained/acls.rst
+++ b/doc/sphinx/Pacemaker_Explained/acls.rst
@@ -6,9 +6,9 @@
Access Control Lists (ACLs)
---------------------------
-By default, the ``root`` user or any user in the ``haclient`` group can modify
-Pacemaker's CIB without restriction. Pacemaker offers *access control lists
-(ACLs)* to provide more fine-grained authorization.
+By default, the ``root`` user or any user in the |CRM_DAEMON_GROUP| group can
+modify Pacemaker's CIB without restriction. Pacemaker offers *access control
+lists (ACLs)* to provide more fine-grained authorization.
.. important::
@@ -24,7 +24,7 @@ In order to use ACLs:
* The ``enable-acl`` :ref:`cluster option <cluster_options>` must be set to
true.
-* Desired users must have user accounts in the ``haclient`` group on all
+* Desired users must have user accounts in the |CRM_DAEMON_GROUP| group on all
cluster nodes in the cluster.
* If your CIB was created before Pacemaker 1.1.12, it might need to be updated
@@ -275,9 +275,9 @@ elements.
.. important::
- The ``root`` and ``hacluster`` user accounts always have full access to the
- CIB, regardless of ACLs. For all other user accounts, when ``enable-acl`` is
- true, permission to all parts of the CIB is denied by default (permissions
+ The ``root`` and |CRM_DAEMON_USER| user accounts always have full access to
+ the CIB, regardless of ACLs. For all other user accounts, when ``enable-acl``
+ is true, permission to all parts of the CIB is denied by default (permissions
must be explicitly granted).
ACL Examples
@@ -436,8 +436,8 @@ the CIB, such as ``crm_attribute`` when managing permanent node attributes,
``crm_mon``, and ``cibadmin``.
However, command-line tools that communicate directly with Pacemaker daemons
-via IPC are not affected by ACLs. For example, users in the ``haclient`` group
-may still do the following, regardless of ACLs:
+via IPC are not affected by ACLs. For example, users in the |CRM_DAEMON_GROUP|
+group may still do the following, regardless of ACLs:
* Query transient node attribute values using ``crm_attribute`` and
``attrd_updater``.
diff --git a/doc/sphinx/Pacemaker_Explained/cluster-options.rst b/doc/sphinx/Pacemaker_Explained/cluster-options.rst
new file mode 100644
index 0000000..77bd7e6
--- /dev/null
+++ b/doc/sphinx/Pacemaker_Explained/cluster-options.rst
@@ -0,0 +1,921 @@
+Cluster-Wide Configuration
+--------------------------
+
+.. index::
+ pair: XML element; cib
+ pair: XML element; configuration
+
+Configuration Layout
+####################
+
+The cluster is defined by the Cluster Information Base (CIB), which uses XML
+notation. The simplest CIB, an empty one, looks like this:
+
+.. topic:: An empty configuration
+
+ .. code-block:: xml
+
+ <cib crm_feature_set="3.6.0" validate-with="pacemaker-3.5" epoch="1" num_updates="0" admin_epoch="0">
+ <configuration>
+ <crm_config/>
+ <nodes/>
+ <resources/>
+ <constraints/>
+ </configuration>
+ <status/>
+ </cib>
+
+The empty configuration above contains the major sections that make up a CIB:
+
+* ``cib``: The entire CIB is enclosed with a ``cib`` element. Certain
+ fundamental settings are defined as attributes of this element.
+
+ * ``configuration``: This section -- the primary focus of this document --
+ contains traditional configuration information such as what resources the
+ cluster serves and the relationships among them.
+
+ * ``crm_config``: cluster-wide configuration options
+
+ * ``nodes``: the machines that host the cluster
+
+ * ``resources``: the services run by the cluster
+
+ * ``constraints``: indications of how resources should be placed
+
+ * ``status``: This section contains the history of each resource on each
+ node. Based on this data, the cluster can construct the complete current
+ state of the cluster. The authoritative source for this section is the
+ local executor (pacemaker-execd process) on each cluster node, and the
+ cluster will occasionally repopulate the entire section. For this reason,
+ it is never written to disk, and administrators are advised against
+ modifying it in any way.
+
+In this document, configuration settings will be described as properties or
+options based on how they are defined in the CIB:
+
+* Properties are XML attributes of an XML element.
+
+* Options are name-value pairs expressed as ``nvpair`` child elements of an XML
+ element.
+
+Normally, you will use command-line tools that abstract the XML, so the
+distinction will be unimportant; both properties and options are cluster
+settings you can tweak.
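+For example, ``validate-with`` (described below) is a CIB property (an
+attribute of the ``cib`` element), while ``no-quorum-policy`` is a cluster
+option (an ``nvpair`` within ``crm_config``).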
+
+Configuration Value Types
+#########################
+
+Throughout this document, configuration values will be designated as having one
+of the following types:
+
+.. list-table:: **Configuration Value Types**
+ :class: longtable
+ :widths: 1 3
+ :header-rows: 1
+
+ * - Type
+ - Description
+ * - .. _boolean:
+
+ .. index::
+ pair: type; boolean
+
+ boolean
+ - Case-insensitive text value where ``1``, ``yes``, ``y``, ``on``,
+ and ``true`` evaluate as true and ``0``, ``no``, ``n``, ``off``,
+ ``false``, and unset evaluate as false
+ * - .. _date_time:
+
+ .. index::
+ pair: type; date/time
+
+ date/time
+ - Textual timestamp like ``Sat Dec 21 11:47:45 2013``
+ * - .. _duration:
+
+ .. index::
+ pair: type; duration
+
+ duration
+ - A time duration, specified either like a :ref:`timeout <timeout>` or an
+ `ISO 8601 duration <https://en.wikipedia.org/wiki/ISO_8601#Durations>`_.
+ A duration may be up to approximately 49 days but is intended for much
+ smaller time periods.
+ * - .. _enumeration:
+
+ .. index::
+ pair: type; enumeration
+
+ enumeration
+ - Text that must be one of a set of defined values (which will be listed
+ in the description)
+ * - .. _integer:
+
+ .. index::
+ pair: type; integer
+
+ integer
+ - 32-bit signed integer value (-2,147,483,648 to 2,147,483,647)
+ * - .. _nonnegative_integer:
+
+ .. index::
+ pair: type; nonnegative integer
+
+ nonnegative integer
+ - 32-bit nonnegative integer value (0 to 2,147,483,647)
+ * - .. _port:
+
+ .. index::
+ pair: type; port
+
+ port
+ - Integer TCP port number (0 to 65535)
+ * - .. _score:
+
+ .. index::
+ pair: type; score
+
+ score
+ - A Pacemaker score can be an integer between -1,000,000 and 1,000,000, or
+ a string alias: ``INFINITY`` or ``+INFINITY`` is equivalent to
+ 1,000,000, ``-INFINITY`` is equivalent to -1,000,000, and ``red``,
+ ``yellow``, and ``green`` are equivalent to integers as described in
+ :ref:`node-health`.
+ * - .. _text:
+
+ .. index::
+ pair: type; text
+
+ text
+ - A text string
+ * - .. _timeout:
+
+ .. index::
+ pair: type; timeout
+
+ timeout
+ - A time duration, specified as a bare number (in which case it is
+ considered to be in seconds) or a number with a unit (``ms`` or ``msec``
+ for milliseconds, ``us`` or ``usec`` for microseconds, ``s`` or ``sec``
+ for seconds, ``m`` or ``min`` for minutes, ``h`` or ``hr`` for hours)
+ optionally with whitespace before and/or after the number.
+ * - .. _version:
+
+ .. index::
+ pair: type; version
+
+ version
+ - Version number (any combination of alphanumeric characters, dots, and
+ dashes, starting with a number).
+
+
+Scores
+______
+
+Scores are integral to how Pacemaker works. Practically everything from moving
+a resource to deciding which resource to stop in a degraded cluster is achieved
+by manipulating scores in some way.
+
+Scores are calculated per resource and node. Any node with a negative score for
+a resource can't run that resource. The cluster places a resource on the node
+with the highest score for it.
+
+Score addition and subtraction follow these rules:
+
+* Any value (including ``INFINITY``) - ``INFINITY`` = ``-INFINITY``
+* ``INFINITY`` + any value other than ``-INFINITY`` = ``INFINITY``
+
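+For example, adding 5,000 to ``INFINITY`` yields ``INFINITY``, while
+subtracting ``INFINITY`` from ``INFINITY`` yields ``-INFINITY``.
+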
+.. note::
+
+ What if you want to use a score higher than 1,000,000? Typically this possibility
+ arises when someone wants to base the score on some external metric that might
+ go above 1,000,000.
+
+ The short answer is you can't.
+
+ The long answer is that it is sometimes possible to work around this limitation
+ creatively. You may be able to set the score to some computed value based on
+ the external metric rather than use the metric directly. For nodes, you can
+ store the metric as a node attribute, and query the attribute when computing
+ the score (possibly as part of a custom resource agent).
+
+
+CIB Properties
+##############
+
+Certain settings are defined by CIB properties (that is, attributes of the
+``cib`` tag) rather than with the rest of the cluster configuration in the
+``configuration`` section.
+
+The reason is simply a matter of parsing. These options are used by the
+configuration database which is, by design, mostly ignorant of the content it
+holds. So the decision was made to place them in an easy-to-find location.
+
+.. list-table:: **CIB Properties**
+ :class: longtable
+ :widths: 2 2 2 5
+ :header-rows: 1
+
+ * - Name
+ - Type
+ - Default
+ - Description
+ * - .. _admin_epoch:
+
+ .. index::
+ pair: admin_epoch; cib
+
+ admin_epoch
+ - :ref:`nonnegative integer <nonnegative_integer>`
+ - 0
+ - When a node joins the cluster, the cluster asks the node with the
+ highest (``admin_epoch``, ``epoch``, ``num_updates``) tuple to replace
+ the configuration on all the nodes -- which makes setting them correctly
+ very important. ``admin_epoch`` is never modified by the cluster; you
+ can use this to make the configurations on any inactive nodes obsolete.
+ * - .. _epoch:
+
+ .. index::
+ pair: epoch; cib
+
+ epoch
+ - :ref:`nonnegative integer <nonnegative_integer>`
+ - 0
+ - The cluster increments this every time the CIB's configuration section
+ is updated.
+ * - .. _num_updates:
+
+ .. index::
+ pair: num_updates; cib
+
+ num_updates
+ - :ref:`nonnegative integer <nonnegative_integer>`
+ - 0
+ - The cluster increments this every time the CIB's configuration or status
+ sections are updated, and resets it to 0 when epoch changes.
+ * - .. _validate_with:
+
+ .. index::
+ pair: validate-with; cib
+
+ validate-with
+ - :ref:`enumeration <enumeration>`
+ -
+ - Determines the type of XML validation that will be done on the
+ configuration. Allowed values are ``none`` (in which case the cluster
+ will not require that updates conform to expected syntax) and the base
+ names of schema files installed on the local machine (for example,
+ "pacemaker-3.9")
+ * - .. _remote_tls_port:
+
+ .. index::
+ pair: remote-tls-port; cib
+
+ remote-tls-port
+ - :ref:`port <port>`
+ -
+ - If set, the CIB manager will listen for anonymously encrypted remote
+ connections on this port, to allow CIB administration from hosts not in
+ the cluster. No key is used, so this should be used only on a protected
+ network where man-in-the-middle attacks can be avoided.
+ * - .. _remote_clear_port:
+
+ .. index::
+ pair: remote-clear-port; cib
+
+ remote-clear-port
+ - :ref:`port <port>`
+ -
+ - If set to a TCP port number, the CIB manager will listen for remote
+ connections on this port, to allow for CIB administration from hosts not
+ in the cluster. No encryption is used, so this should be used only on a
+ protected network.
+ * - .. _cib_last_written:
+
+ .. index::
+ pair: cib-last-written; cib
+
+ cib-last-written
+ - :ref:`date/time <date_time>`
+ -
+ - Indicates when the configuration was last written to disk. Maintained by
+ the cluster; for informational purposes only.
+ * - .. _have_quorum:
+
+ .. index::
+ pair: have-quorum; cib
+
+ have-quorum
+ - :ref:`boolean <boolean>`
+ -
+ - Indicates whether the cluster has quorum. If false, the cluster's
+ response is determined by ``no-quorum-policy`` (see below). Maintained
+ by the cluster.
+ * - .. _dc_uuid:
+
+ .. index::
+ pair: dc-uuid; cib
+
+ dc-uuid
+ - :ref:`text <text>`
+ -
+ - Node ID of the cluster's current designated controller (DC). Used and
+ maintained by the cluster.
+
+
+.. _cluster_options:
+
+Cluster Options
+###############
+
+Cluster options, as you might expect, control how the cluster behaves when
+confronted with various situations.
+
+They are grouped into sets within the ``crm_config`` section. In advanced
+configurations, there may be more than one set. (This will be described later
+in the chapter on :ref:`rules` where we will show how to have the cluster use
+different sets of options during working hours than during weekends.) For now,
+we will describe the simple case where each option is present at most once.
+
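+As an illustrative sketch (the ``id`` values here are arbitrary), a single
+option set within ``crm_config`` might look like this:
+
+.. topic:: Example cluster option set
+
+   .. code-block:: xml
+
+      <crm_config>
+        <cluster_property_set id="cib-bootstrap-options">
+          <nvpair id="option-no-quorum-policy" name="no-quorum-policy" value="stop"/>
+          <nvpair id="option-stonith-enabled" name="stonith-enabled" value="true"/>
+        </cluster_property_set>
+      </crm_config>
+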
+You can obtain an up-to-date list of cluster options, including their default
+values, by running the ``man pacemaker-schedulerd`` and
+``man pacemaker-controld`` commands.
+
+.. list-table:: **Cluster Options**
+ :class: longtable
+ :widths: 2 2 2 5
+ :header-rows: 1
+
+ * - Name
+ - Type
+ - Default
+ - Description
+ * - .. _cluster_name:
+
+ .. index::
+ pair: cluster option; cluster-name
+
+ cluster-name
+ - :ref:`text <text>`
+ -
+ - An (optional) name for the cluster as a whole. This is mostly for users'
+ convenience for use as desired in administration, but can be used in the
+ Pacemaker configuration in :ref:`rules` (as the ``#cluster-name``
+ :ref:`node attribute <node-attribute-expressions-special>`). It may also
+ be used by higher-level tools when displaying cluster information, and
+ by certain resource agents (for example, the ``ocf:heartbeat:GFS2``
+ agent stores the cluster name in filesystem meta-data).
+ * - .. _dc_version:
+
+ .. index::
+ pair: cluster option; dc-version
+
+ dc-version
+ - :ref:`version <version>`
+ - *detected*
+ - Version of Pacemaker on the cluster's designated controller (DC).
+ Maintained by the cluster, and intended for diagnostic purposes.
+ * - .. _cluster_infrastructure:
+
+ .. index::
+ pair: cluster option; cluster-infrastructure
+
+ cluster-infrastructure
+ - :ref:`text <text>`
+ - *detected*
+ - The messaging layer with which Pacemaker is currently running.
+ Maintained by the cluster, and intended for informational and diagnostic
+ purposes.
+ * - .. _no_quorum_policy:
+
+ .. index::
+ pair: cluster option; no-quorum-policy
+
+ no-quorum-policy
+ - :ref:`enumeration <enumeration>`
+ - stop
+ - What to do when the cluster does not have quorum. Allowed values:
+
+ * ``ignore:`` continue all resource management
+ * ``freeze:`` continue resource management, but don't recover resources
+ from nodes not in the affected partition
+ * ``stop:`` stop all resources in the affected cluster partition
+ * ``demote:`` demote promotable resources and stop all other resources
+ in the affected cluster partition *(since 2.0.5)*
+ * ``suicide:`` fence all nodes in the affected cluster partition
+ * - .. _batch_limit:
+
+ .. index::
+ pair: cluster option; batch-limit
+
+ batch-limit
+ - :ref:`integer <integer>`
+ - 0
+ - The maximum number of actions that the cluster may execute in parallel
+ across all nodes. The ideal value will depend on the speed and load
+ of your network and cluster nodes. If zero, the cluster will impose a
+ dynamically calculated limit only when any node has high load. If -1,
+ the cluster will not impose any limit.
+ * - .. _migration_limit:
+
+ .. index::
+ pair: cluster option; migration-limit
+
+ migration-limit
+ - :ref:`integer <integer>`
+ - -1
+ - The number of :ref:`live migration <live-migration>` actions that the
+ cluster is allowed to execute in parallel on a node. A value of -1 means
+ unlimited.
+ * - .. _symmetric_cluster:
+
+ .. index::
+ pair: cluster option; symmetric-cluster
+
+ symmetric-cluster
+ - :ref:`boolean <boolean>`
+ - true
+ - If true, resources can run on any node by default. If false, a resource
+ is allowed to run on a node only if a
+ :ref:`location constraint <location-constraint>` enables it.
+ * - .. _stop_all_resources:
+
+ .. index::
+ pair: cluster option; stop-all-resources
+
+ stop-all-resources
+ - :ref:`boolean <boolean>`
+ - false
+ - Whether all resources should be disallowed from running (can be useful
+ during maintenance or troubleshooting)
+ * - .. _stop_orphan_resources:
+
+ .. index::
+ pair: cluster option; stop-orphan-resources
+
+ stop-orphan-resources
+ - :ref:`boolean <boolean>`
+ - true
+ - Whether resources that have been deleted from the configuration should
+ be stopped. This value takes precedence over
+ :ref:`is-managed <is_managed>` (that is, even unmanaged resources will
+ be stopped when orphaned if this value is ``true``).
+ * - .. _stop_orphan_actions:
+
+ .. index::
+ pair: cluster option; stop-orphan-actions
+
+ stop-orphan-actions
+ - :ref:`boolean <boolean>`
+ - true
+ - Whether recurring :ref:`operations <operation>` that have been deleted
+ from the configuration should be cancelled
+ * - .. _start_failure_is_fatal:
+
+ .. index::
+ pair: cluster option; start-failure-is-fatal
+
+ start-failure-is-fatal
+ - :ref:`boolean <boolean>`
+ - true
+ - Whether a failure to start a resource on a particular node prevents
+ further start attempts on that node. If ``false``, the cluster will
+ decide whether the node is still eligible based on the resource's
+ current failure count and ``migration-threshold``.
+ * - .. _enable_startup_probes:
+
+ .. index::
+ pair: cluster option; enable-startup-probes
+
+ enable-startup-probes
+ - :ref:`boolean <boolean>`
+ - true
+ - Whether the cluster should check the pre-existing state of resources
+ when the cluster starts
+ * - .. _maintenance_mode:
+
+ .. index::
+ pair: cluster option; maintenance-mode
+
+ maintenance-mode
+ - :ref:`boolean <boolean>`
+ - false
+ - If true, the cluster will not start or stop any resource in the cluster,
+ and any recurring operations (expect those specifying ``role`` as
+ and any recurring operations (except those specifying ``role`` as
+ :ref:`maintenance <node_maintenance>` node attribute,
+ :ref:`is-managed <is_managed>` and :ref:`maintenance <rsc_maintenance>`
+ resource meta-attributes, and :ref:`enabled <op_enabled>` operation
+ meta-attribute.
+ * - .. _stonith_enabled:
+
+ .. index::
+ pair: cluster option; stonith-enabled
+
+ stonith-enabled
+ - :ref:`boolean <boolean>`
+ - true
+ - Whether the cluster is allowed to fence nodes (for example, failed nodes
+ and nodes with resources that can't be stopped).
+
+ If true, at least one fence device must be configured before resources
+ are allowed to run.
+
+ If false, unresponsive nodes are immediately assumed to be running no
+ resources, and resource recovery on online nodes starts without any
+ further protection (which can mean *data loss* if the unresponsive node
+ still accesses shared storage, for example). See also the
+ :ref:`requires <requires>` resource meta-attribute.
+ * - .. _stonith_action:
+
+ .. index::
+ pair: cluster option; stonith-action
+
+ stonith-action
+ - :ref:`enumeration <enumeration>`
+ - reboot
+ - Action the cluster should send to the fence agent when a node must be
+ fenced. Allowed values are ``reboot``, ``off``, and (for legacy agents
+ only) ``poweroff``.
+ * - .. _stonith_timeout:
+
+ .. index::
+ pair: cluster option; stonith-timeout
+
+ stonith-timeout
+ - :ref:`duration <duration>`
+ - 60s
+ - How long to wait for ``on``, ``off``, and ``reboot`` fence actions to
+ complete by default.
+ * - .. _stonith_max_attempts:
+
+ .. index::
+ pair: cluster option; stonith-max-attempts
+
+ stonith-max-attempts
+ - :ref:`score <score>`
+ - 10
+ - How many times fencing can fail for a target before the cluster will no
+ longer immediately re-attempt it. Any value below 1 will be ignored, and
+ the default will be used instead.
+ * - .. _stonith_watchdog_timeout:
+
+ .. index::
+ pair: cluster option; stonith-watchdog-timeout
+
+ stonith-watchdog-timeout
+ - :ref:`timeout <timeout>`
+ - 0
+ - If nonzero, and the cluster detects ``have-watchdog`` as ``true``, then
+ watchdog-based self-fencing will be performed via SBD when fencing is
+ required, without requiring a fencing resource explicitly configured.
+
+ If this is set to a positive value, unseen nodes are assumed to
+ self-fence within this much time.
+
+ **Warning:** It must be ensured that this value is larger than the
+ ``SBD_WATCHDOG_TIMEOUT`` environment variable on all nodes. Pacemaker
+ verifies the settings individually on all nodes and prevents startup, or
+ shuts down if the option is misconfigured at runtime. It is strongly recommended
+ that ``SBD_WATCHDOG_TIMEOUT`` be set to the same value on all nodes.
+
+ If this is set to a negative value, and ``SBD_WATCHDOG_TIMEOUT`` is set,
+ twice that value will be used.
+
+ **Warning:** In this case, it is essential (and currently not verified
+ by pacemaker) that ``SBD_WATCHDOG_TIMEOUT`` is set to the same value on
+ all nodes.
+ * - .. _concurrent-fencing:
+
+ .. index::
+ pair: cluster option; concurrent-fencing
+
+ concurrent-fencing
+ - :ref:`boolean <boolean>`
+ - false
+ - Whether the cluster is allowed to initiate multiple fence actions
+ concurrently. Fence actions initiated externally, such as via the
+ ``stonith_admin`` tool or an application such as DLM, or by the fencer
+ itself such as recurring device monitors and ``status`` and ``list``
+ commands, are not limited by this option.
+ * - .. _fence_reaction:
+
+ .. index::
+ pair: cluster option; fence-reaction
+
+ fence-reaction
+ - :ref:`enumeration <enumeration>`
+ - stop
+ - How should a cluster node react if notified of its own fencing? A
+ cluster node may receive notification of its own fencing if fencing is
+ misconfigured, or if fabric fencing is in use that doesn't cut cluster
+ communication. Allowed values are ``stop`` to attempt to immediately
+ stop Pacemaker and stay stopped, or ``panic`` to attempt to immediately
+ reboot the local node, falling back to stop on failure. The default is
+ likely to be changed to ``panic`` in a future release. *(since 2.0.3)*
+ * - .. _priority_fencing_delay:
+
+ .. index::
+ pair: cluster option; priority-fencing-delay
+
+ priority-fencing-delay
+ - :ref:`duration <duration>`
+ - 0
+ - Apply this delay to any fencing targeting the lost nodes with the
+ highest total resource priority when our cluster partition does not hold
+ the majority of nodes, so that the more significant nodes
+ potentially win any fencing match (especially meaningful in a
+ split-brain of a 2-node cluster). A promoted resource instance takes the
+ resource's priority plus 1 if the resource's priority is not 0. Any
+ static or random delays introduced by ``pcmk_delay_base`` and
+ ``pcmk_delay_max`` configured for the corresponding fencing resources
+ will be added to this delay. This delay should be significantly greater
+ than (safely twice) the maximum delay from those parameters. *(since
+ 2.0.4)*
+ * - .. _node_pending_timeout:
+
+ .. index::
+ pair: cluster option; node-pending-timeout
+
+ node-pending-timeout
+ - :ref:`duration <duration>`
+ - 0
+ - Fence nodes that do not join the controller process group within this
+ much time after joining the cluster, to allow the cluster to continue
+ managing resources. A value of 0 means pending nodes are never fenced; a
+ value of ``2h``, for example, means they are fenced after two hours.
+ *(since 2.1.7)*
+ * - .. _cluster_delay:
+
+ .. index::
+ pair: cluster option; cluster-delay
+
+ cluster-delay
+ - :ref:`duration <duration>`
+ - 60s
+ - If the DC requires an action to be executed on another node, it will
+ consider the action failed if it does not get a response from the other
+ node within this time (beyond the action's own timeout). The ideal value
+ will depend on the speed and load of your network and cluster nodes.
+ * - .. _dc_deadtime:
+
+ .. index::
+ pair: cluster option; dc-deadtime
+
+ dc-deadtime
+ - :ref:`duration <duration>`
+ - 20s
+ - How long to wait for a response from other nodes when electing a DC. The
+ ideal value will depend on the speed and load of your network and
+ cluster nodes.
+ * - .. _cluster_ipc_limit:
+
+ .. index::
+ pair: cluster option; cluster-ipc-limit
+
+ cluster-ipc-limit
+ - :ref:`nonnegative integer <nonnegative_integer>`
+ - 500
+ - The maximum IPC message backlog before one cluster daemon will
+ disconnect another. This is of use in large clusters, for which a good
+ value is the number of resources in the cluster multiplied by the number
+ of nodes. The default of 500 is also the minimum. Raise this if you see
+ "Evicting client" log messages for cluster daemon process IDs.
+ * - .. _pe_error_series_max:
+
+ .. index::
+ pair: cluster option; pe-error-series-max
+
+ pe-error-series-max
+ - :ref:`integer <integer>`
+ - -1
+ - The number of scheduler inputs resulting in errors to save. These inputs
+ can be helpful during troubleshooting and when reporting issues. A
+ negative value means save all inputs, and 0 means save none.
+ * - .. _pe_warn_series_max:
+
+ .. index::
+ pair: cluster option; pe-warn-series-max
+
+ pe-warn-series-max
+ - :ref:`integer <integer>`
+ - 5000
+ - The number of scheduler inputs resulting in warnings to save. These
+ inputs can be helpful during troubleshooting and when reporting issues.
+ A negative value means save all inputs, and 0 means save none.
+ * - .. _pe_input_series_max:
+
+ .. index::
+ pair: cluster option; pe-input-series-max
+
+ pe-input-series-max
+ - :ref:`integer <integer>`
+ - 4000
+ - The number of "normal" scheduler inputs to save. These inputs can be
+ helpful during troubleshooting and when reporting issues. A negative
+ value means save all inputs, and 0 means save none.
+ * - .. _enable_acl:
+
+ .. index::
+ pair: cluster option; enable-acl
+
+ enable-acl
+ - :ref:`boolean <boolean>`
+ - false
+ - Whether :ref:`access control lists <acl>` should be used to authorize
+ CIB modifications
+ * - .. _placement_strategy:
+
+ .. index::
+ pair: cluster option; placement-strategy
+
+ placement-strategy
+ - :ref:`enumeration <enumeration>`
+ - default
+ - How the cluster should assign resources to nodes (see
+ :ref:`utilization`). Allowed values are ``default``, ``utilization``,
+ ``balanced``, and ``minimal``.
+ * - .. _node_health_strategy:
+
+ .. index::
+ pair: cluster option; node-health-strategy
+
+ node-health-strategy
+ - :ref:`enumeration <enumeration>`
+ - none
+ - How the cluster should react to :ref:`node health <node-health>`
+ attributes. Allowed values are ``none``, ``migrate-on-red``,
+ ``only-green``, ``progressive``, and ``custom``.
+ * - .. _node_health_base:
+
+ .. index::
+ pair: cluster option; node-health-base
+
+ node-health-base
+ - :ref:`score <score>`
+ - 0
+ - The base health score assigned to a node. Only used when
+ ``node-health-strategy`` is ``progressive``.
+ * - .. _node_health_green:
+
+ .. index::
+ pair: cluster option; node-health-green
+
+ node-health-green
+ - :ref:`score <score>`
+ - 0
+ - The score to use for a node health attribute whose value is ``green``.
+ Only used when ``node-health-strategy`` is ``progressive`` or
+ ``custom``.
+ * - .. _node_health_yellow:
+
+ .. index::
+ pair: cluster option; node-health-yellow
+
+ node-health-yellow
+ - :ref:`score <score>`
+ - 0
+ - The score to use for a node health attribute whose value is ``yellow``.
+ Only used when ``node-health-strategy`` is ``progressive`` or
+ ``custom``.
+ * - .. _node_health_red:
+
+ .. index::
+ pair: cluster option; node-health-red
+
+ node-health-red
+ - :ref:`score <score>`
+ - 0
+ - The score to use for a node health attribute whose value is ``red``.
+ Only used when ``node-health-strategy`` is ``progressive`` or
+ ``custom``.
+ * - .. _cluster_recheck_interval:
+
+ .. index::
+ pair: cluster option; cluster-recheck-interval
+
+ cluster-recheck-interval
+ - :ref:`duration <duration>`
+ - 15min
+ - Pacemaker is primarily event-driven, and looks ahead to know when to
+ recheck the cluster for failure timeouts and most time-based rules
+ *(since 2.0.3)*. However, it will also recheck the cluster after this
+ amount of inactivity. This has two goals: rules with ``date_spec`` are
+ only guaranteed to be checked this often, and it also serves as a
+ fail-safe for some kinds of scheduler bugs. A value of 0 disables this
+ polling.
+ * - .. _shutdown_lock:
+
+ .. index::
+ pair: cluster option; shutdown-lock
+
+ shutdown-lock
+ - :ref:`boolean <boolean>`
+ - false
+ - The default of false allows active resources to be recovered elsewhere
+ when their node is cleanly shut down, which is what the vast majority of
+ users will want. However, some users prefer to make resources highly
+ available only for failures, with no recovery for clean shutdowns. If
+ this option is true, resources active on a node when it is cleanly shut
+ down are kept "locked" to that node (not allowed to run elsewhere) until
+ they start again on that node after it rejoins (or for at most
+ ``shutdown-lock-limit``, if set). Stonith resources and Pacemaker Remote
+ connections are never locked. Clone and bundle instances and the
+ promoted role of promotable clones are currently never locked, though
+ support could be added in a future release. Locks may be manually
+ cleared using the ``--refresh`` option of ``crm_resource`` (both the
+ resource and node must be specified; this works with remote nodes if
+ their connection resource's ``target-role`` is set to ``Stopped``, but
+ not if Pacemaker Remote is stopped on the remote node without disabling
+ the connection resource). *(since 2.0.4)*
+ * - .. _shutdown_lock_limit:
+
+ .. index::
+ pair: cluster option; shutdown-lock-limit
+
+ shutdown-lock-limit
+ - :ref:`duration <duration>`
+ - 0
+ - If ``shutdown-lock`` is true, and this is set to a nonzero time
+ duration, locked resources will be allowed to start after this much time
+ has passed since the node shutdown was initiated, even if the node has
+ not rejoined. (This works with remote nodes only if their connection
+ resource's ``target-role`` is set to ``Stopped``.) *(since 2.0.4)*
+ * - .. _remove_after_stop:
+
+ .. index::
+ pair: cluster option; remove-after-stop
+
+ remove-after-stop
+ - :ref:`boolean <boolean>`
+ - false
+ - *Deprecated* Whether the cluster should remove resources from
+ Pacemaker's executor after they are stopped. Values other than the
+ default are, at best, poorly tested and potentially dangerous. This
+ option is deprecated and will be removed in a future release.
+ * - .. _startup_fencing:
+
+ .. index::
+ pair: cluster option; startup-fencing
+
+ startup-fencing
+ - :ref:`boolean <boolean>`
+ - true
+ - *Advanced Use Only:* Whether the cluster should fence unseen nodes at
+ start-up. Setting this to false is unsafe, because the unseen nodes
+ could be active and running resources but unreachable. ``dc-deadtime``
+ acts as a grace period before this fencing, since a DC must be elected
+ to schedule fencing.
+ * - .. _election_timeout:
+
+ .. index::
+ pair: cluster option; election-timeout
+
+ election-timeout
+ - :ref:`duration <duration>`
+ - 2min
+ - *Advanced Use Only:* If a winner is not declared within this much time
+ of starting an election, the node that initiated the election will
+ declare itself the winner.
+ * - .. _shutdown_escalation:
+
+ .. index::
+ pair: cluster option; shutdown-escalation
+
+ shutdown-escalation
+ - :ref:`duration <duration>`
+ - 20min
+ - *Advanced Use Only:* The controller will exit immediately if a shutdown
+ does not complete within this much time.
+ * - .. _join_integration_timeout:
+
+ .. index::
+ pair: cluster option; join-integration-timeout
+
+ join-integration-timeout
+ - :ref:`duration <duration>`
+ - 3min
+ - *Advanced Use Only:* If you need to adjust this value, it probably
+ indicates the presence of a bug.
+ * - .. _join_finalization_timeout:
+
+ .. index::
+ pair: cluster option; join-finalization-timeout
+
+ join-finalization-timeout
+ - :ref:`duration <duration>`
+ - 30min
+ - *Advanced Use Only:* If you need to adjust this value, it probably
+ indicates the presence of a bug.
+ * - .. _transition_delay:
+
+ .. index::
+ pair: cluster option; transition-delay
+
+ transition-delay
+ - :ref:`duration <duration>`
+ - 0s
+ - *Advanced Use Only:* Delay cluster recovery for the configured interval
+ to allow for additional or related events to occur. This can be useful
+ if your configuration is sensitive to the order in which ping updates
+ arrive. Enabling this option will slow down cluster recovery under all
+ conditions.
diff --git a/doc/sphinx/Pacemaker_Explained/advanced-resources.rst b/doc/sphinx/Pacemaker_Explained/collective.rst
index a61b76d..a4fa9dc 100644
--- a/doc/sphinx/Pacemaker_Explained/advanced-resources.rst
+++ b/doc/sphinx/Pacemaker_Explained/collective.rst
@@ -1,5 +1,13 @@
-Advanced Resource Types
------------------------
+.. index:
+ single: collective resource
+ single: resource; collective
+
+Collective Resources
+--------------------
+
+Pacemaker supports several types of *collective* resources, which consist of
+multiple, related resource instances.
+
.. index:
single: group resource
@@ -540,11 +548,11 @@ been promoted before they can start.
Clone Stickiness
________________
-To achieve a stable allocation pattern, clones are slightly sticky by
-default. If no value for ``resource-stickiness`` is provided, the clone
-will use a value of 1. Being a small value, it causes minimal
-disturbance to the score calculations of other resources but is enough
-to prevent Pacemaker from needlessly moving copies around the cluster.
+To achieve stable assignments, clones are slightly sticky by default. If no
+value for ``resource-stickiness`` is provided, the clone will use a value of 1.
+Being a small value, it causes minimal disturbance to the score calculations of
+other resources but is enough to prevent Pacemaker from needlessly moving
+instances around the cluster.
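+
+As an illustrative sketch (the resource and ``id`` names are hypothetical), an
+explicit stickiness can be set as a clone meta-attribute:
+
+.. topic:: Explicit clone stickiness (sketch)
+
+   .. code-block:: xml
+
+      <clone id="WebIP-clone">
+        <meta_attributes id="WebIP-clone-meta">
+          <nvpair id="WebIP-clone-stickiness" name="resource-stickiness" value="1"/>
+        </meta_attributes>
+        <primitive id="WebIP" class="ocf" provider="heartbeat" type="IPaddr2"/>
+      </clone>
+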
.. note::
diff --git a/doc/sphinx/Pacemaker_Explained/constraints.rst b/doc/sphinx/Pacemaker_Explained/constraints.rst
index ab34c9f..a78d6c2 100644
--- a/doc/sphinx/Pacemaker_Explained/constraints.rst
+++ b/doc/sphinx/Pacemaker_Explained/constraints.rst
@@ -7,49 +7,6 @@
Resource Constraints
--------------------
-.. index::
- single: resource; score
- single: node; score
-
-Scores
-######
-
-Scores of all kinds are integral to how the cluster works.
-Practically everything from moving a resource to deciding which
-resource to stop in a degraded cluster is achieved by manipulating
-scores in some way.
-
-Scores are calculated per resource and node. Any node with a
-negative score for a resource can't run that resource. The cluster
-places a resource on the node with the highest score for it.
-
-Infinity Math
-_____________
-
-Pacemaker implements **INFINITY** (or equivalently, **+INFINITY**) internally as a
-score of 1,000,000. Addition and subtraction with it follow these three basic
-rules:
-
-* Any value + **INFINITY** = **INFINITY**
-
-* Any value - **INFINITY** = -**INFINITY**
-
-* **INFINITY** - **INFINITY** = **-INFINITY**
-
-.. note::
-
- What if you want to use a score higher than 1,000,000? Typically this possibility
- arises when someone wants to base the score on some external metric that might
- go above 1,000,000.
-
- The short answer is you can't.
-
- The long answer is it is sometimes possible work around this limitation
- creatively. You may be able to set the score to some computed value based on
- the external metric rather than use the metric directly. For nodes, you can
- store the metric as a node attribute, and query the attribute when computing
- the score (possibly as part of a custom resource agent).
-
.. _location-constraint:
.. index::
@@ -434,6 +391,20 @@ Because the above example lets ``symmetrical`` default to TRUE, **Webserver**
must be stopped before **Database** can be stopped, and **Webserver** should be
stopped before **IP** if they both need to be stopped.
+Symmetric and asymmetric ordering
+_________________________________
+
+A mandatory symmetric ordering of "start A then start B" implies not only that
+the start actions must be ordered, but that B is not allowed to be active
+unless A is active. For example, if the ordering is added to the configuration
+when A is stopped (due to target-role, failure, etc.) and B is already active,
+then B will be stopped.
+
+By contrast, asymmetric ordering of "start A then start B" means the stops can
+occur in either order, which implies that B *can* remain active in the same
+situation.
+
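+As an illustrative sketch (the resource names are hypothetical), an asymmetric
+ordering is configured by setting ``symmetrical`` to false on the constraint:
+
+.. topic:: Asymmetric ordering (sketch)
+
+   .. code-block:: xml
+
+      <rsc_order id="order-A-then-B" first="A" then="B" symmetrical="false"/>
+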
+
.. index::
single: colocation
single: constraint; colocation
@@ -535,8 +506,8 @@ _____________________
| | | If ``rsc`` and ``with-rsc`` are specified, and ``rsc`` |
| | | is a :ref:`promotable clone <s-resource-promotable>`, |
| | | the constraint applies only to ``rsc`` instances in |
- | | | this role. Allowed values: ``Started``, ``Promoted``, |
- | | | ``Unpromoted``. For details, see |
+ | | | this role. Allowed values: ``Started``, ``Stopped``, |
+ | | | ``Promoted``, ``Unpromoted``. For details, see |
| | | :ref:`promotable-clone-constraints`. |
+----------------+----------------+--------------------------------------------------------+
| with-rsc-role | Started | .. index:: |
@@ -548,8 +519,8 @@ _____________________
| | | ``with-rsc`` is a |
| | | :ref:`promotable clone <s-resource-promotable>`, the |
| | | constraint applies only to ``with-rsc`` instances in |
- | | | this role. Allowed values: ``Started``, ``Promoted``, |
- | | | ``Unpromoted``. For details, see |
+ | | | this role. Allowed values: ``Started``, ``Stopped``, |
+ | | | ``Promoted``, ``Unpromoted``. For details, see |
| | | :ref:`promotable-clone-constraints`. |
+----------------+----------------+--------------------------------------------------------+
| influence | value of | .. index:: |
diff --git a/doc/sphinx/Pacemaker_Explained/index.rst b/doc/sphinx/Pacemaker_Explained/index.rst
index de2ddd9..63387f3 100644
--- a/doc/sphinx/Pacemaker_Explained/index.rst
+++ b/doc/sphinx/Pacemaker_Explained/index.rst
@@ -18,15 +18,16 @@ Table of Contents
:numbered:
intro
- options
+ local-options
+ cluster-options
nodes
resources
+ operations
constraints
fencing
alerts
rules
- advanced-options
- advanced-resources
+ collective
reusing-configuration
utilization
acls
diff --git a/doc/sphinx/Pacemaker_Explained/local-options.rst b/doc/sphinx/Pacemaker_Explained/local-options.rst
new file mode 100644
index 0000000..91eda66
--- /dev/null
+++ b/doc/sphinx/Pacemaker_Explained/local-options.rst
@@ -0,0 +1,515 @@
+Host-Local Configuration
+------------------------
+
+.. index::
+ pair: XML element; configuration
+
+.. note:: Directory and file paths below may differ on your system depending on
+ your Pacemaker build settings. Check your Pacemaker configuration
+ file to find the correct paths.
+
+Pacemaker supports several host-local configuration options. These options can
+be configured on each node in the main Pacemaker configuration file
+(|PCMK_CONFIG_FILE|) in the format ``<NAME>="<VALUE>"``. They work by setting
+environment variables when Pacemaker daemons start up.
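+For example, a line such as ``PCMK_debug="pacemaker-controld"`` in that file
+enables debug logging for the controller subsystem on that node.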
+
+.. list-table:: **Local Options**
+ :class: longtable
+ :widths: 2 2 2 5
+ :header-rows: 1
+
+ * - Name
+ - Type
+ - Default
+ - Description
+ * - .. _pcmk_logfacility:
+
+ .. index::
+ pair: node option; PCMK_logfacility
+
+ PCMK_logfacility
+ - :ref:`enumeration <enumeration>`
+ - daemon
+ - Enable logging via the system log or journal, using the specified log
+ facility. Messages sent here are of value to all Pacemaker
+ administrators. This can be disabled using ``none``, but that is not
+ recommended. Allowed values:
+
+ * ``none``
+ * ``daemon``
+ * ``user``
+ * ``local0``
+ * ``local1``
+ * ``local2``
+ * ``local3``
+ * ``local4``
+ * ``local5``
+ * ``local6``
+ * ``local7``
+
+ * - .. _pcmk_logpriority:
+
+ .. index::
+ pair: node option; PCMK_logpriority
+
+ PCMK_logpriority
+ - :ref:`enumeration <enumeration>`
+ - notice
+ - Unless system logging is disabled using ``PCMK_logfacility=none``,
+ messages of the specified log severity and higher will be sent to the
+ system log. The default is appropriate for most installations. Allowed
+ values:
+
+ * ``emerg``
+ * ``alert``
+ * ``crit``
+ * ``error``
+ * ``warning``
+ * ``notice``
+ * ``info``
+ * ``debug``
+
+ * - .. _pcmk_logfile:
+
+ .. index::
+ pair: node option; PCMK_logfile
+
+ PCMK_logfile
+ - :ref:`text <text>`
+ - |PCMK_LOG_FILE|
+ - Unless set to ``none``, more detailed log messages will be sent to the
+ specified file (in addition to the system log, if enabled). These
+ messages may have extended information, and will include messages of info
+ severity. This log is of more use to developers and advanced system
+ administrators, and when reporting problems.
+
+ * - .. _pcmk_logfile_mode:
+
+ .. index::
+ pair: node option; PCMK_logfile_mode
+
+ PCMK_logfile_mode
+ - :ref:`text <text>`
+ - 0660
+ - Pacemaker will set the permissions on the detail log to this value (see
+ ``chmod(1)``).
+
+ * - .. _pcmk_debug:
+
+ .. index::
+ pair: node option; PCMK_debug
+
+ PCMK_debug
+ - :ref:`enumeration <enumeration>`
+ - no
+ - Whether to send debug severity messages to the detail log. This may be
+ set for all subsystems (``yes`` or ``no``) or for specific
+ (comma-separated) subsystems. Allowed subsystems are:
+
+ * ``pacemakerd``
+ * ``pacemaker-attrd``
+ * ``pacemaker-based``
+ * ``pacemaker-controld``
+ * ``pacemaker-execd``
+ * ``pacemaker-fenced``
+ * ``pacemaker-schedulerd``
+
+ Example: ``PCMK_debug="pacemakerd,pacemaker-execd"``
+
+ * - .. _pcmk_stderr:
+
+ .. index::
+ pair: node option; PCMK_stderr
+
+ PCMK_stderr
+ - :ref:`boolean <boolean>`
+ - no
+ - *Advanced Use Only:* Whether to send daemon log messages to stderr. This
+ would be useful only during troubleshooting, when starting Pacemaker
+ manually on the command line.
+
+ Setting this option in the configuration file is pointless, since the
+ file is not read when starting Pacemaker manually. However, it can be set
+ directly as an environment variable on the command line.
+
+ * - .. _pcmk_trace_functions:
+
+ .. index::
+ pair: node option; PCMK_trace_functions
+
+ PCMK_trace_functions
+ - :ref:`text <text>`
+ -
+ - *Advanced Use Only:* Send debug and trace severity messages from these
+ (comma-separated) source code functions to the detail log.
+
+ Example:
+ ``PCMK_trace_functions="func1,func2"``
+
+ * - .. _pcmk_trace_files:
+
+ .. index::
+ pair: node option; PCMK_trace_files
+
+ PCMK_trace_files
+ - :ref:`text <text>`
+ -
+ - *Advanced Use Only:* Send debug and trace severity messages from all
+ functions in these (comma-separated) source file names to the detail log.
+
+ Example: ``PCMK_trace_files="file1.c,file2.c"``
+
+ * - .. _pcmk_trace_formats:
+
+ .. index::
+ pair: node option; PCMK_trace_formats
+
+ PCMK_trace_formats
+ - :ref:`text <text>`
+ -
+ - *Advanced Use Only:* Send trace severity messages that are generated by
+ these (comma-separated) format strings in the source code to the detail
+ log.
+
+ Example: ``PCMK_trace_formats="Error: %s (%d)"``
+
+ * - .. _pcmk_trace_tags:
+
+ .. index::
+ pair: node option; PCMK_trace_tags
+
+ PCMK_trace_tags
+ - :ref:`text <text>`
+ -
+ - *Advanced Use Only:* Send debug and trace severity messages related to
+ these (comma-separated) resource IDs to the detail log.
+
+ Example: ``PCMK_trace_tags="client-ip,dbfs"``
+
+ * - .. _pcmk_blackbox:
+
+ .. index::
+ pair: node option; PCMK_blackbox
+
+ PCMK_blackbox
+ - :ref:`enumeration <enumeration>`
+ - no
+ - *Advanced Use Only:* Enable blackbox logging globally (``yes`` or ``no``)
+ or by subsystem. A blackbox contains a rolling buffer of all logs (of all
+ severities). Blackboxes are stored under |CRM_BLACKBOX_DIR| by default,
+ and their contents can be viewed using the ``qb-blackbox(8)``
+ command.
+
+ The blackbox recorder can be enabled at start using this variable, or at
+ runtime by sending a Pacemaker subsystem daemon process a ``SIGUSR1`` or
+ ``SIGTRAP`` signal, and disabled by sending ``SIGUSR2`` (see
+ ``kill(1)``). The blackbox will be written after a crash, assertion
+ failure, or ``SIGTRAP`` signal.
+
+ See :ref:`PCMK_debug <pcmk_debug>` for allowed subsystems.
+
+ Example:
+ ``PCMK_blackbox="pacemakerd,pacemaker-execd"``
+
+ * - .. _pcmk_trace_blackbox:
+
+ .. index::
+ pair: node option; PCMK_trace_blackbox
+
+ PCMK_trace_blackbox
+ - :ref:`enumeration <enumeration>`
+ -
+ - *Advanced Use Only:* Write a blackbox whenever the message at the
+ specified function and line is logged. Multiple entries may be
+ comma-separated.
+
+ Example: ``PCMK_trace_blackbox="remote.c:144,remote.c:149"``
+
+ * - .. _pcmk_node_start_state:
+
+ .. index::
+ pair: node option; PCMK_node_start_state
+
+ PCMK_node_start_state
+ - :ref:`enumeration <enumeration>`
+ - default
+ - By default, the local host will join the cluster in an online or standby
+ state when Pacemaker first starts depending on whether it was previously
+ state when Pacemaker first starts, depending on whether it was previously
+ ``online``, it will force the local host to join in the specified state.
+
+ * - .. _pcmk_node_action_limit:
+
+ .. index::
+ pair: node option; PCMK_node_action_limit
+
+ PCMK_node_action_limit
+ - :ref:`nonnegative integer <nonnegative_integer>`
+ -
+ - Specify the maximum number of jobs that can be scheduled on this node. If
+ set, this overrides the ``node-action-limit`` cluster property for this
+ node.
+
+ * - .. _pcmk_shutdown_delay:
+
+ .. index::
+ pair: node option; PCMK_shutdown_delay
+
+ PCMK_shutdown_delay
+ - :ref:`timeout <timeout>`
+ -
+ - Specify a delay before shutting down ``pacemakerd`` after shutting down
+ all other Pacemaker daemons.
+
+ * - .. _pcmk_fail_fast:
+
+ .. index::
+ pair: node option; PCMK_fail_fast
+
+ PCMK_fail_fast
+ - :ref:`boolean <boolean>`
+ - no
+ - By default, if a Pacemaker subsystem crashes, the main ``pacemakerd``
+ process will attempt to restart it. If this variable is set to ``yes``,
+ ``pacemakerd`` will panic the local host instead.
+
+ * - .. _pcmk_panic_action:
+
+ .. index::
+ pair: node option; PCMK_panic_action
+
+ PCMK_panic_action
+ - :ref:`enumeration <enumeration>`
+ - reboot
+ - Pacemaker will panic the local host under certain conditions. By default,
+ this means rebooting the host. This variable can change that behavior: if
+ ``crash``, trigger a kernel crash (useful if you want a kernel dump to
+ investigate); if ``sync-reboot`` or ``sync-crash``, synchronize
+ filesystems before rebooting the host or triggering a kernel crash. The
+ sync values are more likely to preserve log messages, but with the risk
+ that the host may be left active if the synchronization hangs.
+
+ * - .. _pcmk_authkey_location:
+
+ .. index::
+ pair: node option; PCMK_authkey_location
+
+ PCMK_authkey_location
+ - :ref:`text <text>`
+ - |PCMK_AUTHKEY_FILE|
+ - Use the contents of this file as the authorization key to use with
+ Pacemaker Remote connections. This file must be readable by Pacemaker
+ daemons (that is, it must allow read permissions to either the
+ |CRM_DAEMON_USER| user or the |CRM_DAEMON_GROUP| group), and its contents
+ must be identical on all nodes.
+
+ * - .. _pcmk_remote_address:
+
+ .. index::
+ pair: node option; PCMK_remote_address
+
+ PCMK_remote_address
+ - :ref:`text <text>`
+ -
+ - By default, if the Pacemaker Remote service is run on the local node, it
+ will listen for connections on all IP addresses. This may be set to one
+ address to listen on instead, as a resolvable hostname or as a numeric
+ IPv4 or IPv6 address. When resolving names or listening on all addresses,
+ IPv6 will be preferred if available. When listening on an IPv6 address,
+ IPv4 clients will be supported via IPv4-mapped IPv6 addresses.
+
+ Example: ``PCMK_remote_address="192.0.2.1"``
+
+ * - .. _pcmk_remote_port:
+
+ .. index::
+ pair: node option; PCMK_remote_port
+
+ PCMK_remote_port
+ - :ref:`port <port>`
+ - 3121
+ - Use this TCP port number for Pacemaker Remote node connections. This
+ value must be the same on all nodes.
+
+ * - .. _pcmk_remote_pid1:
+
+ .. index::
+ pair: node option; PCMK_remote_pid1
+
+ PCMK_remote_pid1
+ - :ref:`enumeration <enumeration>`
+ - default
+ - *Advanced Use Only:* When a bundle resource's ``run-command`` option is
+ left at its default, Pacemaker Remote runs as PID 1 in the bundle's
+ containers. When it does so, it loads environment variables from the
+ container's |PCMK_INIT_ENV_FILE| and performs the PID 1 responsibility of
+ reaping dead subprocesses.
+
+ This option controls whether those actions are performed when Pacemaker
+ Remote is not running as PID 1. It is intended primarily for developer
+ testing but can be useful when ``run-command`` is set to a separate,
+ custom PID 1 process that launches Pacemaker Remote.
+
+ * ``full``: Pacemaker Remote loads environment variables from
+ |PCMK_INIT_ENV_FILE| and reaps dead subprocesses.
+ * ``vars``: Pacemaker Remote loads environment variables from
+ |PCMK_INIT_ENV_FILE| but does not reap dead subprocesses.
+ * ``default``: Pacemaker Remote performs neither action.
+
+ If Pacemaker Remote is running as PID 1, this option is ignored, and the
+ behavior is the same as for ``full``.
+
+ * - .. _pcmk_tls_priorities:
+
+ .. index::
+ pair: node option; PCMK_tls_priorities
+
+ PCMK_tls_priorities
+ - :ref:`text <text>`
+ - |PCMK_GNUTLS_PRIORITIES|
+ - *Advanced Use Only:* These GnuTLS cipher priorities will be used for TLS
+ connections (whether for Pacemaker Remote connections or remote CIB
+ access, when enabled). See:
+
+ https://gnutls.org/manual/html_node/Priority-Strings.html
+
+ Pacemaker will append ``":+ANON-DH"`` for remote CIB access and
+ ``":+DHE-PSK:+PSK"`` for Pacemaker Remote connections, as they are
+ required for the respective functionality.
+
+ Example:
+ ``PCMK_tls_priorities="SECURE128:+SECURE192"``
+
+ * - .. _pcmk_dh_min_bits:
+
+ .. index::
+ pair: node option; PCMK_dh_min_bits
+
+ PCMK_dh_min_bits
+ - :ref:`nonnegative integer <nonnegative_integer>`
+ - 0 (no minimum)
+ - *Advanced Use Only:* Set a lower bound on the bit length of the prime
+ number generated for Diffie-Hellman parameters needed by TLS connections.
+ The default is no minimum.
+
+ The server (Pacemaker Remote daemon, or CIB manager configured to accept
+ remote clients) will use this value to provide a floor for the value
+ recommended by the GnuTLS library. The library will only accept a limited
+ number of specific values, which vary by library version, so setting
+ these is recommended only when required for compatibility with specific
+ client versions.
+
+ Clients (connecting cluster nodes or remote CIB commands) will require
+ that the server use a prime of at least this size. This is recommended
+ only when the value must be lowered in order for the client's GnuTLS
+ library to accept a connection to an older server.
+
+ * - .. _pcmk_dh_max_bits:
+
+ .. index::
+ pair: node option; PCMK_dh_max_bits
+
+ PCMK_dh_max_bits
+ - :ref:`nonnegative integer <nonnegative_integer>`
+ - 0 (no maximum)
+ - *Advanced Use Only:* Set an upper bound on the bit length of the prime
+ number generated for Diffie-Hellman parameters needed by TLS connections.
+ The default is no maximum.
+
+ The server (Pacemaker Remote daemon, or CIB manager configured to accept
+ remote clients) will use this value to provide a ceiling for the value
+ recommended by the GnuTLS library. The library will only accept a limited
+ number of specific values, which vary by library version, so setting
+ these is recommended only when required for compatibility with specific
+ client versions.
+
+ Clients do not use ``PCMK_dh_max_bits``.
+
+ * - .. _pcmk_ipc_type:
+
+ .. index::
+ pair: node option; PCMK_ipc_type
+
+ PCMK_ipc_type
+ - :ref:`enumeration <enumeration>`
+ - shared-mem
+ - *Advanced Use Only:* Force use of a particular IPC method. Allowed values:
+
+ * ``shared-mem``
+ * ``socket``
+ * ``posix``
+ * ``sysv``
+
+ * - .. _pcmk_ipc_buffer:
+
+ .. index::
+ pair: node option; PCMK_ipc_buffer
+
+ PCMK_ipc_buffer
+ - :ref:`nonnegative integer <nonnegative_integer>`
+ - 131072
+ - *Advanced Use Only:* Specify an IPC buffer size in bytes. This can be
+ useful when connecting to large clusters that result in messages
+ exceeding the default size (which will also result in log messages
+ referencing this variable).
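+
+ Example: ``PCMK_ipc_buffer="262144"``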
+
+ * - .. _pcmk_cluster_type:
+
+ .. index::
+ pair: node option; PCMK_cluster_type
+
+ PCMK_cluster_type
+ - :ref:`enumeration <enumeration>`
+ - corosync
+ - *Advanced Use Only:* Specify the cluster layer to be used. If unset,
+ Pacemaker will detect and use a supported cluster layer, if available.
+ Currently, ``"corosync"`` is the only supported cluster layer. If
+ multiple layers are supported in the future, this will allow overriding
+ Pacemaker's automatic detection to select a specific one.
+
+ * - .. _pcmk_schema_directory:
+
+ .. index::
+ pair: node option; PCMK_schema_directory
+
+ PCMK_schema_directory
+ - :ref:`text <text>`
+ - |CRM_SCHEMA_DIRECTORY|
+ - *Advanced Use Only:* Specify an alternate location for RNG schemas and
+ XSL transforms.
+
+ * - .. _pcmk_valgrind_enabled:
+
+ .. index::
+ pair: node option; PCMK_valgrind_enabled
+
+ PCMK_valgrind_enabled
+ - :ref:`enumeration <enumeration>`
+ - no
+ - *Advanced Use Only:* Whether subsystem daemons should be run under
+ ``valgrind``. Allowed values are the same as for ``PCMK_debug``.
+
+ * - .. _pcmk_callgrind_enabled:
+
+ .. index::
+ pair: node option; PCMK_callgrind_enabled
+
+ PCMK_callgrind_enabled
+ - :ref:`enumeration <enumeration>`
+ - no
+ - *Advanced Use Only:* Whether subsystem daemons should be run under
+ ``valgrind`` with the ``callgrind`` tool enabled. Allowed values are the
+ same as for ``PCMK_debug``.
+
+ * - .. _valgrind_opts:
+
+ .. index::
+ pair: node option; VALGRIND_OPTS
+
+ VALGRIND_OPTS
+ - :ref:`text <text>`
+ -
+ - *Advanced Use Only:* Pass these options to valgrind, when enabled (see
+ ``valgrind(1)``). ``"--vgdb=no"`` should usually be specified because
+ ``pacemaker-execd`` can lower privileges when executing commands, which
+ would otherwise leave unremovable files in ``/tmp``.
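+
+ Example: ``VALGRIND_OPTS="--leak-check=full --vgdb=no"``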
diff --git a/doc/sphinx/Pacemaker_Explained/nodes.rst b/doc/sphinx/Pacemaker_Explained/nodes.rst
index 6fcadb3..378b067 100644
--- a/doc/sphinx/Pacemaker_Explained/nodes.rst
+++ b/doc/sphinx/Pacemaker_Explained/nodes.rst
@@ -105,6 +105,9 @@ To read back the value that was just set:
The ``--type nodes`` indicates that this is a permanent node attribute;
``--type status`` would indicate a transient node attribute.
+
+.. _special_node_attributes:
+
Special node attributes
#######################
@@ -154,35 +157,26 @@ unset to be false, and anything else to be an error.
| | ``crm_resource --cleanup`` commands rather |
| | than directly. |
+----------------------------+-----------------------------------------------------+
- | maintenance | .. index:: |
- | | pair: node attribute; maintenance |
+ | maintenance | .. _node_maintenance: |
| | |
- | | Similar to the ``maintenance-mode`` |
- | | :ref:`cluster option <cluster_options>`, but |
- | | for a single node. If true, resources will |
- | | not be started or stopped on the node, |
- | | resources and individual clone instances |
- | | running on the node will become unmanaged, |
- | | and any recurring operations for those will |
- | | be cancelled. |
+ | | .. index:: |
+ | | pair: node attribute; maintenance |
| | |
- | | **Warning:** Restarting pacemaker on a node that is |
- | | in single-node maintenance mode will likely |
- | | lead to undesirable effects. If |
- | | ``maintenance`` is set as a transient |
- | | attribute, it will be erased when |
- | | Pacemaker is stopped, which will |
- | | immediately take the node out of |
- | | maintenance mode and likely get it |
- | | fenced. Even if permanent, if Pacemaker |
- | | is restarted, any resources active on the |
- | | node will have their local history erased |
- | | when the node rejoins, so the cluster |
- | | will no longer consider them running on |
- | | the node and thus will consider them |
- | | managed again, leading them to be started |
- | | elsewhere. This behavior might be |
- | | improved in a future release. |
+ | | If true, the cluster will not start or stop any |
+ | | resources on this node. Any resources active on the |
+ | | node become unmanaged, and any recurring operations |
+ | | for those resources (except those specifying |
+ | | ``role`` as ``Stopped``) will be paused. The |
+ | | :ref:`maintenance-mode <maintenance_mode>` cluster |
+ | | option, if true, overrides this. If this attribute |
+ | | is true, it overrides the |
+ | | :ref:`is-managed <is_managed>` and |
+ | | :ref:`maintenance <rsc_maintenance>` |
+ | | meta-attributes of affected resources and the |
+ | | :ref:`enabled <op_enabled>` meta-attribute for |
+ | | affected recurring actions. Pacemaker should not be |
+ | | restarted on a node that is in single-node |
+ | | maintenance mode. |
+----------------------------+-----------------------------------------------------+
| probe_complete | .. index:: |
| | pair: node attribute; probe_complete |
diff --git a/doc/sphinx/Pacemaker_Explained/operations.rst b/doc/sphinx/Pacemaker_Explained/operations.rst
new file mode 100644
index 0000000..b1ad65d
--- /dev/null
+++ b/doc/sphinx/Pacemaker_Explained/operations.rst
@@ -0,0 +1,623 @@
+.. index::
+ single: resource; action
+ single: resource; operation
+
+.. _operation:
+
+Resource Operations
+-------------------
+
+*Operations* are actions the cluster can perform on a resource by calling the
+resource agent. Resource agents must support certain common operations such as
+start, stop, and monitor, and may implement any others.
+
+Operations may be explicitly configured for two purposes: to override defaults
+for options (such as timeout) that the cluster will use whenever it initiates
+the operation, and to run an operation on a recurring basis (for example, to
+monitor the resource for failure).
+
+.. topic:: An OCF resource with a non-default start timeout
+
+ .. code-block:: xml
+
+ <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
+ <operations>
+ <op id="Public-IP-start" name="start" timeout="60s"/>
+ </operations>
+ <instance_attributes id="params-public-ip">
+ <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
+ </instance_attributes>
+ </primitive>
+
+Pacemaker identifies operations by a combination of name and interval, so this
+combination must be unique for each resource. That is, you should not configure
+two operations for the same resource with the same name and interval.
+
+.. _operation_properties:
+
+Operation Properties
+####################
+
+Operation properties may be specified directly in the ``op`` element as
+XML attributes, or in a separate ``meta_attributes`` block as ``nvpair`` elements.
+XML attributes take precedence over ``nvpair`` elements if both are specified.
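+
+For example, the following two definitions of a recurring monitor are
+equivalent ways of setting its ``timeout`` (the ``id`` values are illustrative,
+and only one form would be used in practice):
+
+.. code-block:: xml
+
+ <op id="Public-IP-monitor" name="monitor" interval="60s" timeout="30s"/>
+
+ <op id="Public-IP-monitor" name="monitor" interval="60s">
+ <meta_attributes id="Public-IP-monitor-meta">
+ <nvpair id="Public-IP-monitor-timeout" name="timeout" value="30s"/>
+ </meta_attributes>
+ </op>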
+
+.. table:: **Properties of an Operation**
+ :class: longtable
+ :widths: 1 2 3
+
+ +----------------+-----------------------------------+-----------------------------------------------------+
+ | Field | Default | Description |
+ +================+===================================+=====================================================+
+ | id | | .. index:: |
+ | | | single: id; action property |
+ | | | single: action; property, id |
+ | | | |
+ | | | A unique name for the operation. |
+ +----------------+-----------------------------------+-----------------------------------------------------+
+ | name | | .. index:: |
+ | | | single: name; action property |
+ | | | single: action; property, name |
+ | | | |
+ | | | The action to perform. This can be any action |
+ | | | supported by the agent; common values include |
+ | | | ``monitor``, ``start``, and ``stop``. |
+ +----------------+-----------------------------------+-----------------------------------------------------+
+ | interval | 0 | .. index:: |
+ | | | single: interval; action property |
+ | | | single: action; property, interval |
+ | | | |
+ | | | How frequently (in seconds) to perform the |
+ | | | operation. A value of 0 means "when needed". |
+ | | | A positive value defines a *recurring action*, |
+ | | | which is typically used with |
+ | | | :ref:`monitor <s-resource-monitoring>`. |
+ +----------------+-----------------------------------+-----------------------------------------------------+
+ | timeout | | .. index:: |
+ | | | single: timeout; action property |
+ | | | single: action; property, timeout |
+ | | | |
+ | | | How long to wait before declaring the action |
+ | | | has failed |
+ +----------------+-----------------------------------+-----------------------------------------------------+
+ | on-fail | Varies by action: | .. index:: |
+ | | | single: on-fail; action property |
+ | | * ``stop``: ``fence`` if | single: action; property, on-fail |
+ | | ``stonith-enabled`` is true | |
+ | | or ``block`` otherwise | The action to take if this action ever fails. |
+ | | * ``demote``: ``on-fail`` of the | Allowed values: |
+ | | ``monitor`` action with | |
+ | | ``role`` set to ``Promoted``, | * ``ignore:`` Pretend the resource did not fail. |
+ | | if present, enabled, and | * ``block:`` Don't perform any further operations |
+ | | configured to a value other | on the resource. |
+ | | than ``demote``, or ``restart`` | * ``stop:`` Stop the resource and do not start |
+ | | otherwise | it elsewhere. |
+ | | * all other actions: ``restart`` | * ``demote:`` Demote the resource, without a |
+ | | | full restart. This is valid only for ``promote`` |
+ | | | actions, and for ``monitor`` actions with both |
+ | | | a nonzero ``interval`` and ``role`` set to |
+ | | | ``Promoted``; for any other action, a |
+ | | | configuration error will be logged, and the |
+ | | | default behavior will be used. *(since 2.0.5)* |
+ | | | * ``restart:`` Stop the resource and start it |
+ | | | again (possibly on a different node). |
+ | | | * ``fence:`` STONITH the node on which the |
+ | | | resource failed. |
+ | | | * ``standby:`` Move *all* resources away from the |
+ | | | node on which the resource failed. |
+ +----------------+-----------------------------------+-----------------------------------------------------+
+ | enabled | TRUE | .. _op_enabled: |
+ | | | |
+ | | | .. index:: |
+ | | | single: enabled; action property |
+ | | | single: action; property, enabled |
+ | | | |
+ | | | If ``false``, ignore this operation definition. |
+ | | | This does not suppress all actions of this type, |
+ | | | but is typically used to pause a recurring monitor. |
+ | | | This can complement the resource being unmanaged |
+ | | | (:ref:`is-managed <is_managed>` set to ``false``), |
+ | | | which does not stop recurring operations. |
+ | | | Maintenance mode, which does stop configured |
+ | | | monitors, overrides this setting. Allowed values: |
+ | | | ``true``, ``false``. |
+ +----------------+-----------------------------------+-----------------------------------------------------+
+ | record-pending | TRUE | .. index:: |
+ | | | single: record-pending; action property |
+ | | | single: action; property, record-pending |
+ | | | |
+ | | | If ``true``, the intention to perform the operation |
+ | | | is recorded so that GUIs and CLI tools can indicate |
+ | | | that an operation is in progress. This is best set |
+ | | | as an *operation default* |
+ | | | (see :ref:`s-operation-defaults`). Allowed values: |
+ | | | ``true``, ``false``. |
+ +----------------+-----------------------------------+-----------------------------------------------------+
+ | role | | .. index:: |
+ | | | single: role; action property |
+ | | | single: action; property, role |
+ | | | |
+ | | | Run the operation only on node(s) that the cluster |
+ | | | thinks should be in the specified role. This only |
+ | | | makes sense for recurring ``monitor`` operations. |
+ | | | Allowed (case-sensitive) values: ``Stopped``, |
+ | | | ``Started``, and in the case of :ref:`promotable |
+ | | | clone resources <s-resource-promotable>`, |
+ | | | ``Unpromoted`` and ``Promoted``. |
+ +----------------+-----------------------------------+-----------------------------------------------------+
+
+.. note::
+
+ When ``on-fail`` is set to ``demote``, recovery from failure by a successful
+ demote causes the cluster to recalculate whether and where a new instance
+ should be promoted. The node with the failure is eligible, so if promotion
+ scores have not changed, it will be promoted again.
+
+ There is no direct equivalent of ``migration-threshold`` for the promoted
+ role, but the same effect can be achieved with a location constraint using a
+ :ref:`rule <rules>` with a node attribute expression for the resource's fail
+ count.
+
+ For example, to immediately ban the promoted role from a node with any
+ failed promote or promoted instance monitor:
+
+ .. code-block:: xml
+
+ <rsc_location id="loc1" rsc="my_primitive">
+ <rule id="rule1" score="-INFINITY" role="Promoted" boolean-op="or">
+ <expression id="expr1" attribute="fail-count-my_primitive#promote_0"
+ operation="gte" value="1"/>
+ <expression id="expr2" attribute="fail-count-my_primitive#monitor_10000"
+ operation="gte" value="1"/>
+ </rule>
+ </rsc_location>
+
+ This example assumes that there is a promotable clone of the ``my_primitive``
+ resource (note that the primitive name, not the clone name, is used in the
+ rule), and that there is a recurring 10-second-interval monitor configured for
+ the promoted role (fail count attributes specify the interval in
+ milliseconds).
+
+.. _s-resource-monitoring:
+
+Monitoring Resources for Failure
+################################
+
+When Pacemaker first starts a resource, it runs one-time ``monitor`` operations
+(referred to as *probes*) to ensure the resource is running where it's
+supposed to be, and not running where it's not supposed to be. (This behavior
+can be affected by the ``resource-discovery`` location constraint property.)
+
+Other than those initial probes, Pacemaker will *not* (by default) check that
+the resource continues to stay healthy [#]_. You must configure ``monitor``
+operations explicitly to perform these checks.
+
+.. topic:: An OCF resource with a recurring health check
+
+ .. code-block:: xml
+
+ <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
+ <operations>
+ <op id="Public-IP-start" name="start" timeout="60s"/>
+ <op id="Public-IP-monitor" name="monitor" interval="60s"/>
+ </operations>
+ <instance_attributes id="params-public-ip">
+ <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
+ </instance_attributes>
+ </primitive>
+
+By default, a ``monitor`` operation will ensure that the resource is running
+where it is supposed to be. The ``target-role`` property can be used for
+further checking.
+
+For example, if a resource has one ``monitor`` operation with
+``interval=10 role=Started`` and a second ``monitor`` operation with
+``interval=11 role=Stopped``, the cluster will run the first monitor on any nodes
+it thinks *should* be running the resource, and the second monitor on any nodes
+that it thinks *should not* be running the resource (for the truly paranoid,
+who want to know when an administrator manually starts a service by mistake).
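+
+A sketch of such a pair of monitors (with illustrative ``id`` values):
+
+.. code-block:: xml
+
+ <operations>
+ <op id="Public-IP-monitor-started" name="monitor" interval="10s" role="Started"/>
+ <op id="Public-IP-monitor-stopped" name="monitor" interval="11s" role="Stopped"/>
+ </operations>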
+
+.. note::
+
+ Currently, monitors with ``role=Stopped`` are not implemented for
+ :ref:`clone <s-resource-clone>` resources.
+
+
+.. _s-operation-defaults:
+
+Setting Global Defaults for Operations
+######################################
+
+You can change the global default values for operation properties
+in a given cluster. These are defined in an ``op_defaults`` section
+of the CIB's ``configuration`` section, and can be set with
+``crm_attribute``. For example,
+
+.. code-block:: none
+
+ # crm_attribute --type op_defaults --name timeout --update 20s
+
+would default each operation's ``timeout`` to 20 seconds. If an
+operation's definition also includes a value for ``timeout``, then that
+value would be used for that operation instead.
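+
+In the CIB, this corresponds to an ``op_defaults`` section such as the
+following sketch (the ``id`` values are illustrative):
+
+.. code-block:: xml
+
+ <op_defaults>
+ <meta_attributes id="op-defaults-meta">
+ <nvpair id="op-defaults-timeout" name="timeout" value="20s"/>
+ </meta_attributes>
+ </op_defaults>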
+
+When Implicit Operations Take a Long Time
+#########################################
+
+The cluster will always perform a number of implicit operations: ``start``,
+``stop``, and a non-recurring ``monitor`` operation used at startup to check
+whether the resource is already active. If one of these is taking too long,
+you can create an explicit entry for it and specify a longer timeout.
+
+.. topic:: An OCF resource with custom timeouts for its implicit actions
+
+ .. code-block:: xml
+
+ <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
+ <operations>
+ <op id="public-ip-startup" name="monitor" interval="0" timeout="90s"/>
+ <op id="public-ip-start" name="start" interval="0" timeout="180s"/>
+ <op id="public-ip-stop" name="stop" interval="0" timeout="15min"/>
+ </operations>
+ <instance_attributes id="params-public-ip">
+ <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
+ </instance_attributes>
+ </primitive>
+
+Multiple Monitor Operations
+###########################
+
+Provided no two operations (for a single resource) have the same name
+and interval, you can have as many ``monitor`` operations as you like.
+In this way, you can do a superficial health check every minute and
+progressively more intense ones at longer intervals.
+
+To tell the resource agent what kind of check to perform, you need to
+provide each monitor with a different value for a common parameter.
+The OCF standard creates a special parameter called ``OCF_CHECK_LEVEL``
+for this purpose and dictates that it is "made available to the
+resource agent without the normal ``OCF_RESKEY`` prefix".
+
+Whatever name you choose, you can specify it by adding an
+``instance_attributes`` block to the ``op`` tag. It is up to each
+resource agent to look for the parameter and decide how to use it.
+
+.. topic:: An OCF resource with two recurring health checks, performing
+ different levels of checks specified via ``OCF_CHECK_LEVEL``.
+
+ .. code-block:: xml
+
+ <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
+ <operations>
+ <op id="public-ip-health-60" name="monitor" interval="60">
+ <instance_attributes id="params-public-ip-depth-60">
+ <nvpair id="public-ip-depth-60" name="OCF_CHECK_LEVEL" value="10"/>
+ </instance_attributes>
+ </op>
+ <op id="public-ip-health-300" name="monitor" interval="300">
+ <instance_attributes id="params-public-ip-depth-300">
+ <nvpair id="public-ip-depth-300" name="OCF_CHECK_LEVEL" value="20"/>
+ </instance_attributes>
+ </op>
+ </operations>
+ <instance_attributes id="params-public-ip">
+ <nvpair id="public-ip-level" name="ip" value="192.0.2.2"/>
+ </instance_attributes>
+ </primitive>
+
+Disabling a Monitor Operation
+#############################
+
+The easiest way to stop a recurring monitor is to just delete it.
+However, there can be times when you only want to disable it
+temporarily. In such cases, simply add ``enabled=false`` to the
+operation's definition.
+
+.. topic:: Example of an OCF resource with a disabled health check
+
+ .. code-block:: xml
+
+ <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
+ <operations>
+ <op id="public-ip-check" name="monitor" interval="60s" enabled="false"/>
+ </operations>
+ <instance_attributes id="params-public-ip">
+ <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
+ </instance_attributes>
+ </primitive>
+
+This can be achieved from the command line by executing:
+
+.. code-block:: none
+
+ # cibadmin --modify --xml-text '<op id="public-ip-check" enabled="false"/>'
+
+Once you've done whatever you needed to do, you can then re-enable it with:
+
+.. code-block:: none
+
+ # cibadmin --modify --xml-text '<op id="public-ip-check" enabled="true"/>'
+
+
+.. index::
+ single: start-delay; operation attribute
+ single: interval-origin; operation attribute
+ single: interval; interval-origin
+ single: operation; interval-origin
+ single: operation; start-delay
+
+Specifying When Recurring Actions are Performed
+###############################################
+
+By default, recurring actions are scheduled relative to when the resource
+started. In some cases, you might prefer that a recurring action start relative
+to a specific date and time. For example, you might schedule an in-depth
+monitor to run once every 24 hours, and want it to run outside business hours.
+
+To do this, set the operation's ``interval-origin``. The cluster uses this point
+to calculate the correct ``start-delay`` such that the operation will occur
+at ``interval-origin`` plus a multiple of the operation interval.
+
+For example, if the recurring operation's interval is 24h, its
+``interval-origin`` is set to 02:00, and it is currently 14:32, then the
+cluster would initiate the operation after 11 hours and 28 minutes.
+
+The value specified for ``interval`` and ``interval-origin`` can be any
+date/time conforming to the
+`ISO8601 standard <https://en.wikipedia.org/wiki/ISO_8601>`_. By way of
+example, to specify an operation that would run on the first Monday of
+2021 and every Monday after that, you would add:
+
+.. topic:: Example recurring action that runs relative to base date/time
+
+ .. code-block:: xml
+
+ <op id="intensive-monitor" name="monitor" interval="P7D" interval-origin="2021-W01-1"/>
+
+
+.. index::
+ single: resource; failure recovery
+ single: operation; failure recovery
+
+.. _failure-handling:
+
+Handling Resource Failure
+#########################
+
+By default, Pacemaker will attempt to recover failed resources by restarting
+them. However, failure recovery is highly configurable.
+
+.. index::
+ single: resource; failure count
+ single: operation; failure count
+
+Failure Counts
+______________
+
+Pacemaker tracks resource failures for each combination of node, resource, and
+operation (start, stop, monitor, etc.).
+
+You can query the fail count for a particular node, resource, and/or operation
+using the ``crm_failcount`` command. For example, to see how many times the
+10-second monitor for ``myrsc`` has failed on ``node1``, run:
+
+.. code-block:: none
+
+ # crm_failcount --query -r myrsc -N node1 -n monitor -I 10s
+
+If you omit the node, ``crm_failcount`` will use the local node. If you omit
+the operation and interval, ``crm_failcount`` will display the sum of the fail
+counts for all operations on the resource.
+
+You can use ``crm_resource --cleanup`` or ``crm_failcount --delete`` to clear
+fail counts. For example, to clear the above monitor failures, run:
+
+.. code-block:: none
+
+ # crm_resource --cleanup -r myrsc -N node1 -n monitor -I 10s
+
+If you omit the resource, ``crm_resource --cleanup`` will clear failures for
+all resources. If you omit the node, it will clear failures on all nodes. If
+you omit the operation and interval, it will clear the failures for all
+operations on the resource.
+
+.. note::
+
+ Even when cleaning up only a single operation, all failed operations will
+ disappear from the status display. This is intentional: it triggers a
+ re-check of the resource's current status.
+
+Higher-level tools may provide other commands for querying and clearing
+fail counts.
+
+The ``crm_mon`` tool shows the current cluster status, including any failed
+operations. To see the current fail counts for any failed resources, call
+``crm_mon`` with the ``--failcounts`` option. This shows the fail counts per
+resource (that is, the sum of any operation fail counts for the resource).
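+
+For example, to print the cluster status once, including fail counts:
+
+.. code-block:: none
+
+ # crm_mon --one-shot --failcounts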
+
+.. index::
+ single: migration-threshold; resource meta-attribute
+ single: resource; migration-threshold
+
+Failure Response
+________________
+
+Normally, if a running resource fails, Pacemaker will try to stop it and start
+it again. Pacemaker will choose the best location to start it each time, which
+may be the same node that it failed on.
+
+However, if a resource fails repeatedly, there may be an underlying problem on
+that node, and you might want to try a different node in such a case. Pacemaker
+allows you to set your preference via the ``migration-threshold`` resource
+meta-attribute. [#]_
+
+If you define ``migration-threshold`` to *N* for a resource, it will be banned
+from the original node after *N* failures there.
+
+.. note::
+
+ The ``migration-threshold`` is per *resource*, even though fail counts are
+ tracked per *operation*. The operation fail counts are added together
+ to compare against the ``migration-threshold``.
+
+By default, fail counts remain until manually cleared by an administrator
+using ``crm_resource --cleanup`` or ``crm_failcount --delete`` (hopefully after
+first fixing the failure's cause). It is possible to have fail counts expire
+automatically by setting the ``failure-timeout`` resource meta-attribute.
+
+.. important::
+
+ A successful operation does not clear past failures. If a recurring monitor
+ operation fails once, succeeds many times, then fails again days later, its
+ fail count is 2. Fail counts are cleared only by manual intervention or
+ failure timeout.
+
+For example, setting ``migration-threshold`` to 2 and ``failure-timeout`` to
+``60s`` would cause the resource to move to a new node after 2 failures, and
+allow it to move back (depending on stickiness and constraint scores) after one
+minute.
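+
+A sketch of such a configuration, reusing the earlier example resource
+(``id`` values are illustrative):
+
+.. code-block:: xml
+
+ <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
+ <meta_attributes id="public-ip-meta">
+ <nvpair id="public-ip-migration-threshold" name="migration-threshold" value="2"/>
+ <nvpair id="public-ip-failure-timeout" name="failure-timeout" value="60s"/>
+ </meta_attributes>
+ <instance_attributes id="params-public-ip">
+ <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
+ </instance_attributes>
+ </primitive>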
+
+.. note::
+
+ ``failure-timeout`` is measured since the most recent failure. That is, older
+ failures do not individually time out and lower the fail count. Instead, all
+ failures are timed out simultaneously (and the fail count is reset to 0) if
+ there is no new failure for the timeout period.
+
+There are two exceptions to the migration threshold: when a resource either
+fails to start or fails to stop.
+
+If the cluster property ``start-failure-is-fatal`` is set to ``true`` (which is
+the default), start failures cause the fail count to be set to ``INFINITY`` and
+thus always cause the resource to move immediately.
+
+Stop failures are slightly different and crucial. If a resource fails to stop
+and fencing is enabled, then the cluster will fence the node in order to be
+able to start the resource elsewhere. If fencing is disabled, then the cluster
+has no way to continue and will not try to start the resource elsewhere, but
+will try to stop it again after any failure timeout or clearing.
+
+
+.. index::
+ single: reload
+ single: reload-agent
+
+Reloading an Agent After a Definition Change
+############################################
+
+The cluster automatically detects changes to the configuration of active
+resources. The cluster's normal response is to stop the service (using the old
+definition) and start it again (with the new definition). This works, but some
+resource agents are smarter and can be told to use a new set of options without
+restarting.
+
+To take advantage of this capability, the resource agent must:
+
+* Implement the ``reload-agent`` action. What it should do depends completely
+ on your application!
+
+ .. note::
+
+ Resource agents may also implement a ``reload`` action to make the managed
+ service reload its own *native* configuration. This is different from
+ ``reload-agent``, which makes effective changes in the resource's
+ *Pacemaker* configuration (specifically, the values of the agent's
+ reloadable parameters).
+
+* Advertise the ``reload-agent`` operation in the ``actions`` section of its
+ meta-data.
+
+* Set the ``reloadable`` attribute to 1 in the ``parameters`` section of
+ its meta-data for any parameters eligible to be reloaded after a change.
+
+Once these requirements are satisfied, the cluster will automatically know to
+reload the resource (instead of restarting) when a reloadable parameter
+changes.
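+
+A sketch of the relevant portions of an agent's meta-data (the agent and
+parameter names here are purely illustrative):
+
+.. code-block:: xml
+
+ <resource-agent name="my-agent" version="1.0">
+ <parameters>
+ <parameter name="loglevel" reloadable="1">
+ <shortdesc lang="en">Log verbosity</shortdesc>
+ <content type="string" default="info"/>
+ </parameter>
+ </parameters>
+ <actions>
+ <action name="reload-agent" timeout="20s"/>
+ </actions>
+ </resource-agent>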
+
+.. note::
+
+ Metadata will not be re-read unless the resource needs to be started. If you
+ edit the agent of an already active resource to set a parameter reloadable,
+ the resource may restart the first time the parameter value changes.
+
+.. note::
+
+ If both a reloadable and non-reloadable parameter are changed
+ simultaneously, the resource will be restarted.
+
+
+
+.. _live-migration:
+
+Migrating Resources
+###################
+
+Normally, when the cluster needs to move a resource, it fully restarts the
+resource (that is, it stops the resource on the current node and starts it on
+the new node).
+
+However, some types of resources, such as many virtual machines, are able to
+move to another location without loss of state (often referred to as live
+migration or hot migration). In Pacemaker, this is called live migration.
+Pacemaker can be configured to migrate a resource when moving it, rather than
+restarting it.
+
+Not all resources are able to migrate; see the
+:ref:`migration checklist <migration_checklist>` below. Even those that can
+won't do so in all situations. Conceptually, there are two requirements from
+which the other prerequisites follow:
+
+* The resource must be active and healthy at the old location; and
+* everything required for the resource to run must be available on both the old
+ and new locations.
+
+The cluster is able to accommodate both *push* and *pull* migration models by
+requiring the resource agent to support two special actions: ``migrate_to``
+(performed on the current location) and ``migrate_from`` (performed on the
+destination).
+
+In push migration, the process on the current location transfers the resource
+to the new location where it is later activated. In this scenario, most of the
+work would be done in the ``migrate_to`` action and, if anything, the
+activation would occur during ``migrate_from``.
+
+Conversely for pull, the ``migrate_to`` action is practically empty and
+``migrate_from`` does most of the work, extracting the relevant resource state
+from the old location and activating it.
+
+There is no wrong or right way for a resource agent to implement migration, as
+long as it works.
+
+.. _migration_checklist:
+
+.. topic:: Migration Checklist
+
+ * The resource may not be a clone.
+ * The resource agent standard must be OCF.
+ * The resource must not be in a failed or degraded state.
+ * The resource agent must support ``migrate_to`` and ``migrate_from``
+ actions, and advertise them in its meta-data.
+ * The resource must have the ``allow-migrate`` meta-attribute set to
+ ``true`` (which is not the default); see the example below.
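+
+A minimal sketch of enabling migration for a resource (the resource and ``id``
+values are illustrative; the agent used must implement ``migrate_to`` and
+``migrate_from``):
+
+.. code-block:: xml
+
+ <primitive id="my-vm" class="ocf" type="VirtualDomain" provider="heartbeat">
+ <meta_attributes id="my-vm-meta">
+ <nvpair id="my-vm-allow-migrate" name="allow-migrate" value="true"/>
+ </meta_attributes>
+ </primitive>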
+
+If an otherwise migratable resource depends on another resource via an ordering
+constraint, there are special situations in which it will be restarted rather
+than migrated.
+
+For example, if the resource depends on a clone, and at the time the resource
+needs to be moved, the clone has instances that are stopping and instances that
+are starting, then the resource will be restarted. The scheduler is not yet
+able to model this situation correctly and so takes the safer (if less optimal)
+path.
+
+Also, if a migratable resource depends on a non-migratable resource, and both
+need to be moved, the migratable resource will be restarted.
+
+.. rubric:: Footnotes
+
+.. [#] Currently, anyway. Automatic monitoring operations may be added in a future
+ version of Pacemaker.
+
+.. [#] The naming of this option was perhaps unfortunate as it is easily
+ confused with live migration, the process of moving a resource from one
+ node to another without stopping it. Xen virtual guests are the most
+ common example of resources that can be migrated in this manner.
diff --git a/doc/sphinx/Pacemaker_Explained/options.rst b/doc/sphinx/Pacemaker_Explained/options.rst
deleted file mode 100644
index ee0511c..0000000
--- a/doc/sphinx/Pacemaker_Explained/options.rst
+++ /dev/null
@@ -1,622 +0,0 @@
-Cluster-Wide Configuration
---------------------------
-
-.. index::
- pair: XML element; cib
- pair: XML element; configuration
-
-Configuration Layout
-####################
-
-The cluster is defined by the Cluster Information Base (CIB), which uses XML
-notation. The simplest CIB, an empty one, looks like this:
-
-.. topic:: An empty configuration
-
- .. code-block:: xml
-
- <cib crm_feature_set="3.6.0" validate-with="pacemaker-3.5" epoch="1" num_updates="0" admin_epoch="0">
- <configuration>
- <crm_config/>
- <nodes/>
- <resources/>
- <constraints/>
- </configuration>
- <status/>
- </cib>
-
-The empty configuration above contains the major sections that make up a CIB:
-
-* ``cib``: The entire CIB is enclosed with a ``cib`` element. Certain
- fundamental settings are defined as attributes of this element.
-
- * ``configuration``: This section -- the primary focus of this document --
- contains traditional configuration information such as what resources the
- cluster serves and the relationships among them.
-
- * ``crm_config``: cluster-wide configuration options
-
- * ``nodes``: the machines that host the cluster
-
- * ``resources``: the services run by the cluster
-
- * ``constraints``: indications of how resources should be placed
-
- * ``status``: This section contains the history of each resource on each
- node. Based on this data, the cluster can construct the complete current
- state of the cluster. The authoritative source for this section is the
- local executor (pacemaker-execd process) on each cluster node, and the
- cluster will occasionally repopulate the entire section. For this reason,
- it is never written to disk, and administrators are advised against
- modifying it in any way.
-
-In this document, configuration settings will be described as properties or
-options based on how they are defined in the CIB:
-
-* Properties are XML attributes of an XML element.
-
-* Options are name-value pairs expressed as ``nvpair`` child elements of an XML
- element.
-
-Normally, you will use command-line tools that abstract the XML, so the
-distinction will be unimportant; both properties and options are cluster
-settings you can tweak.
-
-CIB Properties
-##############
-
-Certain settings are defined by CIB properties (that is, attributes of the
-``cib`` tag) rather than with the rest of the cluster configuration in the
-``configuration`` section.
-
-The reason is simply a matter of parsing. These options are used by the
-configuration database which is, by design, mostly ignorant of the content it
-holds. So the decision was made to place them in an easy-to-find location.
-
-.. table:: **CIB Properties**
- :class: longtable
- :widths: 1 3
-
- +------------------+-----------------------------------------------------------+
- | Attribute | Description |
- +==================+===========================================================+
- | admin_epoch | .. index:: |
- | | pair: admin_epoch; cib |
- | | |
- | | When a node joins the cluster, the cluster performs a |
- | | check to see which node has the best configuration. It |
- | | asks the node with the highest (``admin_epoch``, |
- | | ``epoch``, ``num_updates``) tuple to replace the |
- | | configuration on all the nodes -- which makes setting |
- | | them, and setting them correctly, very important. |
- | | ``admin_epoch`` is never modified by the cluster; you can |
- | | use this to make the configurations on any inactive nodes |
- | | obsolete. |
- | | |
- | | **Warning:** Never set this value to zero. In such cases, |
- | | the cluster cannot tell the difference between your |
- | | configuration and the "empty" one used when nothing is |
- | | found on disk. |
- +------------------+-----------------------------------------------------------+
- | epoch | .. index:: |
- | | pair: epoch; cib |
- | | |
- | | The cluster increments this every time the configuration |
- | | is updated (usually by the administrator). |
- +------------------+-----------------------------------------------------------+
- | num_updates | .. index:: |
- | | pair: num_updates; cib |
- | | |
- | | The cluster increments this every time the configuration |
- | | or status is updated (usually by the cluster) and resets |
- | | it to 0 when epoch changes. |
- +------------------+-----------------------------------------------------------+
- | validate-with | .. index:: |
- | | pair: validate-with; cib |
- | | |
- | | Determines the type of XML validation that will be done |
- | | on the configuration. If set to ``none``, the cluster |
- | | will not verify that updates conform to the DTD (nor |
- | | reject ones that don't). |
- +------------------+-----------------------------------------------------------+
- | cib-last-written | .. index:: |
- | | pair: cib-last-written; cib |
- | | |
- | | Indicates when the configuration was last written to |
- | | disk. Maintained by the cluster; for informational |
- | | purposes only. |
- +------------------+-----------------------------------------------------------+
- | have-quorum | .. index:: |
- | | pair: have-quorum; cib |
- | | |
- | | Indicates if the cluster has quorum. If false, this may |
- | | mean that the cluster cannot start resources or fence |
- | | other nodes (see ``no-quorum-policy`` below). Maintained |
- | | by the cluster. |
- +------------------+-----------------------------------------------------------+
- | dc-uuid | .. index:: |
- | | pair: dc-uuid; cib |
- | | |
- | | Indicates which cluster node is the current leader. Used |
- | | by the cluster when placing resources and determining the |
- | | order of some events. Maintained by the cluster. |
- +------------------+-----------------------------------------------------------+
-
-.. _cluster_options:
-
-Cluster Options
-###############
-
-Cluster options, as you might expect, control how the cluster behaves when
-confronted with various situations.
-
-They are grouped into sets within the ``crm_config`` section. In advanced
-configurations, there may be more than one set. (This will be described later
-in the chapter on :ref:`rules` where we will show how to have the cluster use
-different sets of options during working hours than during weekends.) For now,
-we will describe the simple case where each option is present at most once.
-
-You can obtain an up-to-date list of cluster options, including their default
-values, by running the ``man pacemaker-schedulerd`` and
-``man pacemaker-controld`` commands.
-
-.. table:: **Cluster Options**
- :class: longtable
- :widths: 2 1 4
-
- +---------------------------+---------+----------------------------------------------------+
- | Option | Default | Description |
- +===========================+=========+====================================================+
- | cluster-name | | .. index:: |
- | | | pair: cluster option; cluster-name |
- | | | |
- | | | An (optional) name for the cluster as a whole. |
- | | | This is mostly for users' convenience for use |
- | | | as desired in administration, but this can be |
- | | | used in the Pacemaker configuration in |
- | | | :ref:`rules` (as the ``#cluster-name`` |
- | | | :ref:`node attribute |
- | | | <node-attribute-expressions-special>`. It may |
- | | | also be used by higher-level tools when |
- | | | displaying cluster information, and by |
- | | | certain resource agents (for example, the |
- | | | ``ocf:heartbeat:GFS2`` agent stores the |
- | | | cluster name in filesystem meta-data). |
- +---------------------------+---------+----------------------------------------------------+
- | dc-version | | .. index:: |
- | | | pair: cluster option; dc-version |
- | | | |
- | | | Version of Pacemaker on the cluster's DC. |
- | | | Determined automatically by the cluster. Often |
- | | | includes the hash which identifies the exact |
- | | | Git changeset it was built from. Used for |
- | | | diagnostic purposes. |
- +---------------------------+---------+----------------------------------------------------+
- | cluster-infrastructure | | .. index:: |
- | | | pair: cluster option; cluster-infrastructure |
- | | | |
- | | | The messaging stack on which Pacemaker is |
- | | | currently running. Determined automatically by |
- | | | the cluster. Used for informational and |
- | | | diagnostic purposes. |
- +---------------------------+---------+----------------------------------------------------+
- | no-quorum-policy | stop | .. index:: |
- | | | pair: cluster option; no-quorum-policy |
- | | | |
- | | | What to do when the cluster does not have |
- | | | quorum. Allowed values: |
- | | | |
- | | | * ``ignore:`` continue all resource management |
- | | | * ``freeze:`` continue resource management, but |
- | | | don't recover resources from nodes not in the |
- | | | affected partition |
- | | | * ``stop:`` stop all resources in the affected |
- | | | cluster partition |
- | | | * ``demote:`` demote promotable resources and |
- | | | stop all other resources in the affected |
- | | | cluster partition *(since 2.0.5)* |
- | | | * ``suicide:`` fence all nodes in the affected |
- | | | cluster partition |
- +---------------------------+---------+----------------------------------------------------+
- | batch-limit | 0 | .. index:: |
- | | | pair: cluster option; batch-limit |
- | | | |
- | | | The maximum number of actions that the cluster |
- | | | may execute in parallel across all nodes. The |
- | | | "correct" value will depend on the speed and |
- | | | load of your network and cluster nodes. If zero, |
- | | | the cluster will impose a dynamically calculated |
- | | | limit only when any node has high load. If -1, the |
- | | | cluster will not impose any limit. |
- +---------------------------+---------+----------------------------------------------------+
- | migration-limit | -1 | .. index:: |
- | | | pair: cluster option; migration-limit |
- | | | |
- | | | The number of |
- | | | :ref:`live migration <live-migration>` actions |
- | | | that the cluster is allowed to execute in |
- | | | parallel on a node. A value of -1 means |
- | | | unlimited. |
- +---------------------------+---------+----------------------------------------------------+
- | symmetric-cluster | true | .. index:: |
- | | | pair: cluster option; symmetric-cluster |
- | | | |
- | | | Whether resources can run on any node by default |
- | | | (if false, a resource is allowed to run on a |
- | | | node only if a |
- | | | :ref:`location constraint <location-constraint>` |
- | | | enables it) |
- +---------------------------+---------+----------------------------------------------------+
- | stop-all-resources | false | .. index:: |
- | | | pair: cluster option; stop-all-resources |
- | | | |
- | | | Whether all resources should be disallowed from |
- | | | running (can be useful during maintenance) |
- +---------------------------+---------+----------------------------------------------------+
- | stop-orphan-resources | true | .. index:: |
- | | | pair: cluster option; stop-orphan-resources |
- | | | |
- | | | Whether resources that have been deleted from |
- | | | the configuration should be stopped. This value |
- | | | takes precedence over ``is-managed`` (that is, |
- | | | even unmanaged resources will be stopped when |
- | | | orphaned if this value is ``true`` |
- +---------------------------+---------+----------------------------------------------------+
- | stop-orphan-actions | true | .. index:: |
- | | | pair: cluster option; stop-orphan-actions |
- | | | |
- | | | Whether recurring :ref:`operations <operation>` |
- | | | that have been deleted from the configuration |
- | | | should be cancelled |
- +---------------------------+---------+----------------------------------------------------+
- | start-failure-is-fatal | true | .. index:: |
- | | | pair: cluster option; start-failure-is-fatal |
- | | | |
- | | | Whether a failure to start a resource on a |
- | | | particular node prevents further start attempts |
- | | | on that node? If ``false``, the cluster will |
- | | | decide whether the node is still eligible based |
- | | | on the resource's current failure count and |
- | | | :ref:`migration-threshold <failure-handling>`. |
- +---------------------------+---------+----------------------------------------------------+
- | enable-startup-probes | true | .. index:: |
- | | | pair: cluster option; enable-startup-probes |
- | | | |
- | | | Whether the cluster should check the |
- | | | pre-existing state of resources when the cluster |
- | | | starts |
- +---------------------------+---------+----------------------------------------------------+
- | maintenance-mode | false | .. index:: |
- | | | pair: cluster option; maintenance-mode |
- | | | |
- | | | Whether the cluster should refrain from |
- | | | monitoring, starting and stopping resources |
- +---------------------------+---------+----------------------------------------------------+
- | stonith-enabled | true | .. index:: |
- | | | pair: cluster option; stonith-enabled |
- | | | |
- | | | Whether the cluster is allowed to fence nodes |
- | | | (for example, failed nodes and nodes with |
- | | | resources that can't be stopped. |
- | | | |
- | | | If true, at least one fence device must be |
- | | | configured before resources are allowed to run. |
- | | | |
- | | | If false, unresponsive nodes are immediately |
- | | | assumed to be running no resources, and resource |
- | | | recovery on online nodes starts without any |
- | | | further protection (which can mean *data loss* |
- | | | if the unresponsive node still accesses shared |
- | | | storage, for example). See also the |
- | | | :ref:`requires <requires>` resource |
- | | | meta-attribute. |
- +---------------------------+---------+----------------------------------------------------+
- | stonith-action | reboot | .. index:: |
- | | | pair: cluster option; stonith-action |
- | | | |
- | | | Action the cluster should send to the fence agent |
- | | | when a node must be fenced. Allowed values are |
- | | | ``reboot``, ``off``, and (for legacy agents only) |
- | | | ``poweroff``. |
- +---------------------------+---------+----------------------------------------------------+
- | stonith-timeout | 60s | .. index:: |
- | | | pair: cluster option; stonith-timeout |
- | | | |
- | | | How long to wait for ``on``, ``off``, and |
- | | | ``reboot`` fence actions to complete by default. |
- +---------------------------+---------+----------------------------------------------------+
- | stonith-max-attempts | 10 | .. index:: |
- | | | pair: cluster option; stonith-max-attempts |
- | | | |
- | | | How many times fencing can fail for a target |
- | | | before the cluster will no longer immediately |
- | | | re-attempt it. |
- +---------------------------+---------+----------------------------------------------------+
- | stonith-watchdog-timeout | 0 | .. index:: |
- | | | pair: cluster option; stonith-watchdog-timeout |
- | | | |
- | | | If nonzero, and the cluster detects |
- | | | ``have-watchdog`` as ``true``, then watchdog-based |
- | | | self-fencing will be performed via SBD when |
- | | | fencing is required, without requiring a fencing |
- | | | resource explicitly configured. |
- | | | |
- | | | If this is set to a positive value, unseen nodes |
- | | | are assumed to self-fence within this much time. |
- | | | |
- | | | **Warning:** It must be ensured that this value is |
- | | | larger than the ``SBD_WATCHDOG_TIMEOUT`` |
- | | | environment variable on all nodes. Pacemaker |
- | | | verifies the settings individually on all nodes |
- | | | and prevents startup or shuts down if configured |
- | | | wrongly on the fly. It is strongly recommended |
- | | | that ``SBD_WATCHDOG_TIMEOUT`` be set to the same |
- | | | value on all nodes. |
- | | | |
- | | | If this is set to a negative value, and |
- | | | ``SBD_WATCHDOG_TIMEOUT`` is set, twice that value |
- | | | will be used. |
- | | | |
- | | | **Warning:** In this case, it is essential (and |
- | | | currently not verified by pacemaker) that |
- | | | ``SBD_WATCHDOG_TIMEOUT`` is set to the same |
- | | | value on all nodes. |
- +---------------------------+---------+----------------------------------------------------+
- | concurrent-fencing | false | .. index:: |
- | | | pair: cluster option; concurrent-fencing |
- | | | |
- | | | Whether the cluster is allowed to initiate |
- | | | multiple fence actions concurrently. Fence actions |
- | | | initiated externally, such as via the |
- | | | ``stonith_admin`` tool or an application such as |
- | | | DLM, or by the fencer itself such as recurring |
- | | | device monitors and ``status`` and ``list`` |
- | | | commands, are not limited by this option. |
- +---------------------------+---------+----------------------------------------------------+
- | fence-reaction | stop | .. index:: |
- | | | pair: cluster option; fence-reaction |
- | | | |
- | | | How should a cluster node react if notified of its |
- | | | own fencing? A cluster node may receive |
- | | | notification of its own fencing if fencing is |
- | | | misconfigured, or if fabric fencing is in use that |
- | | | doesn't cut cluster communication. Allowed values |
- | | | are ``stop`` to attempt to immediately stop |
- | | | pacemaker and stay stopped, or ``panic`` to |
- | | | attempt to immediately reboot the local node, |
- | | | falling back to stop on failure. The default is |
- | | | likely to be changed to ``panic`` in a future |
- | | | release. *(since 2.0.3)* |
- +---------------------------+---------+----------------------------------------------------+
- | priority-fencing-delay | 0 | .. index:: |
- | | | pair: cluster option; priority-fencing-delay |
- | | | |
- | | | Apply this delay to any fencing targeting the lost |
- | | | nodes with the highest total resource priority in |
- | | | case we don't have the majority of the nodes in |
- | | | our cluster partition, so that the more |
- | | | significant nodes potentially win any fencing |
- | | | match (especially meaningful in a split-brain of a |
- | | | 2-node cluster). A promoted resource instance |
- | | | takes the resource's priority plus 1 if the |
- | | | resource's priority is not 0. Any static or random |
- | | | delays introduced by ``pcmk_delay_base`` and |
- | | | ``pcmk_delay_max`` configured for the |
- | | | corresponding fencing resources will be added to |
- | | | this delay. This delay should be significantly |
- | | | greater than (safely twice) the maximum delay from |
- | | | those parameters. *(since 2.0.4)* |
- +---------------------------+---------+----------------------------------------------------+
- | cluster-delay | 60s | .. index:: |
- | | | pair: cluster option; cluster-delay |
- | | | |
- | | | Estimated maximum round-trip delay over the |
- | | | network (excluding action execution). If the DC |
- | | | requires an action to be executed on another node, |
- | | | it will consider the action failed if it does not |
- | | | get a response from the other node in this time |
- | | | (after considering the action's own timeout). The |
- | | | "correct" value will depend on the speed and load |
- | | | of your network and cluster nodes. |
- +---------------------------+---------+----------------------------------------------------+
- | dc-deadtime | 20s | .. index:: |
- | | | pair: cluster option; dc-deadtime |
- | | | |
- | | | How long to wait for a response from other nodes |
- | | | during startup. The "correct" value will depend on |
- | | | the speed/load of your network and the type of |
- | | | switches used. |
- +---------------------------+---------+----------------------------------------------------+
- | cluster-ipc-limit | 500 | .. index:: |
- | | | pair: cluster option; cluster-ipc-limit |
- | | | |
- | | | The maximum IPC message backlog before one cluster |
- | | | daemon will disconnect another. This is of use in |
- | | | large clusters, for which a good value is the |
- | | | number of resources in the cluster multiplied by |
- | | | the number of nodes. The default of 500 is also |
- | | | the minimum. Raise this if you see |
- | | | "Evicting client" messages for cluster daemon PIDs |
- | | | in the logs. |
- +---------------------------+---------+----------------------------------------------------+
- | pe-error-series-max | -1 | .. index:: |
- | | | pair: cluster option; pe-error-series-max |
- | | | |
- | | | The number of scheduler inputs resulting in errors |
- | | | to save. Used when reporting problems. A value of |
- | | | -1 means unlimited (report all), and 0 means none. |
- +---------------------------+---------+----------------------------------------------------+
- | pe-warn-series-max | 5000 | .. index:: |
- | | | pair: cluster option; pe-warn-series-max |
- | | | |
- | | | The number of scheduler inputs resulting in |
- | | | warnings to save. Used when reporting problems. A |
- | | | value of -1 means unlimited (report all), and 0 |
- | | | means none. |
- +---------------------------+---------+----------------------------------------------------+
- | pe-input-series-max | 4000 | .. index:: |
- | | | pair: cluster option; pe-input-series-max |
- | | | |
- | | | The number of "normal" scheduler inputs to save. |
- | | | Used when reporting problems. A value of -1 means |
- | | | unlimited (report all), and 0 means none. |
- +---------------------------+---------+----------------------------------------------------+
- | enable-acl | false | .. index:: |
- | | | pair: cluster option; enable-acl |
- | | | |
- | | | Whether :ref:`acl` should be used to authorize |
- | | | modifications to the CIB |
- +---------------------------+---------+----------------------------------------------------+
- | placement-strategy | default | .. index:: |
- | | | pair: cluster option; placement-strategy |
- | | | |
- | | | How the cluster should allocate resources to nodes |
- | | | (see :ref:`utilization`). Allowed values are |
- | | | ``default``, ``utilization``, ``balanced``, and |
- | | | ``minimal``. |
- +---------------------------+---------+----------------------------------------------------+
- | node-health-strategy | none | .. index:: |
- | | | pair: cluster option; node-health-strategy |
- | | | |
- | | | How the cluster should react to node health |
- | | | attributes (see :ref:`node-health`). Allowed values|
- | | | are ``none``, ``migrate-on-red``, ``only-green``, |
- | | | ``progressive``, and ``custom``. |
- +---------------------------+---------+----------------------------------------------------+
- | node-health-base | 0 | .. index:: |
- | | | pair: cluster option; node-health-base |
- | | | |
- | | | The base health score assigned to a node. Only |
- | | | used when ``node-health-strategy`` is |
- | | | ``progressive``. |
- +---------------------------+---------+----------------------------------------------------+
- | node-health-green | 0 | .. index:: |
- | | | pair: cluster option; node-health-green |
- | | | |
- | | | The score to use for a node health attribute whose |
- | | | value is ``green``. Only used when |
- | | | ``node-health-strategy`` is ``progressive`` or |
- | | | ``custom``. |
- +---------------------------+---------+----------------------------------------------------+
- | node-health-yellow | 0 | .. index:: |
- | | | pair: cluster option; node-health-yellow |
- | | | |
- | | | The score to use for a node health attribute whose |
- | | | value is ``yellow``. Only used when |
- | | | ``node-health-strategy`` is ``progressive`` or |
- | | | ``custom``. |
- +---------------------------+---------+----------------------------------------------------+
- | node-health-red | 0 | .. index:: |
- | | | pair: cluster option; node-health-red |
- | | | |
- | | | The score to use for a node health attribute whose |
- | | | value is ``red``. Only used when |
- | | | ``node-health-strategy`` is ``progressive`` or |
- | | | ``custom``. |
- +---------------------------+---------+----------------------------------------------------+
- | cluster-recheck-interval | 15min | .. index:: |
- | | | pair: cluster option; cluster-recheck-interval |
- | | | |
- | | | Pacemaker is primarily event-driven, and looks |
- | | | ahead to know when to recheck the cluster for |
- | | | failure timeouts and most time-based rules |
- | | | *(since 2.0.3)*. However, it will also recheck the |
- | | | cluster after this amount of inactivity. This has |
- | | | two goals: rules with ``date_spec`` are only |
- | | | guaranteed to be checked this often, and it also |
- | | | serves as a fail-safe for some kinds of scheduler |
- | | | bugs. A value of 0 disables this polling; positive |
- | | | values are a time interval. |
- +---------------------------+---------+----------------------------------------------------+
- | shutdown-lock | false | .. index:: |
- | | | pair: cluster option; shutdown-lock |
- | | | |
- | | | The default of false allows active resources to be |
- | | | recovered elsewhere when their node is cleanly |
- | | | shut down, which is what the vast majority of |
- | | | users will want. However, some users prefer to |
- | | | make resources highly available only for failures, |
- | | | with no recovery for clean shutdowns. If this |
- | | | option is true, resources active on a node when it |
- | | | is cleanly shut down are kept "locked" to that |
- | | | node (not allowed to run elsewhere) until they |
- | | | start again on that node after it rejoins (or for |
- | | | at most ``shutdown-lock-limit``, if set). Stonith |
- | | | resources and Pacemaker Remote connections are |
- | | | never locked. Clone and bundle instances and the |
- | | | promoted role of promotable clones are currently |
- | | | never locked, though support could be added in a |
- | | | future release. Locks may be manually cleared |
- | | | using the ``--refresh`` option of ``crm_resource`` |
- | | | (both the resource and node must be specified; |
- | | | this works with remote nodes if their connection |
- | | | resource's ``target-role`` is set to ``Stopped``, |
- | | | but not if Pacemaker Remote is stopped on the |
- | | | remote node without disabling the connection |
- | | | resource). *(since 2.0.4)* |
- +---------------------------+---------+----------------------------------------------------+
- | shutdown-lock-limit | 0 | .. index:: |
- | | | pair: cluster option; shutdown-lock-limit |
- | | | |
- | | | If ``shutdown-lock`` is true, and this is set to a |
- | | | nonzero time duration, locked resources will be |
- | | | allowed to start after this much time has passed |
- | | | since the node shutdown was initiated, even if the |
- | | | node has not rejoined. (This works with remote |
- | | | nodes only if their connection resource's |
- | | | ``target-role`` is set to ``Stopped``.) |
- | | | *(since 2.0.4)* |
- +---------------------------+---------+----------------------------------------------------+
- | remove-after-stop | false | .. index:: |
- | | | pair: cluster option; remove-after-stop |
- | | | |
- | | | *Deprecated* Should the cluster remove |
- | | | resources from Pacemaker's executor after they are |
- | | | stopped? Values other than the default are, at |
- | | | best, poorly tested and potentially dangerous. |
- | | | This option is deprecated and will be removed in a |
- | | | future release. |
- +---------------------------+---------+----------------------------------------------------+
- | startup-fencing | true | .. index:: |
- | | | pair: cluster option; startup-fencing |
- | | | |
- | | | *Advanced Use Only:* Should the cluster fence |
- | | | unseen nodes at start-up? Setting this to false is |
- | | | unsafe, because the unseen nodes could be active |
- | | | and running resources but unreachable. |
- +---------------------------+---------+----------------------------------------------------+
- | election-timeout | 2min | .. index:: |
- | | | pair: cluster option; election-timeout |
- | | | |
- | | | *Advanced Use Only:* If you need to adjust this |
- | | | value, it probably indicates the presence of a bug.|
- +---------------------------+---------+----------------------------------------------------+
- | shutdown-escalation | 20min | .. index:: |
- | | | pair: cluster option; shutdown-escalation |
- | | | |
- | | | *Advanced Use Only:* If you need to adjust this |
- | | | value, it probably indicates the presence of a bug.|
- +---------------------------+---------+----------------------------------------------------+
- | join-integration-timeout | 3min | .. index:: |
- | | | pair: cluster option; join-integration-timeout |
- | | | |
- | | | *Advanced Use Only:* If you need to adjust this |
- | | | value, it probably indicates the presence of a bug.|
- +---------------------------+---------+----------------------------------------------------+
- | join-finalization-timeout | 30min | .. index:: |
- | | | pair: cluster option; join-finalization-timeout |
- | | | |
- | | | *Advanced Use Only:* If you need to adjust this |
- | | | value, it probably indicates the presence of a bug.|
- +---------------------------+---------+----------------------------------------------------+
- | transition-delay | 0s | .. index:: |
- | | | pair: cluster option; transition-delay |
- | | | |
- | | | *Advanced Use Only:* Delay cluster recovery for |
- | | | the configured interval to allow for additional or |
- | | | related events to occur. This can be useful if |
- | | | your configuration is sensitive to the order in |
- | | | which ping updates arrive. Enabling this option |
- | | | will slow down cluster recovery under all |
- | | | conditions. |
- +---------------------------+---------+----------------------------------------------------+
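For illustration, cluster options like the ones in this table are stored as ``nvpair``
elements inside a ``cluster_property_set`` in the CIB's ``crm_config`` section. A minimal
sketch (the ``id`` values are arbitrary):

.. topic:: Sketch of a ``cluster_property_set`` using some of the options above

   .. code-block:: xml

      <crm_config>
        <cluster_property_set id="cib-bootstrap-options">
          <nvpair id="opt-placement-strategy" name="placement-strategy" value="balanced"/>
          <nvpair id="opt-node-health-strategy" name="node-health-strategy" value="progressive"/>
          <nvpair id="opt-shutdown-lock" name="shutdown-lock" value="true"/>
          <nvpair id="opt-shutdown-lock-limit" name="shutdown-lock-limit" value="10min"/>
        </cluster_property_set>
      </crm_config>

Individual options can also be set from the command line, for example with
``crm_attribute --name shutdown-lock --update true``.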
diff --git a/doc/sphinx/Pacemaker_Explained/resources.rst b/doc/sphinx/Pacemaker_Explained/resources.rst
index 3b7520f..a971c44 100644
--- a/doc/sphinx/Pacemaker_Explained/resources.rst
+++ b/doc/sphinx/Pacemaker_Explained/resources.rst
@@ -362,8 +362,8 @@ behave and can be easily set using the ``--meta`` option of the
| | | all :ref:`colocation constraints |
| | | <s-resource-colocation>` involving this resource, |
| | | as well as the implicit colocation constraints |
- | | | created if this resource is in a :ref:`group |
- | | | <group-resources>`. For details, see |
+ | | | created if this resource is in a |
+ | | | :ref:`group <group-resources>`. For details, see |
| | | :ref:`s-coloc-influence`. *(since 2.1.0)* |
+----------------------------+----------------------------------+------------------------------------------------------+
| target-role | Started | .. index:: |
@@ -375,31 +375,39 @@ behave and can be easily set using the ``--meta`` option of the
| | | |
| | | * ``Stopped:`` Force the resource to be stopped |
| | | * ``Started:`` Allow the resource to be started |
- | | | (and in the case of :ref:`promotable clone |
- | | | resources <s-resource-promotable>`, promoted |
- | | | if appropriate) |
+ | | | (and in the case of |
+ | | | :ref:`promotable <s-resource-promotable>` clone |
+ | | | resources, promoted if appropriate) |
| | | * ``Unpromoted:`` Allow the resource to be started, |
| | | but only in the unpromoted role if the resource is |
| | | :ref:`promotable <s-resource-promotable>` |
| | | * ``Promoted:`` Equivalent to ``Started`` |
+----------------------------+----------------------------------+------------------------------------------------------+
- | is-managed | TRUE | .. index:: |
+ | is-managed | TRUE | .. _is_managed: |
+ | | | |
+ | | | .. index:: |
| | | single: is-managed; resource option |
| | | single: resource; option, is-managed |
| | | |
- | | | Is the cluster allowed to start and stop |
- | | | the resource? Allowed values: ``true``, ``false`` |
+ | | | If false, the cluster will not start or stop the |
+ | | | resource on any node. Recurring actions for the |
+ | | | resource are unaffected. Maintenance mode overrides |
+ | | | this setting. Allowed values: ``true``, ``false`` |
+----------------------------+----------------------------------+------------------------------------------------------+
- | maintenance | FALSE | .. index:: |
+ | maintenance | FALSE | .. _rsc_maintenance: |
+ | | | |
+ | | | .. index:: |
| | | single: maintenance; resource option |
| | | single: resource; option, maintenance |
| | | |
- | | | Similar to the ``maintenance-mode`` |
- | | | :ref:`cluster option <cluster_options>`, but for |
- | | | a single resource. If true, the resource will not |
- | | | be started, stopped, or monitored on any node. This |
- | | | differs from ``is-managed`` in that monitors will |
- | | | not be run. Allowed values: ``true``, ``false`` |
+ | | | If true, the cluster will not start or stop the |
+ | | | resource on any node, and will pause any recurring |
+ | | | monitors (except those specifying ``role`` as |
+ | | | ``Stopped``). If true, the |
+ | | | :ref:`maintenance-mode <maintenance_mode>` cluster |
+ | | | option or :ref:`maintenance <node_maintenance>` |
+ | | | node attribute override this. Allowed values: |
+ | | | ``true``, ``false`` |
+----------------------------+----------------------------------+------------------------------------------------------+
| resource-stickiness | 1 for individual clone | .. _resource-stickiness: |
| | instances, 0 for all | |
@@ -686,389 +694,3 @@ attributes, their purpose and default values.
<action name="meta-data" timeout="5s" />
</actions>
</resource-agent>
-
-.. index::
- single: resource; action
- single: resource; operation
-
-.. _operation:
-
-Resource Operations
-###################
-
-*Operations* are actions the cluster can perform on a resource by calling the
-resource agent. Resource agents must support certain common operations such as
-start, stop, and monitor, and may implement any others.
-
-Operations may be explicitly configured for two purposes: to override defaults
-for options (such as timeout) that the cluster will use whenever it initiates
-the operation, and to run an operation on a recurring basis (for example, to
-monitor the resource for failure).
-
-.. topic:: An OCF resource with a non-default start timeout
-
- .. code-block:: xml
-
- <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
- <operations>
- <op id="Public-IP-start" name="start" timeout="60s"/>
- </operations>
- <instance_attributes id="params-public-ip">
- <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
- </instance_attributes>
- </primitive>
-
-Pacemaker identifies operations by a combination of name and interval, so this
-combination must be unique for each resource. That is, you should not configure
-two operations for the same resource with the same name and interval.
-
-.. _operation_properties:
-
-Operation Properties
-____________________
-
-Operation properties may be specified directly in the ``op`` element as
-XML attributes, or in a separate ``meta_attributes`` block as ``nvpair`` elements.
-XML attributes take precedence over ``nvpair`` elements if both are specified.
-
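For instance, the same operation could set ``timeout`` as an XML attribute while supplying
``record-pending`` through a nested ``meta_attributes`` block (the IDs here are illustrative):

.. topic:: Sketch of an operation using both XML attributes and ``nvpair`` elements

   .. code-block:: xml

      <op id="db-monitor-30s" name="monitor" interval="30s" timeout="20s">
        <meta_attributes id="db-monitor-30s-meta">
          <nvpair id="db-monitor-30s-record-pending" name="record-pending" value="false"/>
        </meta_attributes>
      </op>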
-.. table:: **Properties of an Operation**
- :class: longtable
- :widths: 1 2 3
-
- +----------------+-----------------------------------+-----------------------------------------------------+
- | Field | Default | Description |
- +================+===================================+=====================================================+
- | id | | .. index:: |
- | | | single: id; action property |
- | | | single: action; property, id |
- | | | |
- | | | A unique name for the operation. |
- +----------------+-----------------------------------+-----------------------------------------------------+
- | name | | .. index:: |
- | | | single: name; action property |
- | | | single: action; property, name |
- | | | |
- | | | The action to perform. This can be any action |
- | | | supported by the agent; common values include |
- | | | ``monitor``, ``start``, and ``stop``. |
- +----------------+-----------------------------------+-----------------------------------------------------+
- | interval | 0 | .. index:: |
- | | | single: interval; action property |
- | | | single: action; property, interval |
- | | | |
- | | | How frequently (in seconds) to perform the |
- | | | operation. A value of 0 means "when needed". |
- | | | A positive value defines a *recurring action*, |
- | | | which is typically used with |
- | | | :ref:`monitor <s-resource-monitoring>`. |
- +----------------+-----------------------------------+-----------------------------------------------------+
- | timeout | | .. index:: |
- | | | single: timeout; action property |
- | | | single: action; property, timeout |
- | | | |
- | | | How long to wait before declaring the action |
- | | | has failed |
- +----------------+-----------------------------------+-----------------------------------------------------+
- | on-fail | Varies by action: | .. index:: |
- | | | single: on-fail; action property |
- | | * ``stop``: ``fence`` if | single: action; property, on-fail |
- | | ``stonith-enabled`` is true | |
- | | or ``block`` otherwise | The action to take if this action ever fails. |
- | | * ``demote``: ``on-fail`` of the | Allowed values: |
- | | ``monitor`` action with | |
- | | ``role`` set to ``Promoted``, | * ``ignore:`` Pretend the resource did not fail. |
- | | if present, enabled, and | * ``block:`` Don't perform any further operations |
- | | configured to a value other | on the resource. |
- | | than ``demote``, or ``restart`` | * ``stop:`` Stop the resource and do not start |
- | | otherwise | it elsewhere. |
- | | * all other actions: ``restart`` | * ``demote:`` Demote the resource, without a |
- | | | full restart. This is valid only for ``promote`` |
- | | | actions, and for ``monitor`` actions with both |
- | | | a nonzero ``interval`` and ``role`` set to |
- | | | ``Promoted``; for any other action, a |
- | | | configuration error will be logged, and the |
- | | | default behavior will be used. *(since 2.0.5)* |
- | | | * ``restart:`` Stop the resource and start it |
- | | | again (possibly on a different node). |
- | | | * ``fence:`` STONITH the node on which the |
- | | | resource failed. |
- | | | * ``standby:`` Move *all* resources away from the |
- | | | node on which the resource failed. |
- +----------------+-----------------------------------+-----------------------------------------------------+
- | enabled | TRUE | .. index:: |
- | | | single: enabled; action property |
- | | | single: action; property, enabled |
- | | | |
- | | | If ``false``, ignore this operation definition. |
- | | | This is typically used to pause a particular |
- | | | recurring ``monitor`` operation; for instance, it |
- | | | can complement the respective resource being |
- | | | unmanaged (``is-managed=false``), as this alone |
- | | | will :ref:`not block any configured monitoring |
- | | | <s-monitoring-unmanaged>`. Disabling the operation |
- | | | does not suppress all actions of the given type. |
- | | | Allowed values: ``true``, ``false``. |
- +----------------+-----------------------------------+-----------------------------------------------------+
- | record-pending | TRUE | .. index:: |
- | | | single: record-pending; action property |
- | | | single: action; property, record-pending |
- | | | |
- | | | If ``true``, the intention to perform the operation |
- | | | is recorded so that GUIs and CLI tools can indicate |
- | | | that an operation is in progress. This is best set |
- | | | as an *operation default* |
- | | | (see :ref:`s-operation-defaults`). Allowed values: |
- | | | ``true``, ``false``. |
- +----------------+-----------------------------------+-----------------------------------------------------+
- | role | | .. index:: |
- | | | single: role; action property |
- | | | single: action; property, role |
- | | | |
- | | | Run the operation only on node(s) that the cluster |
- | | | thinks should be in the specified role. This only |
- | | | makes sense for recurring ``monitor`` operations. |
- | | | Allowed (case-sensitive) values: ``Stopped``, |
- | | | ``Started``, and in the case of :ref:`promotable |
- | | | clone resources <s-resource-promotable>`, |
- | | | ``Unpromoted`` and ``Promoted``. |
- +----------------+-----------------------------------+-----------------------------------------------------+
-
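As a sketch of how ``role`` and ``on-fail`` combine in practice, a promotable clone might
pair a promoted-role monitor that demotes on failure with an unpromoted-role monitor
(the resource and operation IDs below are only examples):

.. topic:: Sketch of role-specific monitors on a promotable clone

   .. code-block:: xml

      <clone id="my_clone">
        <meta_attributes id="my_clone-meta">
          <nvpair id="my_clone-promotable" name="promotable" value="true"/>
        </meta_attributes>
        <primitive id="my_primitive" class="ocf" provider="pacemaker" type="Stateful">
          <operations>
            <op id="my_primitive-monitor-promoted" name="monitor" interval="10s"
                role="Promoted" on-fail="demote"/>
            <op id="my_primitive-monitor" name="monitor" interval="11s"
                role="Unpromoted"/>
          </operations>
        </primitive>
      </clone>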
-.. note::
-
- When ``on-fail`` is set to ``demote``, recovery from failure by a successful
- demote causes the cluster to recalculate whether and where a new instance
- should be promoted. The node with the failure is eligible, so if promotion
- scores have not changed, it will be promoted again.
-
- There is no direct equivalent of ``migration-threshold`` for the promoted
- role, but the same effect can be achieved with a location constraint using a
- :ref:`rule <rules>` with a node attribute expression for the resource's fail
- count.
-
- For example, to immediately ban the promoted role from a node with any
- failed promote or promoted instance monitor:
-
- .. code-block:: xml
-
- <rsc_location id="loc1" rsc="my_primitive">
- <rule id="rule1" score="-INFINITY" role="Promoted" boolean-op="or">
- <expression id="expr1" attribute="fail-count-my_primitive#promote_0"
- operation="gte" value="1"/>
- <expression id="expr2" attribute="fail-count-my_primitive#monitor_10000"
- operation="gte" value="1"/>
- </rule>
- </rsc_location>
-
- This example assumes that there is a promotable clone of the ``my_primitive``
- resource (note that the primitive name, not the clone name, is used in the
- rule), and that there is a recurring 10-second-interval monitor configured for
- the promoted role (fail count attributes specify the interval in
- milliseconds).
-
-.. _s-resource-monitoring:
-
-Monitoring Resources for Failure
-________________________________
-
-When Pacemaker first starts a resource, it runs one-time ``monitor`` operations
-(referred to as *probes*) to ensure the resource is running where it's
-supposed to be, and not running where it's not supposed to be. (This behavior
-can be affected by the ``resource-discovery`` location constraint property.)
-
-Other than those initial probes, Pacemaker will *not* (by default) check that
-the resource continues to stay healthy [#]_. You must configure ``monitor``
-operations explicitly to perform these checks.
-
-.. topic:: An OCF resource with a recurring health check
-
- .. code-block:: xml
-
- <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
- <operations>
- <op id="Public-IP-start" name="start" timeout="60s"/>
- <op id="Public-IP-monitor" name="monitor" interval="60s"/>
- </operations>
- <instance_attributes id="params-public-ip">
- <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
- </instance_attributes>
- </primitive>
-
-By default, a ``monitor`` operation will ensure that the resource is running
-where it is supposed to. The ``target-role`` property can be used for further
-checking.
-
-For example, if a resource has one ``monitor`` operation with
-``interval=10 role=Started`` and a second ``monitor`` operation with
-``interval=11 role=Stopped``, the cluster will run the first monitor on any nodes
-it thinks *should* be running the resource, and the second monitor on any nodes
-that it thinks *should not* be running the resource (for the truly paranoid,
-who want to know when an administrator manually starts a service by mistake).
-
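A sketch of that two-monitor arrangement (the operation IDs are arbitrary):

.. topic:: Sketch of paired ``Started``/``Stopped`` monitors

   .. code-block:: xml

      <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
        <operations>
          <op id="public-ip-monitor-started" name="monitor" interval="10s" role="Started"/>
          <op id="public-ip-monitor-stopped" name="monitor" interval="11s" role="Stopped"/>
        </operations>
        <instance_attributes id="params-public-ip">
          <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
        </instance_attributes>
      </primitive>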
-.. note::
-
- Currently, monitors with ``role=Stopped`` are not implemented for
- :ref:`clone <s-resource-clone>` resources.
-
-.. _s-monitoring-unmanaged:
-
-Monitoring Resources When Administration is Disabled
-____________________________________________________
-
-Recurring ``monitor`` operations behave differently under various administrative
-settings:
-
-* When a resource is unmanaged (by setting ``is-managed=false``): No monitors
- will be stopped.
-
- If the unmanaged resource is stopped on a node where the cluster thinks it
- should be running, the cluster will detect and report that it is not, but it
- will not consider the monitor failed, and will not try to start the resource
- until it is managed again.
-
- Starting the unmanaged resource on a different node is strongly discouraged
- and will at least cause the cluster to consider the resource failed, and
- may require the resource's ``target-role`` to be set to ``Stopped`` then
- ``Started`` to be recovered.
-
-* When a resource is put into maintenance mode (by setting
- ``maintenance=true``): The resource will be marked as unmanaged. (This
- overrides ``is-managed=true``.)
-
- Additionally, all monitor operations will be stopped, except those specifying
- ``role`` as ``Stopped`` (which will be newly initiated if appropriate). As
- with unmanaged resources in general, starting a resource on a node other than
- where the cluster expects it to be will cause problems.
-
-* When a node is put into standby: All resources will be moved away from the
- node, and all ``monitor`` operations will be stopped on the node, except those
- specifying ``role`` as ``Stopped`` (which will be newly initiated if
- appropriate).
-
-* When a node is put into maintenance mode: All resources that are active on the
- node will be marked as in maintenance mode. See above for more details.
-
-* When the cluster is put into maintenance mode: All resources in the cluster
- will be marked as in maintenance mode. See above for more details.
-
-A resource is in maintenance mode if the cluster, the node where the resource
-is active, or the resource itself is configured to be in maintenance mode. If a
-resource is in maintenance mode, then it is also unmanaged. However, if a
-resource is unmanaged, it is not necessarily in maintenance mode.
-
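For example, a single resource can be placed in maintenance mode through its
``maintenance`` meta-attribute; while this is in effect, the recurring monitor below is
paused (the IDs are illustrative):

.. topic:: Sketch of a resource placed in maintenance mode

   .. code-block:: xml

      <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
        <meta_attributes id="public-ip-meta">
          <nvpair id="public-ip-maintenance" name="maintenance" value="true"/>
        </meta_attributes>
        <operations>
          <op id="public-ip-monitor" name="monitor" interval="60s"/>
        </operations>
        <instance_attributes id="params-public-ip">
          <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
        </instance_attributes>
      </primitive>
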
-.. _s-operation-defaults:
-
-Setting Global Defaults for Operations
-______________________________________
-
-You can change the global default values for operation properties
-in a given cluster. These are defined in an ``op_defaults`` section
-of the CIB's ``configuration`` section, and can be set with
-``crm_attribute``. For example,
-
-.. code-block:: none
-
- # crm_attribute --type op_defaults --name timeout --update 20s
-
-would default each operation's ``timeout`` to 20 seconds. If an
-operation's definition also includes a value for ``timeout``, then that
-value would be used for that operation instead.
-
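The CIB XML produced by such a command looks roughly like this (the ``id`` values are
illustrative):

.. topic:: Sketch of an ``op_defaults`` section

   .. code-block:: xml

      <op_defaults>
        <meta_attributes id="op-defaults-meta">
          <nvpair id="op-defaults-timeout" name="timeout" value="20s"/>
        </meta_attributes>
      </op_defaults>
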
-When Implicit Operations Take a Long Time
-_________________________________________
-
-The cluster will always perform a number of implicit operations: ``start``,
-``stop`` and a non-recurring ``monitor`` operation used at startup to check
-whether the resource is already active. If one of these is taking too long,
-then you can create an entry for them and specify a longer timeout.
-
-.. topic:: An OCF resource with custom timeouts for its implicit actions
-
- .. code-block:: xml
-
- <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
- <operations>
- <op id="public-ip-startup" name="monitor" interval="0" timeout="90s"/>
- <op id="public-ip-start" name="start" interval="0" timeout="180s"/>
- <op id="public-ip-stop" name="stop" interval="0" timeout="15min"/>
- </operations>
- <instance_attributes id="params-public-ip">
- <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
- </instance_attributes>
- </primitive>
-
-Multiple Monitor Operations
-___________________________
-
-Provided no two operations (for a single resource) have the same name
-and interval, you can have as many ``monitor`` operations as you like.
-In this way, you can do a superficial health check every minute and
-progressively more intense ones at higher intervals.
-
-To tell the resource agent what kind of check to perform, you need to
-provide each monitor with a different value for a common parameter.
-The OCF standard creates a special parameter called ``OCF_CHECK_LEVEL``
-for this purpose and dictates that it is "made available to the
-resource agent without the normal ``OCF_RESKEY`` prefix".
-
-Whatever name you choose, you can specify it by adding an
-``instance_attributes`` block to the ``op`` tag. It is up to each
-resource agent to look for the parameter and decide how to use it.
-
-.. topic:: An OCF resource with two recurring health checks, performing
- different levels of checks specified via ``OCF_CHECK_LEVEL``.
-
- .. code-block:: xml
-
- <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
- <operations>
- <op id="public-ip-health-60" name="monitor" interval="60">
- <instance_attributes id="params-public-ip-depth-60">
- <nvpair id="public-ip-depth-60" name="OCF_CHECK_LEVEL" value="10"/>
- </instance_attributes>
- </op>
- <op id="public-ip-health-300" name="monitor" interval="300">
- <instance_attributes id="params-public-ip-depth-300">
- <nvpair id="public-ip-depth-300" name="OCF_CHECK_LEVEL" value="20"/>
- </instance_attributes>
- </op>
- </operations>
- <instance_attributes id="params-public-ip">
- <nvpair id="public-ip-level" name="ip" value="192.0.2.2"/>
- </instance_attributes>
- </primitive>
-
-Disabling a Monitor Operation
-_____________________________
-
-The easiest way to stop a recurring monitor is to just delete it.
-However, there can be times when you only want to disable it
-temporarily. In such cases, simply add ``enabled=false`` to the
-operation's definition.
-
-.. topic:: Example of an OCF resource with a disabled health check
-
- .. code-block:: xml
-
- <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
- <operations>
- <op id="public-ip-check" name="monitor" interval="60s" enabled="false"/>
- </operations>
- <instance_attributes id="params-public-ip">
- <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
- </instance_attributes>
- </primitive>
-
-This can be achieved from the command line by executing:
-
-.. code-block:: none
-
- # cibadmin --modify --xml-text '<op id="public-ip-check" enabled="false"/>'
-
-Once you've done whatever you needed to do, you can then re-enable it with
-
-.. code-block:: none
-
- # cibadmin --modify --xml-text '<op id="public-ip-check" enabled="true"/>'
-
-.. [#] Currently, anyway. Automatic monitoring operations may be added in a future
- version of Pacemaker.
diff --git a/doc/sphinx/Pacemaker_Explained/reusing-configuration.rst b/doc/sphinx/Pacemaker_Explained/reusing-configuration.rst
index 0f34f84..06c00f0 100644
--- a/doc/sphinx/Pacemaker_Explained/reusing-configuration.rst
+++ b/doc/sphinx/Pacemaker_Explained/reusing-configuration.rst
@@ -330,6 +330,11 @@ resources.
A single configuration element can be listed in any number of tags.
+.. important::
+
+   If listing nodes in a tag, you must list the node's ``id``, not its name.
+
+
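For example, a tag that groups two cluster nodes references their numeric ``id``
values rather than their ``uname`` values (the IDs below are hypothetical):

.. topic:: Sketch of a tag listing nodes by ``id``

   .. code-block:: xml

      <tags>
        <tag id="machines-with-backup-power">
          <obj_ref id="1"/>
          <obj_ref id="2"/>
        </tag>
      </tags>
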
Using Tags in Constraints and Resource Sets
___________________________________________
diff --git a/doc/sphinx/Pacemaker_Explained/status.rst b/doc/sphinx/Pacemaker_Explained/status.rst
index 2d7dd7e..6384eda 100644
--- a/doc/sphinx/Pacemaker_Explained/status.rst
+++ b/doc/sphinx/Pacemaker_Explained/status.rst
@@ -33,7 +33,7 @@ Users are highly recommended *not* to modify any part of a node's
state *directly*. The cluster will periodically regenerate the entire
section from authoritative sources, so any changes should be done
with the tools appropriate to those sources.
-
+
.. table:: **Authoritative Sources for State Information**
:widths: 1 1
@@ -48,9 +48,7 @@ with the tools appropriate to those sources.
+----------------------+----------------------+
The fields used in the ``node_state`` objects are named as they are
-largely for historical reasons and are rooted in Pacemaker's origins
-as the resource manager for the older Heartbeat project. They have remained
-unchanged to preserve compatibility with older versions.
+largely for historical reasons, to maintain compatibility with older versions.
.. table:: **Node Status Fields**
:widths: 1 3
@@ -147,8 +145,8 @@ all known resources have been checked for on this machine (``probe_complete``).
Operation History
#################
-A node's resource history is held in the ``lrm_resources`` tag (a child
-of the ``lrm`` tag). The information stored here includes enough
+A node's resource history is held in the ``lrm_resources`` element (a child
+of the ``lrm`` element). The information stored here includes enough
information for the cluster to stop the resource safely if it is
removed from the ``configuration`` section. Specifically, the resource's
``id``, ``class``, ``type`` and ``provider`` are stored.
@@ -159,11 +157,9 @@ removed from the ``configuration`` section. Specifically, the resource's
<lrm_resource id="apcstonith" type="fence_apc_snmp" class="stonith"/>
-Additionally, we store the last job for every combination of
-``resource``, ``action`` and ``interval``. The concatenation of the values in
-this tuple are used to create the id of the ``lrm_rsc_op`` object.
+Additionally, we store history entries for certain actions.
-.. table:: **Contents of an lrm_rsc_op job**
+.. table:: **Attributes of an lrm_rsc_op element**
:class: longtable
:widths: 1 3
@@ -174,78 +170,78 @@ this tuple are used to create the id of the ``lrm_rsc_op`` object.
| | single: id; action status |
| | single: action; status, id |
| | |
- | | Identifier for the job constructed from the resource's |
- | | ``operation`` and ``interval``. |
+ | | Identifier for the history entry constructed from the |
+ | | resource ID, action name, and operation interval. |
+------------------+----------------------------------------------------------+
| call-id | .. index:: |
| | single: call-id; action status |
| | single: action; status, call-id |
| | |
- | | The job's ticket number. Used as a sort key to determine |
- | | the order in which the jobs were executed. |
+ | | A node-specific counter used to determine the order in |
+ | | which actions were executed. |
+------------------+----------------------------------------------------------+
| operation | .. index:: |
| | single: operation; action status |
| | single: action; status, operation |
| | |
- | | The action the resource agent was invoked with. |
+ | | The action name the resource agent was invoked with. |
+------------------+----------------------------------------------------------+
| interval | .. index:: |
| | single: interval; action status |
| | single: action; status, interval |
| | |
| | The frequency, in milliseconds, at which the operation |
- | | will be repeated. A one-off job is indicated by 0. |
+ | | will be repeated. One-time execution is indicated by 0. |
+------------------+----------------------------------------------------------+
| op-status | .. index:: |
| | single: op-status; action status |
| | single: action; status, op-status |
| | |
- | | The job's status. Generally this will be either 0 (done) |
- | | or -1 (pending). Rarely used in favor of ``rc-code``. |
+ | | The execution status of this action. The meanings of |
+ | | these codes are internal to Pacemaker. |
+------------------+----------------------------------------------------------+
| rc-code | .. index:: |
| | single: rc-code; action status |
| | single: action; status, rc-code |
| | |
- | | The job's result. Refer to the *Resource Agents* chapter |
- | | of *Pacemaker Administration* for details on what the |
- | | values here mean and how they are interpreted. |
+ | | The resource agent's exit status for this action. Refer |
+ | | to the *Resource Agents* chapter of |
+ | | *Pacemaker Administration* for how these values are |
+ | | interpreted. |
+------------------+----------------------------------------------------------+
| last-rc-change | .. index:: |
| | single: last-rc-change; action status |
| | single: action; status, last-rc-change |
| | |
| | Machine-local date/time, in seconds since epoch, at |
- | | which the job first returned the current value of |
+ | | which the action first returned the current value of |
| | ``rc-code``. For diagnostic purposes. |
+------------------+----------------------------------------------------------+
| exec-time | .. index:: |
| | single: exec-time; action status |
| | single: action; status, exec-time |
| | |
- | | Time, in milliseconds, that the job was running for. |
+ | | Time, in milliseconds, that the action was running for. |
| | For diagnostic purposes. |
+------------------+----------------------------------------------------------+
| queue-time | .. index:: |
| | single: queue-time; action status |
| | single: action; status, queue-time |
| | |
- | | Time, in seconds, that the job was queued for in the |
+ | | Time, in seconds, that the action was queued for in the |
| | local executor. For diagnostic purposes. |
+------------------+----------------------------------------------------------+
| crm_feature_set | .. index:: |
| | single: crm_feature_set; action status |
| | single: action; status, crm_feature_set |
| | |
- | | The version which this job description conforms to. Used |
- | | when processing ``op-digest``. |
+ | | The Pacemaker feature set used to record this entry. |
+------------------+----------------------------------------------------------+
| transition-key | .. index:: |
| | single: transition-key; action status |
| | single: action; status, transition-key |
| | |
- | | A concatenation of the job's graph action number, the |
+ | | A concatenation of the action's graph action number, the |
| | graph number, the expected result and the UUID of the |
| | controller instance that scheduled it. This is used to |
| | construct ``transition-magic`` (below). |
@@ -254,13 +250,13 @@ this tuple are used to create the id of the ``lrm_rsc_op`` object.
| | single: transition-magic; action status |
| | single: action; status, transition-magic |
| | |
- | | A concatenation of the job's ``op-status``, ``rc-code`` |
+ | | A concatenation of ``op-status``, ``rc-code`` |
| | and ``transition-key``. Guaranteed to be unique for the |
| | life of the cluster (which ensures it is part of CIB |
| | update notifications) and contains all the information |
| | needed for the controller to correctly analyze and |
- | | process the completed job. Most importantly, the |
- | | decomposed elements tell the controller if the job |
+ | | process the completed action. Most importantly, the |
+ | | decomposed elements tell the controller if the history |
| | entry was expected and whether it failed. |
+------------------+----------------------------------------------------------+
| op-digest | .. index:: |
@@ -268,7 +264,7 @@ this tuple are used to create the id of the ``lrm_rsc_op`` object.
| | single: action; status, op-digest |
| | |
| | An MD5 sum representing the parameters passed to the |
- | | job. Used to detect changes to the configuration, to |
+ | | action. Used to detect changes to the configuration, to |
| | restart resources if necessary. |
+------------------+----------------------------------------------------------+
| crm-debug-origin | .. index:: |
@@ -296,7 +292,7 @@ ________________________________
last-rc-change="1239008085" exec-time="10" queue-time="0"/>
</lrm_resource>
-In the above example, the job is a non-recurring monitor operation
+In the above example, the action is a non-recurring monitor operation
often referred to as a "probe" for the ``apcstonith`` resource.
The cluster schedules probes for every configured resource on a node when
@@ -308,16 +304,16 @@ the 2nd graph produced by this instance of the controller
(2668bbeb-06d5-40f9-936d-24cb7f87006a).
The third field of the ``transition-key`` contains a 7, which indicates
-that the job expects to find the resource inactive. By looking at the ``rc-code``
-property, we see that this was the case.
+that the cluster expects to find the resource inactive. By looking at the
+``rc-code`` property, we see that this was the case.
-As that is the only job recorded for this node, we can conclude that
+As that is the only action recorded for this node, we can conclude that
the cluster started the resource elsewhere.
Complex Operation History Example
_________________________________
-.. topic:: Resource history of a ``pingd`` clone with multiple jobs
+.. topic:: Resource history of a ``pingd`` clone with multiple entries
.. code-block:: xml
@@ -344,7 +340,7 @@ _________________________________
last-rc-change="1239008085" exec-time="20" queue-time="0"/>
</lrm_resource>
-When more than one job record exists, it is important to first sort
+When more than one history entry exists, it is important to first sort
them by ``call-id`` before interpreting them.
Once sorted, the above example can be summarized as:
@@ -354,7 +350,7 @@ Once sorted, the above example can be summarized as:
#. A start operation returning 0 (success), with a ``call-id`` of 33
#. A recurring monitor returning 0 (success), with a ``call-id`` of 34
-The cluster processes each job record to build up a picture of the
+The cluster processes each history entry to build up a picture of the
resource's state. After the first and second entries, it is
considered stopped, and after the third it is considered active.
diff --git a/doc/sphinx/Pacemaker_Explained/utilization.rst b/doc/sphinx/Pacemaker_Explained/utilization.rst
index 93c67cd..87eef60 100644
--- a/doc/sphinx/Pacemaker_Explained/utilization.rst
+++ b/doc/sphinx/Pacemaker_Explained/utilization.rst
@@ -4,19 +4,19 @@ Utilization and Placement Strategy
----------------------------------
Pacemaker decides where to place a resource according to the resource
-allocation scores on every node. The resource will be allocated to the
+assignment scores on every node. The resource will be assigned to the
node where the resource has the highest score.
-If the resource allocation scores on all the nodes are equal, by the default
+If the resource assignment scores on all the nodes are equal, by the default
placement strategy, Pacemaker will choose a node with the least number of
-allocated resources for balancing the load. If the number of resources on each
+assigned resources for balancing the load. If the number of resources on each
node is equal, the first eligible node listed in the CIB will be chosen to run
the resource.
Often, in real-world situations, different resources use significantly
different proportions of a node's capacities (memory, I/O, etc.).
We cannot balance the load ideally just according to the number of resources
-allocated to a node. Besides, if resources are placed such that their combined
+assigned to a node. Besides, if resources are placed such that their combined
requirements exceed the provided capacity, they may fail to start completely or
run with degraded performance.
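As a reminder of how capacity and requirements are expressed, a node and a resource can
each carry a ``utilization`` block; the attribute names and values below are arbitrary,
and they are only considered when ``placement-strategy`` is something other than
``default``:

.. topic:: Sketch of node capacity and resource requirements

   .. code-block:: xml

      <node id="1" uname="node1">
        <utilization id="node1-utilization">
          <nvpair id="node1-utilization-cpu" name="cpu" value="8"/>
          <nvpair id="node1-utilization-memory" name="memory" value="16384"/>
        </utilization>
      </node>

      <primitive id="rsc-medium" class="ocf" provider="pacemaker" type="Dummy">
        <utilization id="rsc-medium-utilization">
          <nvpair id="rsc-medium-utilization-cpu" name="cpu" value="2"/>
          <nvpair id="rsc-medium-utilization-memory" name="memory" value="4096"/>
        </utilization>
      </primitive>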
@@ -119,7 +119,7 @@ Four values are available for the ``placement-strategy``:
* **default**
Utilization values are not taken into account at all.
- Resources are allocated according to allocation scores. If scores are equal,
+ Resources are assigned according to assignment scores. If scores are equal,
resources are evenly distributed across nodes.
* **utilization**
@@ -127,7 +127,7 @@ Four values are available for the ``placement-strategy``:
Utilization values are taken into account *only* when deciding whether a node
is considered eligible (i.e. whether it has sufficient free capacity to satisfy
the resource's requirements). Load-balancing is still done based on the
- number of resources allocated to a node.
+ number of resources assigned to a node.
* **balanced**
@@ -152,11 +152,11 @@ Now Pacemaker will ensure the load from your resources will be distributed
evenly throughout the cluster, without the need for convoluted sets of
colocation constraints.
-Allocation Details
+Assignment Details
##################
-Which node is preferred to get consumed first when allocating resources?
-________________________________________________________________________
+Which node is preferred to get consumed first when assigning resources?
+_______________________________________________________________________
* The node with the highest node weight gets consumed first. Node weight
is a score maintained by the cluster to represent node health.
@@ -164,18 +164,18 @@ ________________________________________________________________________
* If multiple nodes have the same node weight:
* If ``placement-strategy`` is ``default`` or ``utilization``,
- the node that has the least number of allocated resources gets consumed first.
+ the node that has the least number of assigned resources gets consumed first.
- * If their numbers of allocated resources are equal,
+ * If their numbers of assigned resources are equal,
the first eligible node listed in the CIB gets consumed first.
* If ``placement-strategy`` is ``balanced``,
the node that has the most free capacity gets consumed first.
* If the free capacities of the nodes are equal,
- the node that has the least number of allocated resources gets consumed first.
+ the node that has the least number of assigned resources gets consumed first.
- * If their numbers of allocated resources are equal,
+ * If their numbers of assigned resources are equal,
the first eligible node listed in the CIB gets consumed first.
* If ``placement-strategy`` is ``minimal``,
@@ -201,17 +201,17 @@ Which resource is preferred to be assigned first?
_________________________________________________
* The resource that has the highest ``priority`` (see :ref:`resource_options`) gets
- allocated first.
+ assigned first.
* If their priorities are equal, check whether they are already running. The
- resource that has the highest score on the node where it's running gets allocated
+ resource that has the highest score on the node where it's running gets assigned
first, to prevent resource shuffling.
* If the scores above are equal or the resources are not running, the resource that has
- the highest score on the preferred node gets allocated first.
+ the highest score on the preferred node gets assigned first.
* If the scores above are equal, the first runnable resource listed in the CIB
- gets allocated first.
+ gets assigned first.
Limitations and Workarounds
###########################
@@ -233,9 +233,9 @@ services stopped.
In the contrived example at the start of this chapter:
-* ``rsc-small`` would be allocated to ``node1``
+* ``rsc-small`` would be assigned to ``node1``
-* ``rsc-medium`` would be allocated to ``node2``
+* ``rsc-medium`` would be assigned to ``node2``
* ``rsc-large`` would remain inactive
diff --git a/doc/sphinx/Pacemaker_Remote/alternatives.rst b/doc/sphinx/Pacemaker_Remote/alternatives.rst
index 83ed67c..adbdc99 100644
--- a/doc/sphinx/Pacemaker_Remote/alternatives.rst
+++ b/doc/sphinx/Pacemaker_Remote/alternatives.rst
@@ -78,13 +78,8 @@ using virtual machines. Key differences:
technology -- for example, the ``libvirt-daemon-lxc`` package to get the
`libvirt-lxc <http://libvirt.org/drvlxc.html>`_ driver for LXC containers.
-* Libvirt XML definitions must be generated for the containers. The
- ``pacemaker-cts`` package includes a script for this purpose,
- ``/usr/share/pacemaker/tests/cts/lxc_autogen.sh``. Run it with the
- ``--help`` option for details on how to use it. It is intended for testing
- purposes only, and hardcodes various parameters that would need to be set
- appropriately in real usage. Of course, you can create XML definitions
- manually, following the appropriate libvirt driver documentation.
+* Libvirt XML definitions must be generated for the containers. You can create
+ XML definitions manually, following the appropriate libvirt driver documentation.
* To share the authentication key, either share the host's ``/etc/pacemaker``
directory with the container, or copy the key into the container's
diff --git a/doc/sphinx/Pacemaker_Remote/baremetal-tutorial.rst b/doc/sphinx/Pacemaker_Remote/baremetal-tutorial.rst
index a3c0fbe..7c23bd6 100644
--- a/doc/sphinx/Pacemaker_Remote/baremetal-tutorial.rst
+++ b/doc/sphinx/Pacemaker_Remote/baremetal-tutorial.rst
@@ -109,7 +109,7 @@ Start and enable the ``pcsd`` daemon on the remote node.
[root@remote1 ~]# systemctl enable pcsd
Created symlink /etc/systemd/system/multi-user.target.wants/pcsd.service → /usr/lib/systemd/system/pcsd.service.
-Next, set a password for the ``hacluster`` user on the remote node
+Next, set a password for the |CRM_DAEMON_USER| user on the remote node
.. code-block:: none
diff --git a/doc/sphinx/Pacemaker_Remote/kvm-tutorial.rst b/doc/sphinx/Pacemaker_Remote/kvm-tutorial.rst
index 253149e..ef09882 100644
--- a/doc/sphinx/Pacemaker_Remote/kvm-tutorial.rst
+++ b/doc/sphinx/Pacemaker_Remote/kvm-tutorial.rst
@@ -254,7 +254,7 @@ Start and enable the ``pcsd`` daemon on the guest.
[root@guest1 ~]# systemctl enable pcsd
Created symlink /etc/systemd/system/multi-user.target.wants/pcsd.service → /usr/lib/systemd/system/pcsd.service.
-Next, set a password for the ``hacluster`` user on the guest.
+Next, set a password for the |CRM_DAEMON_USER| user on the guest.
.. code-block:: none
diff --git a/doc/sphinx/conf.py.in b/doc/sphinx/conf.py.in
index 7d843d8..556eb72 100644
--- a/doc/sphinx/conf.py.in
+++ b/doc/sphinx/conf.py.in
@@ -30,6 +30,16 @@ doc_license += " version 4.0 or later (CC-BY-SA v4.0+)"
rst_prolog="""
.. |CFS_DISTRO| replace:: AlmaLinux
.. |CFS_DISTRO_VER| replace:: 9
+.. |CRM_BLACKBOX_DIR| replace:: ``%CRM_BLACKBOX_DIR%``
+.. |CRM_DAEMON_GROUP| replace:: ``%CRM_DAEMON_GROUP%``
+.. |CRM_DAEMON_USER| replace:: ``%CRM_DAEMON_USER%``
+.. |CRM_DAEMON_USER_RAW| replace:: %CRM_DAEMON_USER%
+.. |CRM_SCHEMA_DIRECTORY| replace:: %CRM_SCHEMA_DIRECTORY%
+.. |PCMK_AUTHKEY_FILE| replace:: %PACEMAKER_CONFIG_DIR%/authkey
+.. |PCMK_CONFIG_FILE| replace:: ``%CONFIGDIR%/pacemaker``
+.. |PCMK_INIT_ENV_FILE| replace:: ``%PACEMAKER_CONFIG_DIR%/pcmk-init.env``
+.. |PCMK_LOG_FILE| replace:: %CRM_LOG_DIR%/pacemaker.log
+.. |PCMK_GNUTLS_PRIORITIES| replace:: %PCMK_GNUTLS_PRIORITIES%
.. |REMOTE_DISTRO| replace:: AlmaLinux
.. |REMOTE_DISTRO_VER| replace:: 9
"""
diff --git a/etc/Makefile.am b/etc/Makefile.am
index b810f82..b90bb50 100644
--- a/etc/Makefile.am
+++ b/etc/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2021-2022 the Pacemaker project contributors
+# Copyright 2021-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -10,7 +10,8 @@
MAINTAINERCLEANFILES = Makefile.in
configdir = @CONFIGDIR@
-CONFIGS = crm_mon pacemaker
+CONFIGS = crm_mon \
+ pacemaker
if !BUILD_SYSTEMD
initdir = $(INITDIR)
@@ -23,6 +24,7 @@ logrotate_DATA = logrotate.d/pacemaker
EXTRA_DIST = $(foreach f,$(CONFIGS),sysconfig/$(f))
# Don't overwrite user's existing config files
+.PHONY: install-data-local
install-data-local:
$(AM_V_at)$(MKDIR_P) $(DESTDIR)$(configdir)
$(AM_V_at)for f in $(CONFIGS); do \
@@ -31,6 +33,7 @@ install-data-local:
$(INSTALL_DATA) "$(srcdir)/sysconfig/$$f" "$$dest"; \
done
+.PHONY: uninstall-local
uninstall-local:
$(AM_V_at)for f in $(CONFIGS); do \
dest="$(DESTDIR)$(configdir)/$$f"; \
diff --git a/etc/sysconfig/pacemaker.in b/etc/sysconfig/pacemaker.in
index 3b03ad6..0c3609d 100644
--- a/etc/sysconfig/pacemaker.in
+++ b/etc/sysconfig/pacemaker.in
@@ -81,6 +81,17 @@
# Default: PCMK_debug="no"
# Example: PCMK_debug="pacemakerd,pacemaker-execd"
+# PCMK_stderr (Advanced Use Only)
+#
+# Whether to send daemon log messages to stderr. This would be useful only
+# during troubleshooting, when starting Pacemaker manually on the command line.
+#
+# Setting this option in this file is pointless, since this file is not read
+# when starting Pacemaker manually. However, it can be set directly as an
+# environment variable on the command line.
+#
+# Default: PCMK_stderr="no"
+
# PCMK_trace_functions (Advanced Use Only)
#
# Send debug and trace severity messages from these (comma-separated)
@@ -137,18 +148,24 @@
# Example: PCMK_trace_blackbox="remote.c:144,remote.c:149"
-## Node start state
+## Option overrides
# PCMK_node_start_state
#
# By default, the local host will join the cluster in an online or standby
# state when Pacemaker first starts depending on whether it was previously put
# into standby mode. If this variable is set to "standby" or "online", it will
-# force the local host to join in the specified state. This has no effect on
-# Pacemaker Remote nodes.
+# force the local host to join in the specified state.
#
# Default: PCMK_node_start_state="default"
+# PCMK_node_action_limit
+#
+# Specify the maximum number of jobs that can be scheduled on this node. If set,
+# this overrides the node-action-limit cluster property for this node.
+#
+# Default: PCMK_node_action_limit=""
+
## Crash Handling
@@ -179,8 +196,8 @@
#
# Use the contents of this file as the authorization key to use with Pacemaker
# Remote connections. This file must be readable by Pacemaker daemons (that is,
-# it must allow read permissions to either the hacluster user or the haclient
-# group), and its contents must be identical on all nodes.
+# it must allow read permissions to either the @CRM_DAEMON_USER@ user or the
+# @CRM_DAEMON_GROUP@ group), and its contents must be identical on all nodes.
#
# Default: PCMK_authkey_location="@PACEMAKER_CONFIG_DIR@/authkey"
@@ -203,6 +220,30 @@
#
# Default: PCMK_remote_port="3121"
+# PCMK_remote_pid1 (Advanced Use Only)
+#
+# When a bundle resource's "run-command" option is left to default, Pacemaker
+# Remote runs as PID 1 in the bundle's containers. When it does so, it loads
+# environment variables from the container's
+# @PACEMAKER_CONFIG_DIR@/pcmk-init.env and performs the PID 1 responsibility of
+# reaping dead subprocesses.
+#
+# This option controls whether those actions are performed when Pacemaker
+# Remote is not running as PID 1. It is intended primarily for developer testing
+# but can be useful when "run-command" is set to a separate, custom PID 1
+# process that launches Pacemaker Remote.
+#
+# * If set to "full", Pacemaker Remote loads environment variables from
+# @PACEMAKER_CONFIG_DIR@/pcmk-init.env and reaps dead subprocesses.
+# * If set to "vars", Pacemaker Remote loads environment variables from
+# @PACEMAKER_CONFIG_DIR@/pcmk-init.env but does not reap dead subprocesses.
+# * If set to "default", Pacemaker Remote performs neither action.
+#
+# If Pacemaker Remote is running as PID 1, this option is ignored, and the
+# behavior is the same as for "full".
+#
+# Default: PCMK_remote_pid1="default"
+
# PCMK_tls_priorities (Advanced Use Only)
#
# These GnuTLS cipher priorities will be used for TLS connections (whether for
@@ -235,7 +276,7 @@
# the value must be lowered in order for the client's GnuTLS library to accept
# a connection to an older server.
#
-# Default: PCMK_dh_min_bits="1024"
+# Default: PCMK_dh_min_bits="0" (no minimum)
# PCMK_dh_max_bits (Advanced Use Only)
#
@@ -252,7 +293,7 @@
#
# Clients do not use PCMK_dh_max_bits.
#
-# Default: PCMK_dh_max_bits="2048"
+# Default: PCMK_dh_max_bits="0" (no maximum)
## Inter-process Communication
@@ -277,6 +318,19 @@
# Default: PCMK_ipc_buffer="131072"
+## Cluster type
+
+# PCMK_cluster_type (Advanced Use Only)
+#
+# Specify the cluster layer to be used. If unset, Pacemaker will detect and use
+# a supported cluster layer, if available. Currently, "corosync" is the only
+# supported cluster layer. If multiple layers are supported in the future, this
+# will allow overriding Pacemaker's automatic detection to select a specific
+# one.
+#
+# Default: PCMK_cluster_type=""
+
+
## Developer Options
# PCMK_schema_directory (Advanced Use Only)
diff --git a/include/Makefile.am b/include/Makefile.am
index dfd7085..6618c7a 100644
--- a/include/Makefile.am
+++ b/include/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2003-2019 the Pacemaker project contributors
+# Copyright 2003-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -7,14 +7,15 @@
# or later (GPLv2+) WITHOUT ANY WARRANTY.
#
-MAINTAINERCLEANFILES = Makefile.in config.h.in
+MAINTAINERCLEANFILES = Makefile.in \
+ config.h.in
-noinst_HEADERS = config.h \
- crm_internal.h \
- doxygen.h \
- pacemaker.h \
- pacemaker-internal.h \
- portability.h \
+noinst_HEADERS = config.h \
+ crm_internal.h \
+ doxygen.h \
+ pacemaker.h \
+ pacemaker-internal.h \
+ portability.h \
gettext.h
pkginclude_HEADERS = crm_config.h
@@ -24,16 +25,17 @@ SUBDIRS = crm pcmki
GETTEXT_H ?= $(datadir)/gettext/gettext.h
+.PHONY: update-gettext
update-gettext:
@if [ ! -e "$(GETTEXT_H)" ]; then \
echo "$(GETTEXT_H) not found"; \
else \
cp "$(GETTEXT_H)" gettext.h; \
- git diff --quiet gettext.h 2>/dev/null; \
+ "$(GIT)" diff --quiet gettext.h 2>/dev/null; \
if [ $$? -eq 0 ]; then \
echo "No update needed"; \
else \
- git add gettext.h; \
+ "$(GIT)" add gettext.h; \
echo 'Review changes then run:'; \
echo 'git commit -m "Low: NLS: update gettext.h from upstream"'; \
fi \
diff --git a/include/crm/Makefile.am b/include/crm/Makefile.am
index 6dd52fd..95564b8 100644
--- a/include/crm/Makefile.am
+++ b/include/crm/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2004-2021 the Pacemaker project contributors
+# Copyright 2004-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,12 +11,23 @@ MAINTAINERCLEANFILES = Makefile.in
headerdir=$(pkgincludedir)/crm
-header_HEADERS = cib.h cluster.h compatibility.h crm.h \
- lrmd.h msg_xml.h services.h stonith-ng.h \
+header_HEADERS = cib.h \
+ cluster.h \
+ compatibility.h \
+ crm.h \
crm_compat.h \
+ lrmd.h \
+ lrmd_events.h \
+ msg_xml.h \
msg_xml_compat.h \
- services_compat.h
+ services.h \
+ services_compat.h \
+ stonith-ng.h
-noinst_HEADERS = lrmd_internal.h services_internal.h
+noinst_HEADERS = $(wildcard *_internal.h)
-SUBDIRS = common pengine cib fencing cluster
+SUBDIRS = common \
+ pengine \
+ cib \
+ fencing \
+ cluster
diff --git a/include/crm/cib/cib_types.h b/include/crm/cib/cib_types.h
index 5bd10e4..a803311 100644
--- a/include/crm/cib/cib_types.h
+++ b/include/crm/cib/cib_types.h
@@ -59,12 +59,54 @@ enum cib_call_options {
cib_discard_reply = (1 << 4),
cib_no_children = (1 << 5),
cib_xpath_address = (1 << 6),
+
+ //! \deprecated This value will be removed in a future release
cib_mixed_update = (1 << 7),
+
+ /* @COMPAT: cib_scope_local is processed only in the legacy function
+ * parse_local_options_v1().
+ *
+ * If (host == NULL):
+ * * In legacy mode, the CIB manager forwards a request to the primary
+ * instance unless cib_scope_local is set or the local node is primary.
+ * * Outside of legacy mode:
+ * * If a request modifies the CIB, the CIB manager forwards it to all
+ * nodes.
+ * * Otherwise, the CIB manager processes the request locally.
+ *
+     * There is no current use case for implementing this flag in
+ * non-legacy mode.
+ */
+
+ //! \deprecated This value will be removed in a future release
cib_scope_local = (1 << 8),
+
cib_dryrun = (1 << 9),
+
+ /*!
+ * \brief Process request when the client commits the active transaction
+ *
+ * Add the request to the client's active transaction instead of processing
+ * it immediately. If the client has no active transaction, or if the
+ * request is not supported in transactions, the call will fail.
+ *
+ * The request is added to the transaction synchronously, and the return
+ * value indicates whether it was added successfully.
+ *
+ * Refer to \p cib_api_operations_t:init_transaction() and
+ * \p cib_api_operations_t:end_transaction() for more details on CIB
+ * transactions.
+ */
+ cib_transaction = (1 << 10),
+
cib_sync_call = (1 << 12),
cib_no_mtime = (1 << 13),
+
+#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
+ //! \deprecated This value will be removed in a future release
cib_zero_copy = (1 << 14),
+#endif // !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
+
cib_inhibit_notify = (1 << 16),
#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
@@ -82,13 +124,19 @@ typedef struct cib_s cib_t;
typedef struct cib_api_operations_s {
int (*signon) (cib_t *cib, const char *name, enum cib_conn_type type);
+
+ //! \deprecated This method will be removed and should not be used
int (*signon_raw) (cib_t *cib, const char *name, enum cib_conn_type type,
int *event_fd);
+
int (*signoff) (cib_t *cib);
int (*free) (cib_t *cib);
+
+ //! \deprecated This method will be removed and should not be used
int (*set_op_callback) (cib_t *cib, void (*callback) (const xmlNode *msg,
int callid, int rc,
xmlNode *output));
+
int (*add_notify_callback) (cib_t *cib, const char *event,
void (*callback) (const char *event,
xmlNode *msg));
@@ -97,8 +145,13 @@ typedef struct cib_api_operations_s {
xmlNode *msg));
int (*set_connection_dnotify) (cib_t *cib,
void (*dnotify) (gpointer user_data));
+
+ //! \deprecated This method will be removed and should not be used
int (*inputfd) (cib_t *cib);
+
+ //! \deprecated This method will be removed and should not be used
int (*noop) (cib_t *cib, int call_options);
+
int (*ping) (cib_t *cib, xmlNode **output_data, int call_options);
int (*query) (cib_t *cib, const char *section, xmlNode **output_data,
int call_options);
@@ -141,7 +194,9 @@ typedef struct cib_api_operations_s {
int (*delete_absolute) (cib_t *cib, const char *section, xmlNode *data,
int call_options);
+ //! \deprecated This method is not implemented and should not be used
int (*quit) (cib_t *cib, int call_options);
+
int (*register_notification) (cib_t *cib, const char *callback,
int enabled);
gboolean (*register_callback) (cib_t *cib, int call_id, int timeout,
@@ -190,14 +245,85 @@ typedef struct cib_api_operations_s {
*
* \return Legacy Pacemaker return code
*
- * \note The client IDs are assigned by \p pacemaker-based when the client
- * connects. \p cib_t variants that don't connect to
- * \p pacemaker-based may never be assigned a client ID.
* \note Some variants may have only one client for both asynchronous and
* synchronous requests.
*/
int (*client_id)(const cib_t *cib, const char **async_id,
const char **sync_id);
+
+ /*!
+ * \brief Initiate an atomic CIB transaction for this client
+ *
+ * If the client has initiated a transaction and a new request's call
+ * options contain \p cib_transaction, the new request is appended to the
+ * transaction for later processing.
+ *
+ * Supported requests are those that meet the following conditions:
+ * * can be processed synchronously (with any changes applied to a working
+ * CIB copy)
+ * * are not queries
+ * * do not involve other nodes
+ * * do not affect the state of pacemaker-based itself
+ *
+ * Currently supported CIB API functions include:
+ * * \p bump_epoch()
+ * * \p create()
+ * * \p erase()
+ * * \p modify()
+ * * \p remove()
+ * * \p replace()
+ * * \p upgrade()
+ *
+ * Because the transaction is atomic, individual requests do not trigger
+ * callbacks or notifications when they are processed, and they do not
+ * receive output XML. The commit request itself can trigger callbacks and
+ * notifications if any are registered.
+ *
+ * An \c init_transaction() call is always synchronous.
+ *
+ * \param[in,out] cib CIB connection
+ *
+ * \return Legacy Pacemaker return code
+ */
+ int (*init_transaction)(cib_t *cib);
+
+ /*!
+ * \brief End and optionally commit this client's CIB transaction
+ *
+ * When a client commits a transaction, all requests in the transaction are
+ * processed in a FIFO manner until either a request fails or all requests
+ * have been processed. Changes are applied to a working copy of the CIB.
+ * If a request fails, the transaction and working CIB copy are discarded,
+ * and an error is returned. If all requests succeed, the working CIB copy
+ * replaces the initial CIB copy.
+ *
+ * Callbacks and notifications can be triggered by the commit request itself
+ * but not by the individual requests in a transaction.
+ *
+ * An \c end_transaction() call with \p commit set to \c false is always
+ * synchronous.
+ *
+ * \param[in,out] cib CIB connection
+ * \param[in] commit If \p true, commit transaction; otherwise,
+ * discard it
+ * \param[in] call_options Group of <tt>enum cib_call_options</tt>
+ * flags
+ *
+ * \return Legacy Pacemaker return code
+ */
+ int (*end_transaction)(cib_t *cib, bool commit, int call_options);
+
+ /*!
+ * \brief Set the user as whom to execute all CIB requests made via API methods
+ *
+ * By default, the value of the \c CIB_user environment variable is used if
+ * set. Otherwise, \c root is used.
+ *
+ * \param[in,out] cib CIB connection
+ * \param[in] user Name of user whose permissions to use when
+ * processing requests
+ */
+ void (*set_user)(cib_t *cib, const char *user);
} cib_api_operations_t;
struct cib_s {
@@ -211,9 +337,16 @@ struct cib_s {
void *delegate_fn;
GList *notify_list;
+
+ //! \deprecated This method will be removed in a future release
void (*op_callback) (const xmlNode *msg, int call_id, int rc,
xmlNode *output);
+
cib_api_operations_t *cmds;
+
+ xmlNode *transaction;
+
+ char *user;
};
#ifdef __cplusplus
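
To make the new transaction API above concrete, here is a minimal sketch of how a client might batch two modifications atomically. It assumes the caller has already built the xmlNode fragments to apply; the client name is illustrative only, and checking of the legacy return codes is omitted.

#include <stdbool.h>
#include <libxml/tree.h>    // xmlNode
#include <crm/cib.h>        // cib_new(), cib_delete(), cib_t, call options

/* Sketch only: queue two changes with cib_transaction, then commit them as one
 * atomic update with end_transaction(). Every call returns a legacy Pacemaker
 * return code that real code must check.
 */
static void
update_cib_atomically(xmlNode *config_change, xmlNode *resource_change)
{
    cib_t *cib = cib_new();

    cib->cmds->signon(cib, "example-client", cib_command);
    cib->cmds->init_transaction(cib);

    // Queued in the client's transaction, not applied yet
    cib->cmds->modify(cib, "crm_config", config_change, cib_transaction);
    cib->cmds->modify(cib, "resources", resource_change, cib_transaction);

    // Applied to a working CIB copy in FIFO order, then swapped in on success
    cib->cmds->end_transaction(cib, true, cib_sync_call);

    cib->cmds->signoff(cib);
    cib_delete(cib);
}
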
diff --git a/include/crm/cib/internal.h b/include/crm/cib/internal.h
index 374902b..20059ec 100644
--- a/include/crm/cib/internal.h
+++ b/include/crm/cib/internal.h
@@ -15,7 +15,6 @@
// Request types for CIB manager IPC/CPG
#define PCMK__CIB_REQUEST_SECONDARY "cib_slave"
-#define PCMK__CIB_REQUEST_ALL_SECONDARY "cib_slave_all"
#define PCMK__CIB_REQUEST_PRIMARY "cib_master"
#define PCMK__CIB_REQUEST_SYNC_TO_ALL "cib_sync"
#define PCMK__CIB_REQUEST_SYNC_TO_ONE "cib_sync_one"
@@ -32,6 +31,7 @@
#define PCMK__CIB_REQUEST_ABS_DELETE "cib_delete_alt"
#define PCMK__CIB_REQUEST_NOOP "noop"
#define PCMK__CIB_REQUEST_SHUTDOWN "cib_shutdown_req"
+#define PCMK__CIB_REQUEST_COMMIT_TRANSACT "cib_commit_transact"
# define F_CIB_CLIENTID "cib_clientid"
# define F_CIB_CALLOPTS "cib_callopt"
@@ -60,34 +60,72 @@
# define F_CIB_LOCAL_NOTIFY_ID "cib_local_notify_id"
# define F_CIB_PING_ID "cib_ping_id"
# define F_CIB_SCHEMA_MAX "cib_schema_max"
-# define F_CIB_CHANGE_SECTION "cib_change_section"
# define T_CIB "cib"
+# define T_CIB_COMMAND "cib_command"
# define T_CIB_NOTIFY "cib_notify"
/* notify sub-types */
# define T_CIB_PRE_NOTIFY "cib_pre_notify"
# define T_CIB_POST_NOTIFY "cib_post_notify"
+# define T_CIB_TRANSACTION "cib_transaction"
# define T_CIB_UPDATE_CONFIRM "cib_update_confirmation"
-# define T_CIB_REPLACE_NOTIFY "cib_refresh_notify"
/*!
* \internal
- * \enum cib_change_section_info
- * \brief Flags to indicate which sections of the CIB have changed
+ * \enum cib__op_attr
+ * \brief Flags for CIB operation attributes
*/
-enum cib_change_section_info {
- cib_change_section_none = 0, //!< No sections have changed
- cib_change_section_nodes = (1 << 0), //!< The nodes section has changed
- cib_change_section_alerts = (1 << 1), //!< The alerts section has changed
- cib_change_section_status = (1 << 2), //!< The status section has changed
+enum cib__op_attr {
+ cib__op_attr_none = 0, //!< No special attributes
+ cib__op_attr_modifies = (1 << 1), //!< Modifies CIB
+ cib__op_attr_privileged = (1 << 2), //!< Requires privileges
+ cib__op_attr_local = (1 << 3), //!< Must only be processed locally
+ cib__op_attr_replaces = (1 << 4), //!< Replaces CIB
+ cib__op_attr_writes_through = (1 << 5), //!< Writes to disk on success
+ cib__op_attr_transaction = (1 << 6), //!< Supported in a transaction
};
+/*!
+ * \internal
+ * \enum cib__op_type
+ * \brief Types of CIB operations
+ */
+enum cib__op_type {
+ cib__op_abs_delete,
+ cib__op_apply_patch,
+ cib__op_bump,
+ cib__op_commit_transact,
+ cib__op_create,
+ cib__op_delete,
+ cib__op_erase,
+ cib__op_is_primary,
+ cib__op_modify,
+ cib__op_noop,
+ cib__op_ping,
+ cib__op_primary,
+ cib__op_query,
+ cib__op_replace,
+ cib__op_secondary,
+ cib__op_shutdown,
+ cib__op_sync_all,
+ cib__op_sync_one,
+ cib__op_upgrade,
+};
gboolean cib_diff_version_details(xmlNode * diff, int *admin_epoch, int *epoch, int *updates,
int *_admin_epoch, int *_epoch, int *_updates);
gboolean cib_read_config(GHashTable * options, xmlNode * current_cib);
+typedef int (*cib__op_fn_t)(const char *, int, const char *, xmlNode *,
+ xmlNode *, xmlNode *, xmlNode **, xmlNode **);
+
+typedef struct cib__operation_s {
+ const char *name;
+ enum cib__op_type type;
+ uint32_t flags; //!< Group of <tt>enum cib__op_attr</tt> flags
+} cib__operation_t;
+
typedef struct cib_notify_client_s {
const char *event;
const char *obj_id; /* implement one day */
@@ -124,24 +162,66 @@ struct timer_rec_s {
(flags_to_clear), #flags_to_clear); \
} while (0)
-typedef int (*cib_op_t) (const char *, int, const char *, xmlNode *,
- xmlNode *, xmlNode *, xmlNode **, xmlNode **);
-
cib_t *cib_new_variant(void);
-int cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_query,
- const char *section, xmlNode * req, xmlNode * input,
- gboolean manage_counters, gboolean * config_changed,
- xmlNode * current_cib, xmlNode ** result_cib, xmlNode ** diff,
- xmlNode ** output);
-
-xmlNode *cib_create_op(int call_id, const char *op, const char *host,
- const char *section, xmlNode * data, int call_options,
- const char *user_name);
+/*!
+ * \internal
+ * \brief Check whether a given CIB client's update should trigger a refresh
+ *
+ * Here, "refresh" means that Pacemaker daemons write out their current state.
+ *
+ * If a Pacemaker daemon or one of certain Pacemaker CLI tools modifies the CIB,
+ * we can assume that the CIB hasn't diverged from the true cluster state. A
+ * "safe" CLI tool requests that all relevant daemons update their state before
+ * the tool requests any CIB modifications directly.
+ *
+ * In contrast, other "unsafe" tools (for example, \c cibadmin and external
+ * tools) may request arbitrary CIB changes.
+ *
+ * A Pacemaker daemon can write out its current state to the CIB when it's
+ * notified of an update from an unsafe client, to ensure the CIB still contains
+ * the daemon's correct state.
+ *
+ * \param[in] name CIB client name
+ *
+ * \return \c true if the CIB client should trigger a refresh, or \c false
+ * otherwise
+ */
+static inline bool
+cib__client_triggers_refresh(const char *name)
+{
+ return !crm_is_daemon_name(name)
+ && !pcmk__str_any_of(name,
+ "attrd_updater",
+ "crm_attribute",
+ "crm_node",
+ "crm_resource",
+ "crm_ticket",
+ NULL);
+}
+
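As a sketch of the intended use of cib__client_triggers_refresh() above (the surrounding names are hypothetical, not part of this header):

#include <stdbool.h>
#include <crm/cib/internal.h>   // cib__client_triggers_refresh()

/* Hypothetical call site: only updates from "unsafe" clients (not daemons and
 * not the listed CLI tools) should make daemons re-write their state.
 */
static void
note_cib_update(const char *client_name, bool *refresh_needed)
{
    if (cib__client_triggers_refresh(client_name)) {
        *refresh_needed = true;     // e.g. an update made directly by cibadmin
    }
}
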
+int cib__get_notify_patchset(const xmlNode *msg, const xmlNode **patchset);
+
+bool cib__element_in_patchset(const xmlNode *patchset, const char *element);
+
+int cib_perform_op(const char *op, int call_options, cib__op_fn_t fn,
+ bool is_query, const char *section, xmlNode *req,
+ xmlNode *input, bool manage_counters, bool *config_changed,
+ xmlNode **current_cib, xmlNode **result_cib, xmlNode **diff,
+ xmlNode **output);
+
+int cib__create_op(cib_t *cib, const char *op, const char *host,
+ const char *section, xmlNode *data, int call_options,
+ const char *user_name, const char *client_name,
+ xmlNode **op_msg);
+
+int cib__extend_transaction(cib_t *cib, xmlNode *request);
void cib_native_callback(cib_t * cib, xmlNode * msg, int call_id, int rc);
void cib_native_notify(gpointer data, gpointer user_data);
+int cib__get_operation(const char *op, const cib__operation_t **operation);
+
int cib_process_query(const char *op, int options, const char *section, xmlNode * req,
xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib,
xmlNode ** answer);
diff --git a/include/crm/cluster.h b/include/crm/cluster.h
index bceb9c2..b61fd70 100644
--- a/include/crm/cluster.h
+++ b/include/crm/cluster.h
@@ -78,6 +78,9 @@ typedef struct crm_peer_node_s {
time_t peer_lost;
char *conn_host;
+
+ time_t when_member; // Since when node has been a cluster member
+ time_t when_online; // Since when peer has been online in CPG
} crm_node_t;
void crm_peer_init(void);
@@ -133,8 +136,8 @@ enum crm_get_peer_flags {
};
gboolean send_cluster_message(const crm_node_t *node,
- enum crm_ais_msg_types service, xmlNode *data,
- gboolean ordered);
+ enum crm_ais_msg_types service,
+ const xmlNode *data, gboolean ordered);
int crm_remote_peer_cache_size(void);
@@ -174,7 +177,6 @@ char *pcmk_message_common_cs(cpg_handle_t handle, uint32_t nodeid, uint32_t pid,
const char *crm_peer_uuid(crm_node_t *node);
const char *crm_peer_uname(const char *uuid);
-void set_uuid(xmlNode *xml, const char *attr, crm_node_t *node);
enum crm_status_type {
crm_status_uname,
diff --git a/include/crm/cluster/Makefile.am b/include/crm/cluster/Makefile.am
index 96f2bd0..2500a87 100644
--- a/include/crm/cluster/Makefile.am
+++ b/include/crm/cluster/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2012-2021 the Pacemaker project contributors
+# Copyright 2012-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -10,5 +10,6 @@ MAINTAINERCLEANFILES = Makefile.in
headerdir=$(pkgincludedir)/crm/cluster
-noinst_HEADERS = internal.h election_internal.h
+noinst_HEADERS = internal.h \
+ $(wildcard *_internal.h)
header_HEADERS = compat.h
diff --git a/include/crm/cluster/compat.h b/include/crm/cluster/compat.h
index 9bf14ee..89a03fd 100644
--- a/include/crm/cluster/compat.h
+++ b/include/crm/cluster/compat.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2021 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -10,6 +10,9 @@
#ifndef PCMK__CRM_CLUSTER_COMPAT__H
# define PCMK__CRM_CLUSTER_COMPAT__H
+#include <libxml/tree.h> // xmlNode
+#include <crm/cluster.h> // crm_node_t
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -30,6 +33,9 @@ int crm_terminate_member(int nodeid, const char *uname, void *unused);
int crm_terminate_member_no_mainloop(int nodeid, const char *uname,
int *connection);
+// \deprecated Use crm_xml_add(xml, attr, crm_peer_uuid(node)) instead
+void set_uuid(xmlNode *xml, const char *attr, crm_node_t *node);
+
#ifdef __cplusplus
}
#endif
diff --git a/include/crm/cluster/internal.h b/include/crm/cluster/internal.h
index 9bc57c6..e20ee4c 100644
--- a/include/crm/cluster/internal.h
+++ b/include/crm/cluster/internal.h
@@ -124,10 +124,16 @@ void pcmk__corosync_quorum_connect(gboolean (*dispatch)(unsigned long long,
void (*destroy) (gpointer));
crm_node_t *pcmk__search_node_caches(unsigned int id, const char *uname,
uint32_t flags);
-crm_node_t *pcmk__search_cluster_node_cache(unsigned int id, const char *uname);
+crm_node_t *pcmk__search_cluster_node_cache(unsigned int id, const char *uname,
+ const char *uuid);
void pcmk__refresh_node_caches_from_cib(xmlNode *cib);
crm_node_t *pcmk__search_known_node_cache(unsigned int id, const char *uname,
uint32_t flags);
+crm_node_t *pcmk__get_peer(unsigned int id, const char *uname,
+ const char *uuid);
+crm_node_t *pcmk__get_peer_full(unsigned int id, const char *uname,
+ const char *uuid, int flags);
+
#endif
diff --git a/include/crm/common/Makefile.am b/include/crm/common/Makefile.am
index 7d417e4..83a4197 100644
--- a/include/crm/common/Makefile.am
+++ b/include/crm/common/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2004-2022 the Pacemaker project contributors
+# Copyright 2004-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,45 +11,34 @@ MAINTAINERCLEANFILES = Makefile.in
headerdir=$(pkgincludedir)/crm/common
-header_HEADERS = acl.h \
- agents.h \
- agents_compat.h \
- cib.h \
- ipc.h \
- ipc_attrd_internal.h \
- ipc_controld.h \
- ipc_pacemakerd.h \
- ipc_schedulerd.h \
- iso8601.h \
- logging.h \
- logging_compat.h \
- mainloop.h \
- mainloop_compat.h \
- nvpair.h \
- output.h \
- results.h \
- results_compat.h \
- util.h \
- util_compat.h \
- xml.h \
- xml_compat.h
+header_HEADERS = acl.h \
+ actions.h \
+ agents.h \
+ agents_compat.h \
+ cib.h \
+ ipc.h \
+ ipc_controld.h \
+ ipc_pacemakerd.h \
+ ipc_schedulerd.h \
+ iso8601.h \
+ logging.h \
+ logging_compat.h \
+ mainloop.h \
+ mainloop_compat.h \
+ nodes.h \
+ nvpair.h \
+ output.h \
+ resources.h \
+ results.h \
+ results_compat.h \
+ roles.h \
+ scheduler.h \
+ scheduler_types.h \
+ tags.h \
+ tickets.h \
+ util.h \
+ util_compat.h \
+ xml.h \
+ xml_compat.h
-noinst_HEADERS = acl_internal.h \
- alerts_internal.h \
- attrd_internal.h \
- cmdline_internal.h \
- health_internal.h \
- internal.h \
- io_internal.h \
- ipc_internal.h \
- iso8601_internal.h \
- lists_internal.h \
- logging_internal.h \
- messages_internal.h \
- options_internal.h \
- output_internal.h \
- remote_internal.h \
- results_internal.h \
- strings_internal.h \
- unittest_internal.h \
- xml_internal.h
+noinst_HEADERS = $(wildcard *internal.h)
diff --git a/include/crm/common/action_relation_internal.h b/include/crm/common/action_relation_internal.h
new file mode 100644
index 0000000..e789131
--- /dev/null
+++ b/include/crm/common/action_relation_internal.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef PCMK__CRM_COMMON_ACTION_RELATION_INTERNAL__H
+# define PCMK__CRM_COMMON_ACTION_RELATION_INTERNAL__H
+
+/*!
+ * Flags to indicate the relationship between two actions
+ *
+ * @COMPAT The values and semantics of these flags should not be changed until
+ * the deprecated enum pe_ordering is dropped from the public API.
+ */
+enum pcmk__action_relation_flags {
+ //! No relation (compare with equality rather than bit set)
+ pcmk__ar_none = 0U,
+
+ //! Actions are ordered (optionally, if no other flags are set)
+ pcmk__ar_ordered = (1U << 0),
+
+ //! Relation applies only if 'first' cannot be part of a live migration
+ pcmk__ar_if_first_unmigratable = (1U << 1),
+
+ /*!
+ * If 'then' is required, 'first' becomes required (and becomes unmigratable
+ * if 'then' is); also, if 'first' is a stop of a blocked resource, 'then'
+ * becomes unrunnable
+ */
+ pcmk__ar_then_implies_first = (1U << 4),
+
+ /*!
+ * If 'first' is required, 'then' becomes required; if 'first' is a stop of
+ * a blocked resource, 'then' becomes unrunnable
+ */
+ pcmk__ar_first_implies_then = (1U << 5),
+
+ /*!
+ * If 'then' is required and for a promoted instance, 'first' becomes
+ * required (and becomes unmigratable if 'then' is)
+ */
+ pcmk__ar_promoted_then_implies_first = (1U << 6),
+
+ /*!
+ * 'first' is runnable only if 'then' is both runnable and migratable,
+ * and 'first' becomes required if 'then' is
+ */
+ pcmk__ar_unmigratable_then_blocks = (1U << 7),
+
+ //! 'then' is runnable (and migratable) only if 'first' is runnable
+ pcmk__ar_unrunnable_first_blocks = (1U << 8),
+
+ //! If 'first' is unrunnable, 'then' becomes a real, unmigratable action
+ pcmk__ar_first_else_then = (1U << 9),
+
+ //! If 'first' is required, 'then' action for instance on same node is too
+ pcmk__ar_first_implies_same_node_then = (1U << 10),
+
+ /*!
+ * Disable relation if 'first' is unrunnable and for an active resource,
+ * otherwise order actions and make 'then' unrunnable if 'first' is.
+ *
+ * This is used to order a bundle replica's start of its container before a
+ * probe of its remote connection resource, in case the connection uses the
+ * REMOTE_CONTAINER_HACK to replace the connection address with where the
+ * container is running.
+ */
+ pcmk__ar_nested_remote_probe = (1U << 11),
+
+ /*!
+ * If 'first' is for a blocked resource, make 'then' unrunnable.
+ *
+ * If 'then' is required, make 'first' required, make 'first' unmigratable
+ * if 'then' is unmigratable, and make 'then' unrunnable if 'first' is
+ * unrunnable.
+ *
+ * If 'then' is unrunnable and for the same resource as 'first', make
+ * 'first' required if it is runnable, and make 'first' unmigratable if
+ * 'then' is unmigratable.
+ *
+ * This is used for "stop then start primitive" (restarts) and
+ * "stop group member then stop previous member".
+ */
+ pcmk__ar_intermediate_stop = (1U << 12),
+
+ /*!
+ * The actions must be serialized if in the same transition but can be in
+ * either order. (In practice, we always arrange them as 'first' then
+ * 'then', so they end up being essentially the same as optional orderings.)
+ *
+ * @TODO Handle more intelligently -- for example, we could schedule the
+ * action with the fewest inputs first, so we're more likely to execute at
+ * least one if there is a failure during the transition. Or, we could
+ * prefer certain action types over others, or base it on resource priority.
+ */
+ pcmk__ar_serialize = (1U << 14),
+
+ //! Relation applies only if actions are on same node
+ pcmk__ar_if_on_same_node = (1U << 15),
+
+ //! If 'then' is required, 'first' must be added to the transition graph
+ pcmk__ar_then_implies_first_graphed = (1U << 16),
+
+ //! If 'first' is required and runnable, 'then' must be in graph
+ pcmk__ar_first_implies_then_graphed = (1U << 17),
+
+ //! User-configured asymmetric ordering
+ pcmk__ar_asymmetric = (1U << 20),
+
+ //! Actions are ordered if on same node (or migration target for migrate_to)
+ pcmk__ar_if_on_same_node_or_target = (1U << 21),
+
+ //! 'then' action is runnable if a certain number of 'first' instances are
+ pcmk__ar_min_runnable = (1U << 22),
+
+ //! Ordering applies only if 'first' is required and on same node as 'then'
+ pcmk__ar_if_required_on_same_node = (1U << 23),
+
+ //! Ordering applies even if 'first' runs on guest node created by 'then'
+ pcmk__ar_guest_allowed = (1U << 24),
+
+ //! If 'then' action becomes required, 'first' becomes optional
+ pcmk__ar_then_cancels_first = (1U << 25),
+};
+
+typedef struct pe_action_wrapper_s pcmk__related_action_t;
+
+#endif // PCMK__CRM_COMMON_ACTION_RELATION_INTERNAL__H
diff --git a/include/crm/common/actions.h b/include/crm/common/actions.h
new file mode 100644
index 0000000..5d2784d
--- /dev/null
+++ b/include/crm/common/actions.h
@@ -0,0 +1,467 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef PCMK__CRM_COMMON_ACTIONS__H
+#define PCMK__CRM_COMMON_ACTIONS__H
+
+#include <stdbool.h> // bool
+#include <strings.h> // strcasecmp()
+#include <glib.h> // gboolean, guint
+#include <libxml/tree.h> // xmlNode
+
+#include <crm/lrmd_events.h> // lrmd_event_data_t
+
+#include <glib.h> // GList, GHashTable
+#include <libxml/tree.h> // xmlNode
+
+#include <crm/common/nodes.h>
+#include <crm/common/resources.h> // enum rsc_start_requirement, etc.
+#include <crm/common/scheduler_types.h> // pcmk_resource_t, pcmk_node_t
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*!
+ * \file
+ * \brief APIs related to actions
+ * \ingroup core
+ */
+
+//! Default timeout (in milliseconds) for non-metadata actions
+#define PCMK_DEFAULT_ACTION_TIMEOUT_MS 20000
+
+// @COMPAT We don't need a separate timeout for metadata, much less a longer one
+//! \deprecated Default timeout (in milliseconds) for metadata actions
+#define PCMK_DEFAULT_METADATA_TIMEOUT_MS 30000
+
+// Action names as strings
+#define PCMK_ACTION_CANCEL "cancel"
+#define PCMK_ACTION_CLEAR_FAILCOUNT "clear_failcount"
+#define PCMK_ACTION_CLONE_ONE_OR_MORE "clone-one-or-more"
+#define PCMK_ACTION_DELETE "delete"
+#define PCMK_ACTION_DEMOTE "demote"
+#define PCMK_ACTION_DEMOTED "demoted"
+#define PCMK_ACTION_DO_SHUTDOWN "do_shutdown"
+#define PCMK_ACTION_LIST "list"
+#define PCMK_ACTION_LRM_DELETE "lrm_delete"
+#define PCMK_ACTION_LOAD_STOPPED "load_stopped"
+#define PCMK_ACTION_MAINTENANCE_NODES "maintenance_nodes"
+#define PCMK_ACTION_META_DATA "meta-data"
+#define PCMK_ACTION_MIGRATE_FROM "migrate_from"
+#define PCMK_ACTION_MIGRATE_TO "migrate_to"
+#define PCMK_ACTION_MONITOR "monitor"
+#define PCMK_ACTION_NOTIFIED "notified"
+#define PCMK_ACTION_NOTIFY "notify"
+#define PCMK_ACTION_OFF "off"
+#define PCMK_ACTION_ON "on"
+#define PCMK_ACTION_ONE_OR_MORE "one-or-more"
+#define PCMK_ACTION_PROMOTE "promote"
+#define PCMK_ACTION_PROMOTED "promoted"
+#define PCMK_ACTION_REBOOT "reboot"
+#define PCMK_ACTION_RELOAD "reload"
+#define PCMK_ACTION_RELOAD_AGENT "reload-agent"
+#define PCMK_ACTION_RUNNING "running"
+#define PCMK_ACTION_START "start"
+#define PCMK_ACTION_STATUS "status"
+#define PCMK_ACTION_STONITH "stonith"
+#define PCMK_ACTION_STOP "stop"
+#define PCMK_ACTION_STOPPED "stopped"
+#define PCMK_ACTION_VALIDATE_ALL "validate-all"
+
+//! Possible actions (including some pseudo-actions)
+enum action_tasks {
+ pcmk_action_unspecified = 0, //!< Unspecified or unknown action
+ pcmk_action_monitor, //!< Monitor
+
+ // Each "completed" action must be the regular action plus 1
+
+ pcmk_action_stop, //!< Stop
+ pcmk_action_stopped, //!< Stop completed
+
+ pcmk_action_start, //!< Start
+ pcmk_action_started, //!< Start completed
+
+ pcmk_action_notify, //!< Notify
+ pcmk_action_notified, //!< Notify completed
+
+ pcmk_action_promote, //!< Promote
+ pcmk_action_promoted, //!< Promoted
+
+ pcmk_action_demote, //!< Demote
+ pcmk_action_demoted, //!< Demoted
+
+ pcmk_action_shutdown, //!< Shut down node
+ pcmk_action_fence, //!< Fence node
+
+#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
+ //! \deprecated Use pcmk_action_unspecified instead
+ no_action = pcmk_action_unspecified,
+
+ //! \deprecated Use pcmk_action_monitor instead
+ monitor_rsc = pcmk_action_monitor,
+
+ //! \deprecated Use pcmk_action_stop instead
+ stop_rsc = pcmk_action_stop,
+
+ //! \deprecated Use pcmk_action_stopped instead
+ stopped_rsc = pcmk_action_stopped,
+
+ //! \deprecated Use pcmk_action_start instead
+ start_rsc = pcmk_action_start,
+
+ //! \deprecated Use pcmk_action_started instead
+ started_rsc = pcmk_action_started,
+
+ //! \deprecated Use pcmk_action_notify instead
+ action_notify = pcmk_action_notify,
+
+ //! \deprecated Use pcmk_action_notified instead
+ action_notified = pcmk_action_notified,
+
+ //! \deprecated Use pcmk_action_promote instead
+ action_promote = pcmk_action_promote,
+
+ //! \deprecated Use pcmk_action_promoted instead
+ action_promoted = pcmk_action_promoted,
+
+ //! \deprecated Use pcmk_action_demote instead
+ action_demote = pcmk_action_demote,
+
+ //! \deprecated Use pcmk_action_demoted instead
+ action_demoted = pcmk_action_demoted,
+
+ //! \deprecated Use pcmk_action_shutdown instead
+ shutdown_crm = pcmk_action_shutdown,
+
+ //! \deprecated Use pcmk_action_fence instead
+ stonith_node = pcmk_action_fence,
+#endif
+};
+
+//! Possible responses to a resource action failure
+enum action_fail_response {
+ /* The order is (partially) significant here; the values from
+ * pcmk_on_fail_ignore through pcmk_on_fail_fence_node are in order of
+ * increasing severity.
+ *
+ * @COMPAT The values should be ordered and numbered per the "TODO" comments
+ * below, so all values are in order of severity and there is room for
+ * future additions, but that would break API compatibility.
+ * @TODO For now, we just use a function to compare the values specially, but
+ * at the next compatibility break, we should arrange things
+ * properly so we can compare with less than and greater than.
+ */
+
+ // @TODO Define as 10
+ pcmk_on_fail_ignore = 0, //!< Act as if failure didn't happen
+
+ // @TODO Define as 30
+ pcmk_on_fail_restart = 1, //!< Restart resource
+
+ // @TODO Define as 60
+ pcmk_on_fail_ban = 2, //!< Ban resource from current node
+
+ // @TODO Define as 70
+ pcmk_on_fail_block = 3, //!< Treat resource as unmanaged
+
+ // @TODO Define as 80
+ pcmk_on_fail_stop = 4, //!< Stop resource and leave stopped
+
+ // @TODO Define as 90
+ pcmk_on_fail_standby_node = 5, //!< Put resource's node in standby
+
+ // @TODO Define as 100
+ pcmk_on_fail_fence_node = 6, //!< Fence resource's node
+
+ // @COMPAT Values below here are out of desired order for API compatibility
+
+ // @TODO Define as 50
+ pcmk_on_fail_restart_container = 7, //!< Restart resource's container
+
+ // @TODO Define as 40
+ /*!
+ * Fence the remote node created by the resource if fencing is enabled,
+ * otherwise attempt to restart the resource (used internally for some
+ * remote connection failures).
+ */
+ pcmk_on_fail_reset_remote = 8,
+
+ // @TODO Define as 20
+ pcmk_on_fail_demote = 9, //!< Demote if promotable, else stop
+
+#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
+ //! \deprecated Use pcmk_on_fail_ignore instead
+ action_fail_ignore = pcmk_on_fail_ignore,
+
+ //! \deprecated Use pcmk_on_fail_restart instead
+ action_fail_recover = pcmk_on_fail_restart,
+
+ //! \deprecated Use pcmk_on_fail_ban instead
+ action_fail_migrate = pcmk_on_fail_ban,
+
+ //! \deprecated Use pcmk_on_fail_block instead
+ action_fail_block = pcmk_on_fail_block,
+
+ //! \deprecated Use pcmk_on_fail_stop instead
+ action_fail_stop = pcmk_on_fail_stop,
+
+ //! \deprecated Use pcmk_on_fail_standby_node instead
+ action_fail_standby = pcmk_on_fail_standby_node,
+
+ //! \deprecated Use pcmk_on_fail_fence_node instead
+ action_fail_fence = pcmk_on_fail_fence_node,
+
+ //! \deprecated Use pcmk_on_fail_restart_container instead
+ action_fail_restart_container = pcmk_on_fail_restart_container,
+
+ //! \deprecated Use pcmk_on_fail_reset_remote instead
+ action_fail_reset_remote = pcmk_on_fail_reset_remote,
+
+ //! \deprecated Use pcmk_on_fail_demote instead
+ action_fail_demote = pcmk_on_fail_demote,
+#endif
+};
+
+//! Action scheduling flags
+enum pe_action_flags {
+ //! No action flags set (compare with equality rather than bit set)
+ pcmk_no_action_flags = 0,
+
+ //! Whether action does not require invoking an agent
+ pcmk_action_pseudo = (1 << 0),
+
+ //! Whether action is runnable
+ pcmk_action_runnable = (1 << 1),
+
+ //! Whether action should not be executed
+ pcmk_action_optional = (1 << 2),
+
+ //! Whether action should be added to transition graph even if optional
+ pcmk_action_always_in_graph = (1 << 3),
+
+ //! Whether operation-specific instance attributes have been unpacked yet
+ pcmk_action_attrs_evaluated = (1 << 4),
+
+ //! Whether action is allowed to be part of a live migration
+ pcmk_action_migratable = (1 << 7),
+
+ //! Whether action has been added to transition graph
+ pcmk_action_added_to_graph = (1 << 8),
+
+ //! Whether action is a stop to abort a dangling migration
+ pcmk_action_migration_abort = (1 << 11),
+
+ /*!
+ * Whether action is an ordering point for minimum required instances
+ * (used to implement ordering after clones with clone-min configured,
+ * and ordered sets with require-all=false)
+ */
+ pcmk_action_min_runnable = (1 << 12),
+
+ //! Whether action is recurring monitor that must be rescheduled if active
+ pcmk_action_reschedule = (1 << 13),
+
+ //! Whether action has already been processed by a recursive procedure
+ pcmk_action_detect_loop = (1 << 14),
+
+ //! Whether action's inputs have been de-duplicated yet
+ pcmk_action_inputs_deduplicated = (1 << 15),
+
+ //! Whether action can be executed on DC rather than own node
+ pcmk_action_on_dc = (1 << 16),
+
+#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
+ //! \deprecated Use pcmk_action_pseudo instead
+ pe_action_pseudo = pcmk_action_pseudo,
+
+ //! \deprecated Use pcmk_action_runnable instead
+ pe_action_runnable = pcmk_action_runnable,
+
+ //! \deprecated Use pcmk_action_optional instead
+ pe_action_optional = pcmk_action_optional,
+
+ //! \deprecated Use pcmk_action_always_in_graph instead
+ pe_action_print_always = pcmk_action_always_in_graph,
+
+ //! \deprecated Use pcmk_action_attrs_evaluated instead
+ pe_action_have_node_attrs = pcmk_action_attrs_evaluated,
+
+ //! \deprecated Do not use
+ pe_action_implied_by_stonith = (1 << 6),
+
+ //! \deprecated Use pcmk_action_migratable instead
+ pe_action_migrate_runnable = pcmk_action_migratable,
+
+ //! \deprecated Use pcmk_action_added_to_graph instead
+ pe_action_dumped = pcmk_action_added_to_graph,
+
+ //! \deprecated Do not use
+ pe_action_processed = (1 << 9),
+
+ //! \deprecated Do not use
+ pe_action_clear = (1 << 10),
+
+ //! \deprecated Use pcmk_action_migration_abort instead
+ pe_action_dangle = pcmk_action_migration_abort,
+
+ //! \deprecated Use pcmk_action_min_runnable instead
+ pe_action_requires_any = pcmk_action_min_runnable,
+
+ //! \deprecated Use pcmk_action_reschedule instead
+ pe_action_reschedule = pcmk_action_reschedule,
+
+ //! \deprecated Use pcmk_action_detect_loop instead
+ pe_action_tracking = pcmk_action_detect_loop,
+
+ //! \deprecated Use pcmk_action_inputs_deduplicated instead
+ pe_action_dedup = pcmk_action_inputs_deduplicated,
+
+ //! \deprecated Use pcmk_action_on_dc instead
+ pe_action_dc = pcmk_action_on_dc,
+#endif
+};
+
+/* @COMPAT enum pe_link_state and enum pe_ordering are currently needed for
+ * struct pe_action_wrapper_s (which is public) but should be removed at an
+ * API compatibility break when that can be refactored and made internal
+ */
+
+//!@{
+//! \deprecated Do not use
+enum pe_link_state {
+ pe_link_not_dumped = 0,
+ pe_link_dumped = 1,
+#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
+ pe_link_dup = 2,
+#endif
+};
+
+enum pe_ordering {
+ pe_order_none = 0x0,
+#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
+ pe_order_optional = 0x1,
+ pe_order_apply_first_non_migratable = 0x2,
+ pe_order_implies_first = 0x10,
+ pe_order_implies_then = 0x20,
+ pe_order_promoted_implies_first = 0x40,
+ pe_order_implies_first_migratable = 0x80,
+ pe_order_runnable_left = 0x100,
+ pe_order_pseudo_left = 0x200,
+ pe_order_implies_then_on_node = 0x400,
+ pe_order_probe = 0x800,
+ pe_order_restart = 0x1000,
+ pe_order_stonith_stop = 0x2000,
+ pe_order_serialize_only = 0x4000,
+ pe_order_same_node = 0x8000,
+ pe_order_implies_first_printed = 0x10000,
+ pe_order_implies_then_printed = 0x20000,
+ pe_order_asymmetrical = 0x100000,
+ pe_order_load = 0x200000,
+ pe_order_one_or_more = 0x400000,
+ pe_order_anti_colocation = 0x800000,
+ pe_order_preserve = 0x1000000,
+ pe_order_then_cancels_first = 0x2000000,
+ pe_order_trace = 0x4000000,
+ pe_order_implies_first_master = pe_order_promoted_implies_first,
+#endif
+};
+
+// Action sequenced relative to another action
+// @COMPAT This should be internal
+struct pe_action_wrapper_s {
+ // @COMPAT This should be uint32_t
+ enum pe_ordering type; // Group of enum pcmk__action_relation_flags
+
+ // @COMPAT This should be a bool
+ enum pe_link_state state; // Whether action has been added to graph yet
+
+ pcmk_action_t *action; // Action to be sequenced
+};
+//!@}
+
+//! Implementation of pcmk_action_t
+struct pe_action_s {
+ int id; //!< Counter to identify action
+
+ /*!
+ * When the controller aborts a transition graph, it sets an abort priority.
+ * If this priority is higher than the abort priority, the action is still executed.
+ * Pseudo-actions are always allowed, so this is irrelevant for them.
+ */
+ int priority;
+
+ pcmk_resource_t *rsc; //!< Resource to apply action to, if any
+ pcmk_node_t *node; //!< Node to execute action on, if any
+ xmlNode *op_entry; //!< Action XML configuration, if any
+ char *task; //!< Action name
+ char *uuid; //!< Action key
+ char *cancel_task; //!< If task is "cancel", the action being cancelled
+ char *reason; //!< Readable description of why action is needed
+
+ // @COMPAT Change to uint32_t at a compatibility break
+ enum pe_action_flags flags; //!< Group of enum pe_action_flags
+
+ enum rsc_start_requirement needs; //!< Prerequisite for recovery
+ enum action_fail_response on_fail; //!< Response to failure
+ enum rsc_role_e fail_role; //!< Resource role if action fails
+ GHashTable *meta; //!< Meta-attributes relevant to action
+ GHashTable *extra; //!< Action-specific instance attributes
+
+ /* Current count of runnable instance actions for "first" action in an
+ * ordering dependency with pcmk__ar_min_runnable set.
+ */
+ int runnable_before; //!< For Pacemaker use only
+
+ /*!
+ * Number of instance actions for "first" action in an ordering dependency
+ * with pcmk__ar_min_runnable set that must be runnable before this action
+ * can be runnable.
+ */
+ int required_runnable_before;
+
+ // Actions in a relation with this one (as pcmk__related_action_t *)
+ GList *actions_before; //!< For Pacemaker use only
+ GList *actions_after; //!< For Pacemaker use only
+
+ /* This is intended to hold data that varies by the type of action, but is
+ * not currently used. Some of the above fields could be moved here except
+ * for API backward compatibility.
+ */
+ void *action_details; //!< For Pacemaker use only
+};
+
+// For parsing various action-related string specifications
+gboolean parse_op_key(const char *key, char **rsc_id, char **op_type,
+ guint *interval_ms);
+gboolean decode_transition_key(const char *key, char **uuid, int *transition_id,
+ int *action_id, int *target_rc);
+gboolean decode_transition_magic(const char *magic, char **uuid,
+ int *transition_id, int *action_id,
+ int *op_status, int *op_rc, int *target_rc);
+
+// @COMPAT Either these shouldn't be in libcrmcommon or lrmd_event_data_t should
+int rsc_op_expected_rc(const lrmd_event_data_t *event);
+gboolean did_rsc_op_fail(lrmd_event_data_t *event, int target_rc);
+
+bool crm_op_needs_metadata(const char *rsc_class, const char *op);
+
+xmlNode *crm_create_op_xml(xmlNode *parent, const char *prefix,
+ const char *task, const char *interval_spec,
+ const char *timeout);
+
+bool pcmk_is_probe(const char *task, guint interval);
+bool pcmk_xe_is_probe(const xmlNode *xml_op);
+bool pcmk_xe_mask_probe_failure(const xmlNode *xml_op);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // PCMK__CRM_COMMON_ACTIONS__H
diff --git a/include/crm/common/actions_internal.h b/include/crm/common/actions_internal.h
new file mode 100644
index 0000000..7e794e6
--- /dev/null
+++ b/include/crm/common/actions_internal.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef PCMK__CRM_COMMON_ACTIONS_INTERNAL__H
+#define PCMK__CRM_COMMON_ACTIONS_INTERNAL__H
+
+#include <stdbool.h> // bool
+#include <glib.h> // guint
+#include <libxml/tree.h> // xmlNode
+
+#include <crm/common/actions.h> // PCMK_ACTION_MONITOR
+#include <crm/common/strings_internal.h> // pcmk__str_eq()
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+//! printf-style format to create operation key from resource, action, interval
+#define PCMK__OP_FMT "%s_%s_%u"
+
+char *pcmk__op_key(const char *rsc_id, const char *op_type, guint interval_ms);
+char *pcmk__notify_key(const char *rsc_id, const char *notify_type,
+ const char *op_type);
+char *pcmk__transition_key(int transition_id, int action_id, int target_rc,
+ const char *node);
+void pcmk__filter_op_for_digest(xmlNode *param_set);
+bool pcmk__is_fencing_action(const char *action);
+
+/*!
+ * \internal
+ * \brief Get a human-friendly action name
+ *
+ * \param[in] action_name Actual action name
+ * \param[in] interval_ms Action interval (in milliseconds)
+ *
+ * \return Action name suitable for display
+ */
+static inline const char *
+pcmk__readable_action(const char *action_name, guint interval_ms) {
+ if ((interval_ms == 0)
+ && pcmk__str_eq(action_name, PCMK_ACTION_MONITOR, pcmk__str_none)) {
+ return "probe";
+ }
+ return action_name;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // PCMK__CRM_COMMON_ACTIONS_INTERNAL__H
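
The operation-key helpers above pair with the public parse_op_key() declared in actions.h. A small round-trip sketch (in-tree code only, since actions_internal.h is not installed; the resource name is made up):

#include <stdio.h>
#include <stdlib.h>     // free()
#include <glib.h>       // guint

#include <crm/common/actions.h>            // parse_op_key(), PCMK_ACTION_MONITOR
#include <crm/common/actions_internal.h>   // pcmk__op_key(), pcmk__readable_action()

static void
op_key_round_trip(void)
{
    // Builds "rsc1_monitor_10000" per the PCMK__OP_FMT format
    char *key = pcmk__op_key("rsc1", PCMK_ACTION_MONITOR, 10000);

    char *rsc_id = NULL;
    char *op_type = NULL;
    guint interval_ms = 0;

    if (parse_op_key(key, &rsc_id, &op_type, &interval_ms)) {
        // A zero-interval monitor would be shown as "probe" instead
        printf("%s of %s every %ums (displayed as '%s')\n",
               op_type, rsc_id, interval_ms,
               pcmk__readable_action(op_type, interval_ms));
    }
    free(rsc_id);
    free(op_type);
    free(key);
}
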
diff --git a/include/crm/common/alerts_internal.h b/include/crm/common/alerts_internal.h
index ef64fab..dc67427 100644
--- a/include/crm/common/alerts_internal.h
+++ b/include/crm/common/alerts_internal.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015-2022 the Pacemaker project contributors
+ * Copyright 2015-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -76,7 +76,6 @@ void pcmk__add_alert_key(GHashTable *table, enum pcmk__alert_keys_e name,
const char *value);
void pcmk__add_alert_key_int(GHashTable *table, enum pcmk__alert_keys_e name,
int value);
-bool pcmk__alert_in_patchset(xmlNode *msg, bool config);
static inline const char *
pcmk__alert_flag2text(enum pcmk__alert_flags flag)
diff --git a/include/crm/common/cib_internal.h b/include/crm/common/cib_internal.h
new file mode 100644
index 0000000..c41c12e
--- /dev/null
+++ b/include/crm/common/cib_internal.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef PCMK__CRM_COMMON_CIB_INTERNAL__H
+#define PCMK__CRM_COMMON_CIB_INTERNAL__H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+const char *pcmk__cib_abs_xpath_for(const char *element);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // PCMK__COMMON_CIB_INTERNAL__H
diff --git a/include/crm/common/clone_internal.h b/include/crm/common/clone_internal.h
new file mode 100644
index 0000000..494ee74
--- /dev/null
+++ b/include/crm/common/clone_internal.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef PCMK__CRM_COMMON_CLONE_INTERNAL__H
+# define PCMK__CRM_COMMON_CLONE_INTERNAL__H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Clone resource flags (used in variant data)
+enum pcmk__clone_flags {
+ // Whether instances should be started sequentially
+ pcmk__clone_ordered = (1 << 0),
+
+ // Whether promotion scores have been added
+ pcmk__clone_promotion_added = (1 << 1),
+
+ // Whether promotion constraints have been added
+ pcmk__clone_promotion_constrained = (1 << 2),
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // PCMK__CRM_COMMON_CLONE_INTERNAL__H
diff --git a/include/crm/common/digests_internal.h b/include/crm/common/digests_internal.h
new file mode 100644
index 0000000..7598de2
--- /dev/null
+++ b/include/crm/common/digests_internal.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef PCMK__CRM_COMMON_DIGESTS_INTERNAL__H
+# define PCMK__CRM_COMMON_DIGESTS_INTERNAL__H
+
+#include <libxml/tree.h> // xmlNode
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Digest comparison results
+enum pcmk__digest_result {
+ pcmk__digest_unknown, // No digest available for comparison
+ pcmk__digest_match, // Digests match
+ pcmk__digest_mismatch, // Any parameter changed (potentially reloadable)
+ pcmk__digest_restart, // Parameters that require a restart changed
+};
+
+bool pcmk__verify_digest(xmlNode *input, const char *expected);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // PCMK__CRM_COMMON_DIGESTS_INTERNAL__H
diff --git a/include/crm/common/failcounts_internal.h b/include/crm/common/failcounts_internal.h
new file mode 100644
index 0000000..4ad01bf
--- /dev/null
+++ b/include/crm/common/failcounts_internal.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef PCMK__CRM_COMMON_FAILCOUNTS_INTERNAL__H
+# define PCMK__CRM_COMMON_FAILCOUNTS_INTERNAL__H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Options when getting resource fail counts
+enum pcmk__fc_flags {
+ pcmk__fc_default = (1 << 0),
+ pcmk__fc_effective = (1 << 1), // Don't count expired failures
+ pcmk__fc_fillers = (1 << 2), // If container, include filler failures
+};
+
+/*!
+ * \internal
+ * \enum pcmk__rsc_node
+ * \brief Type of resource location lookup to perform
+ */
+enum pcmk__rsc_node {
+ pcmk__rsc_node_assigned = 0, //!< Where resource is assigned
+ pcmk__rsc_node_current = 1, //!< Where resource is running
+
+ // @COMPAT: Use in native_location() at a compatibility break
+ pcmk__rsc_node_pending = 2, //!< Where resource is pending
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // PCMK__CRM_COMMON_FAILCOUNTS_INTERNAL__H
diff --git a/include/crm/common/group_internal.h b/include/crm/common/group_internal.h
new file mode 100644
index 0000000..9e1424d
--- /dev/null
+++ b/include/crm/common/group_internal.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef PCMK__CRM_COMMON_GROUP_INTERNAL__H
+# define PCMK__CRM_COMMON_GROUP_INTERNAL__H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Group resource flags (used in variant data)
+enum pcmk__group_flags {
+ pcmk__group_ordered = (1 << 0), // Members start sequentially
+ pcmk__group_colocated = (1 << 1), // Members must be on same node
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // PCMK__CRM_COMMON_GROUP_INTERNAL__H
diff --git a/include/crm/common/health_internal.h b/include/crm/common/health_internal.h
index 277a4c9..f98529c 100644
--- a/include/crm/common/health_internal.h
+++ b/include/crm/common/health_internal.h
@@ -18,7 +18,7 @@ extern "C" {
* \internal
* \brief Possible node health strategies
*
- * \note It would be nice to use this in pe_working_set_t but that will have to
+ * \note It would be nice to use this in pcmk_scheduler_t but that will have to
* wait for an API backward compatibility break.
*/
enum pcmk__health_strategy {
diff --git a/include/crm/common/internal.h b/include/crm/common/internal.h
index bd98780..3078606 100644
--- a/include/crm/common/internal.h
+++ b/include/crm/common/internal.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2015-2022 the Pacemaker project contributors
+ * Copyright 2015-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -20,6 +20,8 @@
#include <crm/common/util.h> // crm_strdup_printf()
#include <crm/common/logging.h> // do_crm_log_unlikely(), etc.
#include <crm/common/mainloop.h> // mainloop_io_t, struct ipc_client_callbacks
+#include <crm/common/actions_internal.h>
+#include <crm/common/digests_internal.h>
#include <crm/common/health_internal.h>
#include <crm/common/io_internal.h>
#include <crm/common/iso8601_internal.h>
@@ -50,11 +52,6 @@ int pcmk__substitute_secrets(const char *rsc_id, GHashTable *params);
#endif
-/* internal digest-related utilities (from digest.c) */
-
-bool pcmk__verify_digest(xmlNode *input, const char *expected);
-
-
/* internal main loop utilities (from mainloop.c) */
int pcmk__add_mainloop_ipc(crm_ipc_t *ipc, int priority, void *userdata,
@@ -164,20 +161,6 @@ int pcmk__pidfile_matches(const char *filename, pid_t expected_pid,
int pcmk__lock_pidfile(const char *filename, const char *name);
-/* internal functions related to resource operations (from operations.c) */
-
-// printf-style format to create operation ID from resource, action, interval
-#define PCMK__OP_FMT "%s_%s_%u"
-
-char *pcmk__op_key(const char *rsc_id, const char *op_type, guint interval_ms);
-char *pcmk__notify_key(const char *rsc_id, const char *notify_type,
- const char *op_type);
-char *pcmk__transition_key(int transition_id, int action_id, int target_rc,
- const char *node);
-void pcmk__filter_op_for_digest(xmlNode *param_set);
-bool pcmk__is_fencing_action(const char *action);
-
-
// bitwise arithmetic utilities
/*!
diff --git a/include/crm/common/ipc.h b/include/crm/common/ipc.h
index 3d4ee10..397c8b1 100644
--- a/include/crm/common/ipc.h
+++ b/include/crm/common/ipc.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -170,8 +170,8 @@ void crm_ipc_close(crm_ipc_t * client);
void crm_ipc_destroy(crm_ipc_t * client);
void pcmk_free_ipc_event(struct iovec *event);
-int crm_ipc_send(crm_ipc_t * client, xmlNode * message, enum crm_ipc_flags flags,
- int32_t ms_timeout, xmlNode ** reply);
+int crm_ipc_send(crm_ipc_t *client, const xmlNode *message,
+ enum crm_ipc_flags flags, int32_t ms_timeout, xmlNode **reply);
int crm_ipc_get_fd(crm_ipc_t * client);
bool crm_ipc_connected(crm_ipc_t * client);
diff --git a/include/crm/common/ipc_internal.h b/include/crm/common/ipc_internal.h
index 5099dda..b391e83 100644
--- a/include/crm/common/ipc_internal.h
+++ b/include/crm/common/ipc_internal.h
@@ -96,6 +96,10 @@ extern "C" {
int pcmk__ipc_is_authentic_process_active(const char *name, uid_t refuid,
gid_t refgid, pid_t *gotpid);
+int pcmk__connect_generic_ipc(crm_ipc_t *ipc);
+int pcmk__ipc_fd(crm_ipc_t *ipc, int *fd);
+int pcmk__connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type,
+ int attempts);
/*
* Server-related
@@ -112,6 +116,7 @@ struct pcmk__remote_s {
int tcp_socket;
mainloop_io_t *source;
time_t uptime;
+ char *start_state;
/* CIB-only */
char *token;
@@ -245,11 +250,11 @@ int pcmk__ipc_send_ack_as(const char *function, int line, pcmk__client_t *c,
#define pcmk__ipc_send_ack(c, req, flags, tag, ver, st) \
pcmk__ipc_send_ack_as(__func__, __LINE__, (c), (req), (flags), (tag), (ver), (st))
-int pcmk__ipc_prepare_iov(uint32_t request, xmlNode *message,
+int pcmk__ipc_prepare_iov(uint32_t request, const xmlNode *message,
uint32_t max_send_size,
struct iovec **result, ssize_t *bytes);
-int pcmk__ipc_send_xml(pcmk__client_t *c, uint32_t request, xmlNode *message,
- uint32_t flags);
+int pcmk__ipc_send_xml(pcmk__client_t *c, uint32_t request,
+ const xmlNode *message, uint32_t flags);
int pcmk__ipc_send_iov(pcmk__client_t *c, struct iovec *iov, uint32_t flags);
xmlNode *pcmk__client_data2xml(pcmk__client_t *c, void *data,
uint32_t *id, uint32_t *flags);
diff --git a/include/crm/common/logging.h b/include/crm/common/logging.h
index 2878fba..eea4cec 100644
--- a/include/crm/common/logging.h
+++ b/include/crm/common/logging.h
@@ -11,6 +11,7 @@
# define PCMK__CRM_COMMON_LOGGING__H
# include <stdio.h>
+# include <stdint.h> // uint8_t, uint32_t
# include <glib.h>
# include <qb/qblog.h>
# include <libxml/tree.h>
@@ -120,7 +121,9 @@ unsigned int set_crm_log_level(unsigned int level);
unsigned int get_crm_log_level(void);
-void pcmk_log_xml_impl(uint8_t level, const char *text, const xmlNode *xml);
+void pcmk_log_xml_as(const char *file, const char *function, uint32_t line,
+ uint32_t tags, uint8_t level, const char *text,
+ const xmlNode *xml);
/*
* Throughout the macros below, note the leading, pre-comma, space in the
@@ -270,7 +273,8 @@ pcmk__clip_log_level(int level)
__LINE__, 0); \
} \
if (crm_is_callsite_active(xml_cs, _level, 0)) { \
- pcmk_log_xml_impl(_level, text, xml); \
+ pcmk_log_xml_as(__FILE__, __func__, __LINE__, 0, \
+ _level, text, (xml)); \
} \
break; \
} \
diff --git a/include/crm/common/logging_compat.h b/include/crm/common/logging_compat.h
index cfdb562..b57a802 100644
--- a/include/crm/common/logging_compat.h
+++ b/include/crm/common/logging_compat.h
@@ -10,6 +10,7 @@
#ifndef PCMK__CRM_COMMON_LOGGING_COMPAT__H
# define PCMK__CRM_COMMON_LOGGING_COMPAT__H
+#include <stdint.h> // uint8_t
#include <glib.h>
#include <libxml/tree.h>
@@ -78,6 +79,9 @@ void log_data_element(int log_level, const char *file, const char *function,
int line, const char *prefix, const xmlNode *data,
int depth, int legacy_options);
+//! \deprecated Do not use Pacemaker for general-purpose logging
+void pcmk_log_xml_impl(uint8_t level, const char *text, const xmlNode *xml);
+
#ifdef __cplusplus
}
#endif
diff --git a/include/crm/common/logging_internal.h b/include/crm/common/logging_internal.h
index 479dcab..981ddf3 100644
--- a/include/crm/common/logging_internal.h
+++ b/include/crm/common/logging_internal.h
@@ -19,6 +19,18 @@ extern "C" {
# include <crm/common/logging.h>
# include <crm/common/output_internal.h>
+typedef void (*pcmk__config_error_func) (void *ctx, const char *msg, ...);
+typedef void (*pcmk__config_warning_func) (void *ctx, const char *msg, ...);
+
+extern pcmk__config_error_func pcmk__config_error_handler;
+extern pcmk__config_warning_func pcmk__config_warning_handler;
+
+extern void *pcmk__config_error_context;
+extern void *pcmk__config_warning_context;
+
+void pcmk__set_config_error_handler(pcmk__config_error_func error_handler, void *error_context);
+void pcmk__set_config_warning_handler(pcmk__config_warning_func warning_handler, void *warning_context);
+
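A sketch of how a caller might install one of the handlers declared above; the handler body and the choice of a FILE * context are illustrative, not anything this header prescribes:

#include <stdarg.h>
#include <stdio.h>

#include <crm/common/logging_internal.h>    // pcmk__set_config_error_handler()

// Matches pcmk__config_error_func: receives the registered context plus a
// printf-style message from pcmk__config_err()
static void
example_config_error_handler(void *ctx, const char *msg, ...)
{
    FILE *sink = ctx;           // context chosen by the caller at registration
    va_list ap;

    va_start(ap, msg);
    vfprintf(sink, msg, ap);
    va_end(ap);
    fputc('\n', sink);
}

// During tool startup (for example), after which pcmk__config_err() calls the
// handler instead of crm_err():
//     pcmk__set_config_error_handler(example_config_error_handler, stderr);
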
/*!
* \internal
* \brief Log a configuration error
@@ -26,9 +38,13 @@ extern "C" {
* \param[in] fmt printf(3)-style format string
* \param[in] ... Arguments for format string
*/
-# define pcmk__config_err(fmt...) do { \
- crm_config_error = TRUE; \
- crm_err(fmt); \
+# define pcmk__config_err(fmt...) do { \
+ crm_config_error = TRUE; \
+ if (pcmk__config_error_handler == NULL) { \
+ crm_err(fmt); \
+ } else { \
+ pcmk__config_error_handler(pcmk__config_error_context, fmt); \
+ } \
} while (0)
/*!
@@ -38,9 +54,13 @@ extern "C" {
* \param[in] fmt printf(3)-style format string
* \param[in] ... Arguments for format string
*/
-# define pcmk__config_warn(fmt...) do { \
- crm_config_warning = TRUE; \
- crm_warn(fmt); \
+# define pcmk__config_warn(fmt...) do { \
+ crm_config_warning = TRUE; \
+ if (pcmk__config_warning_handler == NULL) { \
+ crm_warn(fmt); \
+ } else { \
+ pcmk__config_warning_handler(pcmk__config_warning_context, fmt); \
+ } \
} while (0)
/*!
@@ -74,6 +94,76 @@ extern "C" {
/*!
* \internal
+ * \brief Log XML changes line-by-line in a formatted fashion
+ *
+ * \param[in] level Priority at which to log the messages
+ * \param[in] xml XML to log
+ *
+ * \note This does nothing when \p level is \c LOG_STDOUT.
+ */
+#define pcmk__log_xml_changes(level, xml) do { \
+ uint8_t _level = pcmk__clip_log_level(level); \
+ static struct qb_log_callsite *xml_cs = NULL; \
+ \
+ switch (_level) { \
+ case LOG_STDOUT: \
+ case LOG_NEVER: \
+ break; \
+ default: \
+ if (xml_cs == NULL) { \
+ xml_cs = qb_log_callsite_get(__func__, __FILE__, \
+ "xml-changes", _level, \
+ __LINE__, 0); \
+ } \
+ if (crm_is_callsite_active(xml_cs, _level, 0)) { \
+ pcmk__log_xml_changes_as(__FILE__, __func__, __LINE__, \
+ 0, _level, xml); \
+ } \
+ break; \
+ } \
+ } while(0)
+
+/*!
+ * \internal
+ * \brief Log an XML patchset line-by-line in a formatted fashion
+ *
+ * \param[in] level Priority at which to log the messages
+ * \param[in] patchset XML patchset to log
+ *
+ * \note This does nothing when \p level is \c LOG_STDOUT.
+ */
+#define pcmk__log_xml_patchset(level, patchset) do { \
+ uint8_t _level = pcmk__clip_log_level(level); \
+ static struct qb_log_callsite *xml_cs = NULL; \
+ \
+ switch (_level) { \
+ case LOG_STDOUT: \
+ case LOG_NEVER: \
+ break; \
+ default: \
+ if (xml_cs == NULL) { \
+ xml_cs = qb_log_callsite_get(__func__, __FILE__, \
+ "xml-patchset", _level, \
+ __LINE__, 0); \
+ } \
+ if (crm_is_callsite_active(xml_cs, _level, 0)) { \
+ pcmk__log_xml_patchset_as(__FILE__, __func__, __LINE__, \
+ 0, _level, patchset); \
+ } \
+ break; \
+ } \
+ } while(0)
+
+void pcmk__log_xml_changes_as(const char *file, const char *function,
+ uint32_t line, uint32_t tags, uint8_t level,
+ const xmlNode *xml);
+
+void pcmk__log_xml_patchset_as(const char *file, const char *function,
+ uint32_t line, uint32_t tags, uint8_t level,
+ const xmlNode *patchset);
+
+/*!
+ * \internal
* \brief Initialize logging for command line tools
*
* \param[in] name The name of the program
diff --git a/include/crm/common/nodes.h b/include/crm/common/nodes.h
new file mode 100644
index 0000000..fbc3758
--- /dev/null
+++ b/include/crm/common/nodes.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef PCMK__CRM_COMMON_NODES__H
+# define PCMK__CRM_COMMON_NODES__H
+
+#include <glib.h> // gboolean, GList, GHashTable
+
+#include <crm/common/scheduler_types.h> // pcmk_resource_t, pcmk_scheduler_t
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*!
+ * \file
+ * \brief Scheduler API for nodes
+ * \ingroup core
+ */
+
+// Special node attributes
+
+#define PCMK_NODE_ATTR_TERMINATE "terminate"
+
+
+//! Possible node types
+enum node_type {
+ pcmk_node_variant_cluster = 1, //!< Cluster layer node
+ pcmk_node_variant_remote = 2, //!< Pacemaker Remote node
+
+ node_ping = 0, //!< \deprecated Do not use
+#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
+ //! \deprecated Use pcmk_node_variant_cluster instead
+ node_member = pcmk_node_variant_cluster,
+
+ //! \deprecated Use pcmk_node_variant_remote instead
+ node_remote = pcmk_node_variant_remote,
+#endif
+};
+
+//! When to probe a resource on a node (as specified in location constraints)
+enum pe_discover_e {
+ pcmk_probe_always = 0, //!< Always probe resource on node
+ pcmk_probe_never = 1, //!< Never probe resource on node
+ pcmk_probe_exclusive = 2, //!< Probe only on designated nodes
+
+#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
+ //! \deprecated Use pcmk_probe_always instead
+ pe_discover_always = pcmk_probe_always,
+
+ //! \deprecated Use pcmk_probe_never instead
+ pe_discover_never = pcmk_probe_never,
+
+ //! \deprecated Use pcmk_probe_exclusive instead
+ pe_discover_exclusive = pcmk_probe_exclusive,
+#endif
+};
+
+//! Basic node information (all node objects for the same node share this)
+struct pe_node_shared_s {
+ const char *id; //!< Node ID at the cluster layer
+ const char *uname; //!< Node name in cluster
+ enum node_type type; //!< Node variant
+
+ // @TODO Convert these into a flag group
+ gboolean online; //!< Whether online
+ gboolean standby; //!< Whether in standby mode
+ gboolean standby_onfail; //!< Whether in standby mode due to on-fail
+ gboolean pending; //!< Whether controller membership is pending
+ gboolean unclean; //!< Whether node requires fencing
+ gboolean unseen; //!< Whether node has never joined cluster
+ gboolean shutdown; //!< Whether shutting down
+ gboolean expected_up; //!< Whether expected join state is member
+ gboolean is_dc; //!< Whether node is cluster's DC
+ gboolean maintenance; //!< Whether in maintenance mode
+ gboolean rsc_discovery_enabled; //!< Whether probes are allowed on node
+
+ /*!
+ * Whether this is a guest node whose guest resource must be recovered or a
+ * remote node that must be fenced
+ */
+ gboolean remote_requires_reset;
+
+ /*!
+ * Whether this is a Pacemaker Remote node that was fenced since it was last
+ * connected by the cluster
+ */
+ gboolean remote_was_fenced;
+
+ /*!
+ * Whether this is a Pacemaker Remote node previously marked in its
+ * node state as being in maintenance mode
+ */
+ gboolean remote_maintenance;
+
+ gboolean unpacked; //!< Whether node history has been unpacked
+
+ /*!
+ * Number of resources active on this node (valid after CIB status section
+ * has been unpacked, as long as pcmk_sched_no_counts was not set)
+ */
+ int num_resources;
+
+ //! Remote connection resource for node, if it is a Pacemaker Remote node
+ pcmk_resource_t *remote_rsc;
+
+ GList *running_rsc; //!< List of resources active on node
+ GList *allocated_rsc; //!< List of resources assigned to node
+ GHashTable *attrs; //!< Node attributes
+ GHashTable *utilization; //!< Node utilization attributes
+ GHashTable *digest_cache; //!< Cache of calculated resource digests
+
+ /*!
+ * Sum of priorities of all resources active on node and on any guest nodes
+ * connected to this node, with +1 for promoted instances (used to compare
+ * nodes for priority-fencing-delay)
+ */
+ int priority;
+
+ pcmk_scheduler_t *data_set; //!< Cluster that node is part of
+};
+
+//! Implementation of pcmk_node_t
+struct pe_node_s {
+ int weight; //!< Node score for a given resource
+ gboolean fixed; //!< \deprecated Do not use
+ int count; //!< Counter reused by assignment and promotion code
+ struct pe_node_shared_s *details; //!< Basic node information
+
+ // @COMPAT This should be enum pe_discover_e
+ int rsc_discover_mode; //!< Probe mode (enum pe_discover_e)
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // PCMK__CRM_COMMON_NODES__H
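A minimal sketch of how consumers read these structures; the node pointer would come from scheduler data unpacked elsewhere, and only fields declared above are used:

#include <stdio.h>
#include <crm/common/nodes.h>
#include <crm/common/scheduler_types.h>   // pcmk_node_t

// Print a one-line status summary for a node object
static void
report_node(const pcmk_node_t *node)
{
    if ((node == NULL) || (node->details == NULL)) {
        return;
    }
    printf("%s: %s%s%s\n", node->details->uname,
           node->details->online? "online" : "offline",
           node->details->unclean? " (unclean)" : "",
           node->details->maintenance? " (maintenance)" : "");
}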
diff --git a/include/crm/common/nvpair.h b/include/crm/common/nvpair.h
index aebc199..185bdc3 100644
--- a/include/crm/common/nvpair.h
+++ b/include/crm/common/nvpair.h
@@ -46,7 +46,6 @@ void hash2smartfield(gpointer key, gpointer value, gpointer user_data);
GHashTable *xml2list(const xmlNode *parent);
const char *crm_xml_add(xmlNode *node, const char *name, const char *value);
-const char *crm_xml_replace(xmlNode *node, const char *name, const char *value);
const char *crm_xml_add_int(xmlNode *node, const char *name, int value);
const char *crm_xml_add_ll(xmlNode *node, const char *name, long long value);
const char *crm_xml_add_ms(xmlNode *node, const char *name, guint ms);
diff --git a/include/crm/common/options_internal.h b/include/crm/common/options_internal.h
index 4157b58..5c561fd 100644
--- a/include/crm/common/options_internal.h
+++ b/include/crm/common/options_internal.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2006-2022 the Pacemaker project contributors
+ * Copyright 2006-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -26,7 +26,7 @@ _Noreturn void pcmk__cli_help(char cmd);
*/
const char *pcmk__env_option(const char *option);
-void pcmk__set_env_option(const char *option, const char *value);
+void pcmk__set_env_option(const char *option, const char *value, bool compat);
bool pcmk__env_option_enabled(const char *daemon, const char *option);
@@ -76,18 +76,52 @@ long pcmk__auto_watchdog_timeout(void);
bool pcmk__valid_sbd_timeout(const char *value);
// Constants for environment variable names
+#define PCMK__ENV_AUTHKEY_LOCATION "authkey_location"
#define PCMK__ENV_BLACKBOX "blackbox"
+#define PCMK__ENV_CALLGRIND_ENABLED "callgrind_enabled"
#define PCMK__ENV_CLUSTER_TYPE "cluster_type"
#define PCMK__ENV_DEBUG "debug"
+#define PCMK__ENV_DH_MAX_BITS "dh_max_bits"
+#define PCMK__ENV_DH_MIN_BITS "dh_min_bits"
+#define PCMK__ENV_FAIL_FAST "fail_fast"
+#define PCMK__ENV_IPC_BUFFER "ipc_buffer"
+#define PCMK__ENV_IPC_TYPE "ipc_type"
#define PCMK__ENV_LOGFACILITY "logfacility"
#define PCMK__ENV_LOGFILE "logfile"
+#define PCMK__ENV_LOGFILE_MODE "logfile_mode"
#define PCMK__ENV_LOGPRIORITY "logpriority"
-#define PCMK__ENV_MCP "mcp"
+#define PCMK__ENV_NODE_ACTION_LIMIT "node_action_limit"
#define PCMK__ENV_NODE_START_STATE "node_start_state"
+#define PCMK__ENV_PANIC_ACTION "panic_action"
#define PCMK__ENV_PHYSICAL_HOST "physical_host"
+#define PCMK__ENV_REMOTE_ADDRESS "remote_address"
+#define PCMK__ENV_REMOTE_PID1 "remote_pid1"
+#define PCMK__ENV_REMOTE_PORT "remote_port"
+#define PCMK__ENV_RESPAWNED "respawned"
+#define PCMK__ENV_SCHEMA_DIRECTORY "schema_directory"
+#define PCMK__ENV_SERVICE "service"
+#define PCMK__ENV_STDERR "stderr"
+#define PCMK__ENV_TLS_PRIORITIES "tls_priorities"
+#define PCMK__ENV_TRACE_BLACKBOX "trace_blackbox"
+#define PCMK__ENV_TRACE_FILES "trace_files"
+#define PCMK__ENV_TRACE_FORMATS "trace_formats"
+#define PCMK__ENV_TRACE_FUNCTIONS "trace_functions"
+#define PCMK__ENV_TRACE_TAGS "trace_tags"
+#define PCMK__ENV_VALGRIND_ENABLED "valgrind_enabled"
+
+// @COMPAT Drop at 3.0.0; default is plenty
+#define PCMK__ENV_CIB_TIMEOUT "cib_timeout"
+
+// @COMPAT Drop at 3.0.0; likely last used in 1.1.24
+#define PCMK__ENV_MCP "mcp"
+
+// @COMPAT Drop at 3.0.0; added unused in 1.1.9
#define PCMK__ENV_QUORUM_TYPE "quorum_type"
+
+/* @COMPAT Drop at 3.0.0; added to debug shutdown issues when Pacemaker is
+ * managed by systemd, but no longer useful.
+ */
#define PCMK__ENV_SHUTDOWN_DELAY "shutdown_delay"
-#define PCMK__ENV_STDERR "stderr"
// Constants for cluster option names
#define PCMK__OPT_NODE_HEALTH_BASE "node-health-base"
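A hedged sketch of the new three-argument pcmk__set_env_option(); the exact meaning of the added compat flag is an assumption based on its name, and the option/value pair is illustrative only:

#include <stdbool.h>
#include <crm/common/options_internal.h>

// Sketch only: set a Pacemaker environment option by its constant name
static void
enable_function_tracing(void)
{
    pcmk__set_env_option(PCMK__ENV_TRACE_FUNCTIONS, "unpack_status", false);
}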
diff --git a/include/crm/common/output_internal.h b/include/crm/common/output_internal.h
index e7b631e..274bd85 100644
--- a/include/crm/common/output_internal.h
+++ b/include/crm/common/output_internal.h
@@ -763,6 +763,11 @@ pcmk__output_get_log_level(const pcmk__output_t *out);
void
pcmk__output_set_log_level(pcmk__output_t *out, uint8_t log_level);
+void pcmk__output_set_log_filter(pcmk__output_t *out, const char *file,
+ const char *function, uint32_t line,
+ uint32_t tags);
+
+
/*!
* \internal
* \brief Create and return a new XML node with the given name, as a child of the
diff --git a/include/crm/common/remote_internal.h b/include/crm/common/remote_internal.h
index 8473668..030c7a4 100644
--- a/include/crm/common/remote_internal.h
+++ b/include/crm/common/remote_internal.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008-2022 the Pacemaker project contributors
+ * Copyright 2008-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -14,7 +14,7 @@
typedef struct pcmk__remote_s pcmk__remote_t;
-int pcmk__remote_send_xml(pcmk__remote_t *remote, xmlNode *msg);
+int pcmk__remote_send_xml(pcmk__remote_t *remote, const xmlNode *msg);
int pcmk__remote_ready(const pcmk__remote_t *remote, int timeout_ms);
int pcmk__read_remote_message(pcmk__remote_t *remote, int timeout_ms);
xmlNode *pcmk__remote_message_xml(pcmk__remote_t *remote);
diff --git a/include/crm/common/resources.h b/include/crm/common/resources.h
new file mode 100644
index 0000000..043dc1c
--- /dev/null
+++ b/include/crm/common/resources.h
@@ -0,0 +1,502 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef PCMK__CRM_COMMON_RESOURCES__H
+# define PCMK__CRM_COMMON_RESOURCES__H
+
+#include <sys/types.h> // time_t
+#include <libxml/tree.h> // xmlNode
+#include <glib.h> // gboolean, guint, GList, GHashTable
+
+#include <crm/common/roles.h> // enum rsc_role_e
+#include <crm/common/scheduler_types.h> // pcmk_resource_t, etc.
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*!
+ * \file
+ * \brief Scheduler API for resources
+ * \ingroup core
+ */
+
+//! Resource variants supported by Pacemaker
+enum pe_obj_types {
+ // Order matters: some code compares greater or lesser than
+ pcmk_rsc_variant_unknown = -1, //!< Unknown resource variant
+ pcmk_rsc_variant_primitive = 0, //!< Primitive resource
+ pcmk_rsc_variant_group = 1, //!< Group resource
+ pcmk_rsc_variant_clone = 2, //!< Clone resource
+ pcmk_rsc_variant_bundle = 3, //!< Bundle resource
+
+#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
+ //! \deprecated Use pcmk_rsc_variant_unknown instead
+ pe_unknown = pcmk_rsc_variant_unknown,
+
+ //! \deprecated Use pcmk_rsc_variant_primitive instead
+ pe_native = pcmk_rsc_variant_primitive,
+
+ //! \deprecated Use pcmk_rsc_variant_group instead
+ pe_group = pcmk_rsc_variant_group,
+
+ //! \deprecated Use pcmk_rsc_variant_clone instead
+ pe_clone = pcmk_rsc_variant_clone,
+
+ //! \deprecated Use pcmk_rsc_variant_bundle instead
+ pe_container = pcmk_rsc_variant_bundle,
+#endif
+};
+
+//! What a resource needs before it can be recovered from a failed node
+enum rsc_start_requirement {
+ pcmk_requires_nothing = 0, //!< Resource can be recovered immediately
+ pcmk_requires_quorum = 1, //!< Resource can be recovered if quorate
+ pcmk_requires_fencing = 2, //!< Resource can be recovered after fencing
+
+#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
+ //! \deprecated Use pcmk_requires_nothing instead
+ rsc_req_nothing = pcmk_requires_nothing,
+
+ //! \deprecated Use pcmk_requires_quorum instead
+ rsc_req_quorum = pcmk_requires_quorum,
+
+ //! \deprecated Use pcmk_requires_fencing instead
+ rsc_req_stonith = pcmk_requires_fencing,
+#endif
+};
+
+//! How to recover a resource that is incorrectly active on multiple nodes
+enum rsc_recovery_type {
+ pcmk_multiply_active_restart = 0, //!< Stop on all, start on desired
+ pcmk_multiply_active_stop = 1, //!< Stop on all and leave stopped
+ pcmk_multiply_active_block = 2, //!< Do nothing to resource
+ pcmk_multiply_active_unexpected = 3, //!< Stop unexpected instances
+
+#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
+ //! \deprecated Use pcmk_multiply_active_restart instead
+ recovery_stop_start = pcmk_multiply_active_restart,
+
+ //! \deprecated Use pcmk_multiply_active_stop instead
+ recovery_stop_only = pcmk_multiply_active_stop,
+
+ //! \deprecated Use pcmk_multiply_active_block instead
+ recovery_block = pcmk_multiply_active_block,
+
+ //! \deprecated Use pcmk_multiply_active_unexpected instead
+ recovery_stop_unexpected = pcmk_multiply_active_unexpected,
+#endif
+};
+
+//! Resource scheduling flags
+enum pcmk_rsc_flags {
+ //! No resource flags set (compare with equality rather than bit set)
+ pcmk_no_rsc_flags = 0ULL,
+
+ //! Whether resource has been removed from the configuration
+ pcmk_rsc_removed = (1ULL << 0),
+
+ //! Whether resource is managed
+ pcmk_rsc_managed = (1ULL << 1),
+
+ //! Whether resource is blocked from further action
+ pcmk_rsc_blocked = (1ULL << 2),
+
+ //! Whether resource has been removed but has a container
+ pcmk_rsc_removed_filler = (1ULL << 3),
+
+ //! Whether resource has clone notifications enabled
+ pcmk_rsc_notify = (1ULL << 4),
+
+ //! Whether resource is not an anonymous clone instance
+ pcmk_rsc_unique = (1ULL << 5),
+
+ //! Whether resource's class is "stonith"
+ pcmk_rsc_fence_device = (1ULL << 6),
+
+ //! Whether resource can be promoted and demoted
+ pcmk_rsc_promotable = (1ULL << 7),
+
+ //! Whether resource has not yet been assigned to a node
+ pcmk_rsc_unassigned = (1ULL << 8),
+
+ //! Whether resource is in the process of being assigned to a node
+ pcmk_rsc_assigning = (1ULL << 9),
+
+ //! Whether resource is in the process of modifying allowed node scores
+ pcmk_rsc_updating_nodes = (1ULL << 10),
+
+ //! Whether resource is in the process of scheduling actions to restart
+ pcmk_rsc_restarting = (1ULL << 11),
+
+ //! Whether resource must be stopped (instead of demoted) if it is failed
+ pcmk_rsc_stop_if_failed = (1ULL << 12),
+
+ //! Whether a reload action has been scheduled for resource
+ pcmk_rsc_reload = (1ULL << 13),
+
+ //! Whether resource is a remote connection allowed to run on a remote node
+ pcmk_rsc_remote_nesting_allowed = (1ULL << 14),
+
+ //! Whether resource has "critical" meta-attribute enabled
+ pcmk_rsc_critical = (1ULL << 15),
+
+ //! Whether resource is considered failed
+ pcmk_rsc_failed = (1ULL << 16),
+
+ //! Flag for non-scheduler code to use to detect recursion loops
+ pcmk_rsc_detect_loop = (1ULL << 17),
+
+ //! \deprecated Do not use
+ pcmk_rsc_runnable = (1ULL << 18),
+
+ //! Whether resource has pending start action in history
+ pcmk_rsc_start_pending = (1ULL << 19),
+
+ //! \deprecated Do not use
+ pcmk_rsc_starting = (1ULL << 20),
+
+ //! \deprecated Do not use
+ pcmk_rsc_stopping = (1ULL << 21),
+
+ //! Whether resource is multiply active with recovery set to stop_unexpected
+ pcmk_rsc_stop_unexpected = (1ULL << 22),
+
+ //! Whether resource is allowed to live-migrate
+ pcmk_rsc_migratable = (1ULL << 23),
+
+ //! Whether resource has an ignorable failure
+ pcmk_rsc_ignore_failure = (1ULL << 24),
+
+ //! Whether resource is an implicit container resource for a bundle replica
+ pcmk_rsc_replica_container = (1ULL << 25),
+
+ //! Whether resource, its node, or entire cluster is in maintenance mode
+ pcmk_rsc_maintenance = (1ULL << 26),
+
+ //! \deprecated Do not use
+ pcmk_rsc_has_filler = (1ULL << 27),
+
+ //! Whether resource can be started or promoted only on quorate nodes
+ pcmk_rsc_needs_quorum = (1ULL << 28),
+
+ //! Whether resource requires fencing before recovery if on unclean node
+ pcmk_rsc_needs_fencing = (1ULL << 29),
+
+ //! Whether resource can be started or promoted only on unfenced nodes
+ pcmk_rsc_needs_unfencing = (1ULL << 30),
+};
+
+//! Search options for resources (exact resource ID always matches)
+enum pe_find {
+ //! Also match clone instance ID from resource history
+ pcmk_rsc_match_history = (1 << 0),
+
+ //! Also match anonymous clone instances by base name
+ pcmk_rsc_match_anon_basename = (1 << 1),
+
+ //! Match only clones and their instances, by either clone or instance ID
+ pcmk_rsc_match_clone_only = (1 << 2),
+
+ //! If matching by node, compare current node instead of assigned node
+ pcmk_rsc_match_current_node = (1 << 3),
+
+ //! \deprecated Do not use
+ pe_find_inactive = (1 << 4),
+
+ //! Match clone instances (even unique) by base name as well as exact ID
+ pcmk_rsc_match_basename = (1 << 5),
+
+#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
+ //! \deprecated Use pcmk_rsc_match_history instead
+ pe_find_renamed = pcmk_rsc_match_history,
+
+ //! \deprecated Use pcmk_rsc_match_anon_basename instead
+ pe_find_anon = pcmk_rsc_match_anon_basename,
+
+ //! \deprecated Use pcmk_rsc_match_clone_only instead
+ pe_find_clone = pcmk_rsc_match_clone_only,
+
+ //! \deprecated Use pcmk_rsc_match_current_node instead
+ pe_find_current = pcmk_rsc_match_current_node,
+
+ //! \deprecated Use pcmk_rsc_match_basename instead
+ pe_find_any = pcmk_rsc_match_basename,
+#endif
+};
+
+//!@{
+//! \deprecated Do not use
+enum pe_restart {
+ pe_restart_restart,
+ pe_restart_ignore,
+};
+
+enum pe_print_options {
+ pe_print_log = (1 << 0),
+ pe_print_html = (1 << 1),
+ pe_print_ncurses = (1 << 2),
+ pe_print_printf = (1 << 3),
+ pe_print_dev = (1 << 4), // Ignored
+ pe_print_details = (1 << 5), // Ignored
+ pe_print_max_details = (1 << 6), // Ignored
+ pe_print_rsconly = (1 << 7),
+ pe_print_ops = (1 << 8),
+ pe_print_suppres_nl = (1 << 9),
+ pe_print_xml = (1 << 10),
+ pe_print_brief = (1 << 11),
+ pe_print_pending = (1 << 12),
+ pe_print_clone_details = (1 << 13),
+ pe_print_clone_active = (1 << 14), // Print clone instances only if active
+ pe_print_implicit = (1 << 15) // Print implicitly created resources
+};
+//!@}
+
+// Resource assignment methods (implementation defined by libpacemaker)
+//! This type should be considered internal to Pacemaker
+typedef struct resource_alloc_functions_s pcmk_assignment_methods_t;
+
+//! Resource object methods
+typedef struct resource_object_functions_s {
+ /*!
+ * \brief Parse variant-specific resource XML from CIB into struct members
+ *
+ * \param[in,out] rsc Partially unpacked resource
+ * \param[in,out] scheduler Scheduler data
+ *
+ * \return TRUE if resource was unpacked successfully, otherwise FALSE
+ */
+ gboolean (*unpack)(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler);
+
+ /*!
+ * \brief Search for a resource ID in a resource and its children
+ *
+ * \param[in] rsc Search this resource and its children
+ * \param[in] search Search for this resource ID
+ * \param[in] node If not NULL, limit search to resources on this node
+ * \param[in] flags Group of enum pe_find flags
+ *
+ * \return Resource that matches search criteria if any, otherwise NULL
+ */
+ pcmk_resource_t *(*find_rsc)(pcmk_resource_t *rsc, const char *search,
+ const pcmk_node_t *node, int flags);
+
+ /*!
+ * \brief Get value of a resource instance attribute
+ *
+ * \param[in,out] rsc Resource to check
+ * \param[in] node Node to use to evaluate rules
+ * \param[in] create Ignored
+ * \param[in] name Name of instance attribute to check
+ * \param[in,out] scheduler Scheduler data
+ *
+ * \return Value of requested attribute if available, otherwise NULL
+ * \note The caller is responsible for freeing the result using free().
+ */
+ char *(*parameter)(pcmk_resource_t *rsc, pcmk_node_t *node, gboolean create,
+ const char *name, pcmk_scheduler_t *scheduler);
+
+ //! \deprecated Do not use
+ void (*print)(pcmk_resource_t *rsc, const char *pre_text, long options,
+ void *print_data);
+
+ /*!
+ * \brief Check whether a resource is active
+ *
+ * \param[in] rsc Resource to check
+ * \param[in] all If \p rsc is collective, all instances must be active
+ *
+ * \return TRUE if \p rsc is active, otherwise FALSE
+ */
+ gboolean (*active)(pcmk_resource_t *rsc, gboolean all);
+
+ /*!
+ * \brief Get resource's current or assigned role
+ *
+ * \param[in] rsc Resource to check
+ * \param[in] current If TRUE, check current role, otherwise assigned role
+ *
+ * \return Current or assigned role of \p rsc
+ */
+ enum rsc_role_e (*state)(const pcmk_resource_t *rsc, gboolean current);
+
+ /*!
+ * \brief List nodes where a resource (or any of its children) is
+ *
+ * \param[in] rsc Resource to check
+ * \param[out] list List to add result to
+ * \param[in] current If 0, list nodes where \p rsc is assigned;
+ * if 1, where active; if 2, where active or pending
+ *
+ * \return If list contains only one node, that node, otherwise NULL
+ */
+ pcmk_node_t *(*location)(const pcmk_resource_t *rsc, GList **list,
+ int current);
+
+ /*!
+ * \brief Free all memory used by a resource
+ *
+ * \param[in,out] rsc Resource to free
+ */
+ void (*free)(pcmk_resource_t *rsc);
+
+ /*!
+ * \brief Increment cluster's instance counts for a resource
+ *
+ * Given a resource, increment its cluster's ninstances, disabled_resources,
+ * and blocked_resources counts for the resource and its descendants.
+ *
+ * \param[in,out] rsc Resource to count
+ */
+ void (*count)(pcmk_resource_t *rsc);
+
+ /*!
+ * \brief Check whether a given resource is in a list of resources
+ *
+ * \param[in] rsc Resource ID to check for
+ * \param[in] only_rsc List of resource IDs to check
+ * \param[in] check_parent If TRUE, check top ancestor as well
+ *
+ * \return TRUE if \p rsc, its top parent if requested, or '*' is in
+ * \p only_rsc, otherwise FALSE
+ */
+ gboolean (*is_filtered)(const pcmk_resource_t *rsc, GList *only_rsc,
+ gboolean check_parent);
+
+ /*!
+ * \brief Find a node (and optionally count all) where resource is active
+ *
+ * \param[in] rsc Resource to check
+ * \param[out] count_all If not NULL, set this to count of active nodes
+ * \param[out] count_clean If not NULL, set this to count of clean nodes
+ *
+ * \return A node where the resource is active, preferring the source node
+ * if the resource is involved in a partial migration, or a clean,
+ * online node if the resource's "requires" is "quorum" or
+ * "nothing", otherwise NULL.
+ */
+ pcmk_node_t *(*active_node)(const pcmk_resource_t *rsc,
+ unsigned int *count_all,
+ unsigned int *count_clean);
+
+ /*!
+ * \brief Get maximum resource instances per node
+ *
+ * \param[in] rsc Resource to check
+ *
+ * \return Maximum number of \p rsc instances that can be active on one node
+ */
+ unsigned int (*max_per_node)(const pcmk_resource_t *rsc);
+} pcmk_rsc_methods_t;
+
+//! Implementation of pcmk_resource_t
+struct pe_resource_s {
+ char *id; //!< Resource ID in configuration
+ char *clone_name; //!< Resource instance ID in history
+
+ //! Resource configuration (possibly expanded from template)
+ xmlNode *xml;
+
+ //! Original resource configuration, if using template
+ xmlNode *orig_xml;
+
+ //! Configuration of resource operations (possibly expanded from template)
+ xmlNode *ops_xml;
+
+ pcmk_scheduler_t *cluster; //!< Cluster that resource is part of
+ pcmk_resource_t *parent; //!< Resource's parent resource, if any
+ enum pe_obj_types variant; //!< Resource variant
+ void *variant_opaque; //!< Variant-specific (and private) data
+ pcmk_rsc_methods_t *fns; //!< Resource object methods
+ pcmk_assignment_methods_t *cmds; //!< Resource assignment methods
+
+ enum rsc_recovery_type recovery_type; //!< How to recover if failed
+
+ enum pe_restart restart_type; //!< \deprecated Do not use
+ int priority; //!< Configured priority
+ int stickiness; //!< Extra preference for current node
+ int sort_index; //!< Promotion score on assigned node
+ int failure_timeout; //!< Failure timeout
+ int migration_threshold; //!< Migration threshold
+ guint remote_reconnect_ms; //!< Retry interval for remote connections
+ char *pending_task; //!< Pending action in history, if any
+ unsigned long long flags; //!< Group of enum pcmk_rsc_flags
+
+ // @TODO Merge these into flags
+ gboolean is_remote_node; //!< Whether this is a remote connection
+ gboolean exclusive_discover; //!< Whether exclusive probing is enabled
+
+ /* Pay special attention to whether you want to use rsc_cons_lhs and
+ * rsc_cons directly, which include only colocations explicitly involving
+ * this resource, or call libpacemaker's pcmk__with_this_colocations() and
+ * pcmk__this_with_colocations() functions, which may return relevant
+ * colocations involving the resource's ancestors as well.
+ */
+
+ //!@{
+ //! This field should be treated as internal to Pacemaker
+ GList *rsc_cons_lhs; // Colocations of other resources with this one
+ GList *rsc_cons; // Colocations of this resource with others
+ GList *rsc_location; // Location constraints for resource
+ GList *actions; // Actions scheduled for resource
+ GList *rsc_tickets; // Ticket constraints for resource
+ //!@}
+
+ pcmk_node_t *allocated_to; //!< Node resource is assigned to
+
+ //! The destination node, if migrate_to completed but migrate_from has not
+ pcmk_node_t *partial_migration_target;
+
+ //! The source node, if migrate_to completed but migrate_from has not
+ pcmk_node_t *partial_migration_source;
+
+ //! Nodes where resource may be active
+ GList *running_on;
+
+ //! Nodes where resource has been probed (key is node ID, not name)
+ GHashTable *known_on;
+
+ //! Nodes where resource may run (key is node ID, not name)
+ GHashTable *allowed_nodes;
+
+ enum rsc_role_e role; //!< Resource's current role
+ enum rsc_role_e next_role; //!< Resource's scheduled next role
+
+ GHashTable *meta; //!< Resource's meta-attributes
+ GHashTable *parameters; //!< \deprecated Use pe_rsc_params() instead
+ GHashTable *utilization; //!< Resource's utilization attributes
+
+ GList *children; //!< Resource's child resources, if any
+
+ // Source nodes where stop is needed after migrate_from and migrate_to
+ GList *dangling_migrations;
+
+ pcmk_resource_t *container; //!< Resource containing this one, if any
+ GList *fillers; //!< Resources contained by this one, if any
+
+ // @COMPAT These should be made const at next API compatibility break
+ pcmk_node_t *pending_node; //!< Node on which pending_task is happening
+ pcmk_node_t *lock_node; //!< Resource shutdown-locked to this node
+
+ time_t lock_time; //!< When shutdown lock started
+
+ /*!
+ * Resource parameters may have node-attribute-based rules, which means the
+ * values can vary by node. This table has node names as keys and parameter
+ * name/value tables as values. Use pe_rsc_params() to get the table for a
+ * given node rather than use this directly.
+ */
+ GHashTable *parameter_cache;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // PCMK__CRM_COMMON_RESOURCES__H
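A short sketch of how callers typically combine the flag group and object methods above; the resource pointer would come from scheduler data, and pcmk_is_set() is the flag-test helper from crm/common/util.h:

#include <stdio.h>
#include <glib.h>                       // TRUE
#include <crm/common/util.h>            // pcmk_is_set()
#include <crm/common/resources.h>

// Summarize a resource's management state, failure state, and current role
static void
report_resource(pcmk_resource_t *rsc)
{
    if (rsc == NULL) {
        return;
    }
    printf("%s: %s, %s, current role %d\n", rsc->id,
           pcmk_is_set(rsc->flags, pcmk_rsc_managed)? "managed" : "unmanaged",
           pcmk_is_set(rsc->flags, pcmk_rsc_failed)? "failed" : "not failed",
           (int) rsc->fns->state(rsc, TRUE));
}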
diff --git a/include/crm/common/results.h b/include/crm/common/results.h
index 224bcbe..87d00d2 100644
--- a/include/crm/common/results.h
+++ b/include/crm/common/results.h
@@ -108,6 +108,9 @@ enum pcmk_rc_e {
/* When adding new values, use consecutively lower numbers, update the array
* in lib/common/results.c, and test with crm_error.
*/
+ pcmk_rc_compression = -1039,
+ pcmk_rc_ns_resolution = -1038,
+ pcmk_rc_no_transaction = -1037,
pcmk_rc_bad_xml_patch = -1036,
pcmk_rc_bad_input = -1035,
pcmk_rc_disabled = -1034,
@@ -360,7 +363,6 @@ int pcmk_rc2legacy(int rc);
int pcmk_legacy2rc(int legacy_rc);
const char *pcmk_strerror(int rc);
const char *pcmk_errorname(int rc);
-const char *bz2_strerror(int rc);
const char *crm_exit_name(crm_exit_t exit_code);
const char *crm_exit_str(crm_exit_t exit_code);
_Noreturn crm_exit_t crm_exit(crm_exit_t rc);
diff --git a/include/crm/common/results_compat.h b/include/crm/common/results_compat.h
index 00ac6b2..278e48e 100644
--- a/include/crm/common/results_compat.h
+++ b/include/crm/common/results_compat.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -25,6 +25,9 @@ extern "C" {
* release.
*/
+//! \deprecated Do not use
+const char *bz2_strerror(int rc);
+
//! \deprecated Use pcmk_rc2exitc(pcmk_legacy2rc(rc)) instead
crm_exit_t crm_errno2exit(int rc);
diff --git a/include/crm/common/results_internal.h b/include/crm/common/results_internal.h
index be62780..09907e9 100644
--- a/include/crm/common/results_internal.h
+++ b/include/crm/common/results_internal.h
@@ -69,6 +69,9 @@ void pcmk__reset_result(pcmk__action_result_t *result);
void pcmk__copy_result(const pcmk__action_result_t *src,
pcmk__action_result_t *dst);
+int pcmk__gaierror2rc(int gai);
+int pcmk__bzlib2rc(int bz2);
+
/*!
* \internal
* \brief Check whether a result is OK
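A sketch of the intended use of the new pcmk__gaierror2rc() helper; that it maps resolution failures to the new pcmk_rc_ns_resolution code is an assumption based on the additions above:

#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>                          // getaddrinfo()
#include <crm/common/results.h>             // pcmk_rc_ok
#include <crm/common/results_internal.h>    // pcmk__gaierror2rc()

// Resolve a host name, translating getaddrinfo() errors to Pacemaker codes
static int
resolve_host(const char *host, struct addrinfo **result)
{
    struct addrinfo hints = { 0 };
    int gai_rc;

    hints.ai_family = AF_UNSPEC;
    hints.ai_socktype = SOCK_STREAM;
    gai_rc = getaddrinfo(host, NULL, &hints, result);
    return (gai_rc == 0)? pcmk_rc_ok : pcmk__gaierror2rc(gai_rc);
}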
diff --git a/include/crm/common/roles.h b/include/crm/common/roles.h
new file mode 100644
index 0000000..1498097
--- /dev/null
+++ b/include/crm/common/roles.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef PCMK__CRM_COMMON_ROLES__H
+# define PCMK__CRM_COMMON_ROLES__H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*!
+ * \file
+ * \brief Scheduler API for resource roles
+ * \ingroup core
+ */
+
+/*!
+ * Possible roles that a resource can be in
+ * (order matters; values can be compared with less than and greater than)
+ */
+enum rsc_role_e {
+ pcmk_role_unknown = 0, //!< Resource role is unknown
+ pcmk_role_stopped = 1, //!< Stopped
+ pcmk_role_started = 2, //!< Started
+ pcmk_role_unpromoted = 3, //!< Unpromoted
+ pcmk_role_promoted = 4, //!< Promoted
+
+#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
+ //! \deprecated Use pcmk_role_unknown instead
+ RSC_ROLE_UNKNOWN = pcmk_role_unknown,
+
+ //! \deprecated Use pcmk_role_stopped instead
+ RSC_ROLE_STOPPED = pcmk_role_stopped,
+
+ //! \deprecated Use pcmk_role_started instead
+ RSC_ROLE_STARTED = pcmk_role_started,
+
+ //! \deprecated Use pcmk_role_unpromoted instead
+ RSC_ROLE_UNPROMOTED = pcmk_role_unpromoted,
+
+ //! \deprecated Use pcmk_role_unpromoted instead
+ RSC_ROLE_SLAVE = pcmk_role_unpromoted,
+
+ //! \deprecated Use pcmk_role_promoted instead
+ RSC_ROLE_PROMOTED = pcmk_role_promoted,
+
+ //! \deprecated Use pcmk_role_promoted instead
+ RSC_ROLE_MASTER = pcmk_role_promoted,
+#endif
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // PCMK__CRM_COMMON_ROLES__H
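Because the enumerators are ordered, callers can use comparisons rather than exhaustive switches; a minimal sketch:

#include <stdbool.h>
#include <crm/common/roles.h>

// A role of Started, Unpromoted, or Promoted means the resource is active
static inline bool
role_is_active(enum rsc_role_e role)
{
    return role >= pcmk_role_started;
}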
diff --git a/include/crm/common/roles_internal.h b/include/crm/common/roles_internal.h
new file mode 100644
index 0000000..e304f13
--- /dev/null
+++ b/include/crm/common/roles_internal.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef PCMK__CRM_COMMON_ROLES_INTERNAL__H
+# define PCMK__CRM_COMMON_ROLES_INTERNAL__H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// String equivalents of enum rsc_role_e
+#define PCMK__ROLE_UNKNOWN "Unknown"
+#define PCMK__ROLE_STOPPED "Stopped"
+#define PCMK__ROLE_STARTED "Started"
+#define PCMK__ROLE_UNPROMOTED "Unpromoted"
+#define PCMK__ROLE_PROMOTED "Promoted"
+#define PCMK__ROLE_UNPROMOTED_LEGACY "Slave"
+#define PCMK__ROLE_PROMOTED_LEGACY "Master"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // PCMK__CRM_COMMON_ROLES_INTERNAL__H
diff --git a/include/crm/common/scheduler.h b/include/crm/common/scheduler.h
new file mode 100644
index 0000000..96f9a62
--- /dev/null
+++ b/include/crm/common/scheduler.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef PCMK__CRM_COMMON_SCHEDULER__H
+# define PCMK__CRM_COMMON_SCHEDULER__H
+
+#include <sys/types.h> // time_t
+#include <libxml/tree.h> // xmlNode
+#include <glib.h> // guint, GList, GHashTable
+
+#include <crm/common/iso8601.h> // crm_time_t
+
+#include <crm/common/actions.h>
+#include <crm/common/nodes.h>
+#include <crm/common/resources.h>
+#include <crm/common/roles.h>
+#include <crm/common/scheduler_types.h>
+#include <crm/common/tags.h>
+#include <crm/common/tickets.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*!
+ * \file
+ * \brief Scheduler API
+ * \ingroup core
+ */
+
+//! Possible responses to loss of quorum
+enum pe_quorum_policy {
+ pcmk_no_quorum_freeze, //!< Do not recover resources from outside partition
+ pcmk_no_quorum_stop, //!< Stop all resources in partition
+ pcmk_no_quorum_ignore, //!< Act as if partition still holds quorum
+ pcmk_no_quorum_fence, //!< Fence all nodes in partition
+ pcmk_no_quorum_demote, //!< Demote promotable resources and stop all others
+
+#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
+ //! \deprecated Use pcmk_no_quorum_freeze instead
+ no_quorum_freeze = pcmk_no_quorum_freeze,
+
+ //! \deprecated Use pcmk_no_quorum_stop instead
+ no_quorum_stop = pcmk_no_quorum_stop,
+
+ //! \deprecated Use pcmk_no_quorum_ignore instead
+ no_quorum_ignore = pcmk_no_quorum_ignore,
+
+ //! \deprecated Use pcmk_no_quorum_fence instead
+ no_quorum_suicide = pcmk_no_quorum_fence,
+
+ //! \deprecated Use pcmk_no_quorum_demote instead
+ no_quorum_demote = pcmk_no_quorum_demote,
+#endif
+};
+
+//! Scheduling options and conditions
+enum pcmk_scheduler_flags {
+ //! No scheduler flags set (compare with equality rather than bit set)
+ pcmk_sched_none = 0ULL,
+
+ // These flags are dynamically determined conditions
+
+ //! Whether partition has quorum (via have-quorum property)
+ pcmk_sched_quorate = (1ULL << 0),
+
+ //! Whether cluster is symmetric (via symmetric-cluster property)
+ pcmk_sched_symmetric_cluster = (1ULL << 1),
+
+ //! Whether cluster is in maintenance mode (via maintenance-mode property)
+ pcmk_sched_in_maintenance = (1ULL << 3),
+
+ //! Whether fencing is enabled (via stonith-enabled property)
+ pcmk_sched_fencing_enabled = (1ULL << 4),
+
+ //! Whether cluster has a fencing resource (via CIB resources)
+ pcmk_sched_have_fencing = (1ULL << 5),
+
+ //! Whether any resource provides or requires unfencing (via CIB resources)
+ pcmk_sched_enable_unfencing = (1ULL << 6),
+
+ //! Whether concurrent fencing is allowed (via concurrent-fencing property)
+ pcmk_sched_concurrent_fencing = (1ULL << 7),
+
+ /*!
+ * Whether resources removed from the configuration should be stopped (via
+ * stop-orphan-resources property)
+ */
+ pcmk_sched_stop_removed_resources = (1ULL << 8),
+
+ /*!
+ * Whether recurring actions removed from the configuration should be
+ * cancelled (via stop-orphan-actions property)
+ */
+ pcmk_sched_cancel_removed_actions = (1ULL << 9),
+
+ //! Whether to stop all resources (via stop-all-resources property)
+ pcmk_sched_stop_all = (1ULL << 10),
+
+ /*!
+ * Whether start failure should be treated as if migration-threshold is 1
+ * (via start-failure-is-fatal property)
+ */
+ pcmk_sched_start_failure_fatal = (1ULL << 12),
+
+ //! \deprecated Do not use
+ pcmk_sched_remove_after_stop = (1ULL << 13),
+
+ //! Whether unseen nodes should be fenced (via startup-fencing property)
+ pcmk_sched_startup_fencing = (1ULL << 14),
+
+ /*!
+ * Whether resources should be left stopped when their node shuts down
+ * cleanly (via shutdown-lock property)
+ */
+ pcmk_sched_shutdown_lock = (1ULL << 15),
+
+ /*!
+ * Whether resources' current state should be probed (when unknown) before
+ * scheduling any other actions (via the enable-startup-probes property)
+ */
+ pcmk_sched_probe_resources = (1ULL << 16),
+
+ //! Whether the CIB status section has been parsed yet
+ pcmk_sched_have_status = (1ULL << 17),
+
+ //! Whether the cluster includes any Pacemaker Remote nodes (via CIB)
+ pcmk_sched_have_remote_nodes = (1ULL << 18),
+
+ // The remaining flags are scheduling options that must be set explicitly
+
+ /*!
+ * Whether to skip unpacking the CIB status section and stop the scheduling
+ * sequence after applying node-specific location criteria (skipping
+ * assignment, ordering, actions, etc.).
+ */
+ pcmk_sched_location_only = (1ULL << 20),
+
+ //! Whether sensitive resource attributes have been masked
+ pcmk_sched_sanitized = (1ULL << 21),
+
+ //! Skip counting of total, disabled, and blocked resource instances
+ pcmk_sched_no_counts = (1ULL << 23),
+
+ /*!
+ * Skip deprecated code kept solely for backward API compatibility
+ * (internal code should always set this)
+ */
+ pcmk_sched_no_compat = (1ULL << 24),
+
+ //! Whether node scores should be output instead of logged
+ pcmk_sched_output_scores = (1ULL << 25),
+
+ //! Whether to show node and resource utilization (in log or output)
+ pcmk_sched_show_utilization = (1ULL << 26),
+
+ /*!
+ * Whether to stop the scheduling sequence after unpacking the CIB,
+ * calculating cluster status, and applying node health (skipping
+ * applying node-specific location criteria, assignment, etc.)
+ */
+ pcmk_sched_validate_only = (1ULL << 27),
+};
+
+//! Implementation of pcmk_scheduler_t
+struct pe_working_set_s {
+ // Be careful about when each piece of information is available and final
+
+ xmlNode *input; //!< CIB XML
+ crm_time_t *now; //!< Current time for evaluation purposes
+ char *dc_uuid; //!< Node ID of designated controller
+ pcmk_node_t *dc_node; //!< Node object for DC
+ const char *stonith_action; //!< Default fencing action
+ const char *placement_strategy; //!< Value of placement-strategy property
+
+ // @COMPAT Change to uint64_t at a compatibility break
+ unsigned long long flags; //!< Group of enum pcmk_scheduler_flags
+
+ int stonith_timeout; //!< Value of stonith-timeout property
+ enum pe_quorum_policy no_quorum_policy; //!< Response to loss of quorum
+ GHashTable *config_hash; //!< Cluster properties
+
+ //! Ticket constraints unpacked from ticket state
+ GHashTable *tickets;
+
+ //! Actions for which there can be only one (such as "fence node X")
+ GHashTable *singletons;
+
+ GList *nodes; //!< Nodes in cluster
+ GList *resources; //!< Resources in cluster
+ GList *placement_constraints; //!< Location constraints
+ GList *ordering_constraints; //!< Ordering constraints
+ GList *colocation_constraints; //!< Colocation constraints
+
+ //! Ticket constraints unpacked by libpacemaker
+ GList *ticket_constraints;
+
+ GList *actions; //!< Scheduled actions
+ xmlNode *failed; //!< History entries of failed actions
+ xmlNode *op_defaults; //!< Configured operation defaults
+ xmlNode *rsc_defaults; //!< Configured resource defaults
+ int num_synapse; //!< Number of transition graph synapses
+ int max_valid_nodes; //!< \deprecated Do not use
+ int order_id; //!< ID to use for next created ordering
+ int action_id; //!< ID to use for next created action
+ xmlNode *graph; //!< Transition graph
+ GHashTable *template_rsc_sets; //!< Mappings of template ID to resource ID
+
+ // @COMPAT Replace this with a fencer variable (only place it's used)
+ const char *localhost; //!< \deprecated Do not use
+
+ GHashTable *tags; //!< Configuration tags (ID -> pcmk_tag_t *)
+ int blocked_resources; //!< Number of blocked resources in cluster
+ int disabled_resources; //!< Number of disabled resources in cluster
+ GList *param_check; //!< History entries that need to be checked
+ GList *stop_needed; //!< Containers that need stop actions
+ time_t recheck_by; //!< Hint to controller when to reschedule
+ int ninstances; //!< Total number of resource instances
+ guint shutdown_lock; //!< How long to lock resources (seconds)
+ int priority_fencing_delay; //!< Priority fencing delay
+
+ // pcmk__output_t *
+ void *priv; //!< For Pacemaker use only
+
+ guint node_pending_timeout; //!< Pending join times out after this (ms)
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // PCMK__CRM_COMMON_SCHEDULER__H
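A small sketch of walking a populated scheduler object, assuming the CIB has already been unpacked into it elsewhere; only members declared above are used:

#include <glib.h>
#include <crm/common/scheduler.h>

// Count nodes currently considered online by the scheduler
static int
count_online_nodes(const pcmk_scheduler_t *scheduler)
{
    int online = 0;

    for (const GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
        const pcmk_node_t *node = iter->data;

        if (node->details->online) {
            online++;
        }
    }
    return online;
}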
diff --git a/include/crm/common/scheduler_internal.h b/include/crm/common/scheduler_internal.h
new file mode 100644
index 0000000..1f1da9f
--- /dev/null
+++ b/include/crm/common/scheduler_internal.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef PCMK__CRM_COMMON_SCHEDULER_INTERNAL__H
+# define PCMK__CRM_COMMON_SCHEDULER_INTERNAL__H
+
+#include <crm/common/action_relation_internal.h>
+#include <crm/common/clone_internal.h>
+#include <crm/common/digests_internal.h>
+#include <crm/common/failcounts_internal.h>
+#include <crm/common/group_internal.h>
+#include <crm/common/roles_internal.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Some warnings are too noisy when logged every time a given function is called
+ * (for example, using a deprecated feature). As an alternative, we allow
+ * warnings to be logged once per scheduler sequence (transition). Each of those
+ * warnings needs a flag defined here.
+ */
+enum pcmk__sched_warnings {
+ pcmk__wo_blind = (1 << 0),
+ pcmk__wo_restart_type = (1 << 1),
+ pcmk__wo_role_after = (1 << 2),
+ pcmk__wo_poweroff = (1 << 3),
+ pcmk__wo_require_all = (1 << 4),
+ pcmk__wo_order_score = (1 << 5),
+ pcmk__wo_neg_threshold = (1 << 6),
+ pcmk__wo_remove_after = (1 << 7),
+ pcmk__wo_ping_node = (1 << 8),
+ pcmk__wo_order_inst = (1 << 9),
+ pcmk__wo_coloc_inst = (1 << 10),
+ pcmk__wo_group_order = (1 << 11),
+ pcmk__wo_group_coloc = (1 << 12),
+ pcmk__wo_upstart = (1 << 13),
+ pcmk__wo_nagios = (1 << 14),
+ pcmk__wo_set_ordering = (1 << 15),
+};
+
+enum pcmk__check_parameters {
+ /* Clear fail count if parameters changed for un-expired start or monitor
+ * last_failure.
+ */
+ pcmk__check_last_failure,
+
+ /* Clear fail count if parameters changed for start, monitor, promote, or
+ * migrate_from actions for active resources.
+ */
+ pcmk__check_active,
+};
+
+// Group of enum pcmk__sched_warnings flags for warnings we want to log once
+extern uint32_t pcmk__warnings;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // PCMK__CRM_COMMON_SCHEDULER_INTERNAL__H
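The once-per-transition pattern described in the comment above looks roughly like this; Pacemaker has its own internal helper for it, so this sketch only shows the idea using the flag group:

#include <crm/crm.h>                            // crm_warn()
#include <crm/common/util.h>                    // pcmk_is_set()
#include <crm/common/scheduler_internal.h>      // pcmk__warnings

// Log a deprecation warning at most once per scheduling sequence
static void
warn_about_ping_nodes(void)
{
    if (!pcmk_is_set(pcmk__warnings, pcmk__wo_ping_node)) {
        crm_warn("Support for the deprecated 'ping' node type will be removed");
        pcmk__warnings |= pcmk__wo_ping_node;
    }
}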
diff --git a/include/crm/common/scheduler_types.h b/include/crm/common/scheduler_types.h
new file mode 100644
index 0000000..5c4a193
--- /dev/null
+++ b/include/crm/common/scheduler_types.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef PCMK__CRM_COMMON_SCHEDULER_TYPES__H
+# define PCMK__CRM_COMMON_SCHEDULER_TYPES__H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*!
+ * \file
+ * \brief Type aliases needed to define scheduler objects
+ * \ingroup core
+ */
+
+//! Node object (including information that may vary depending on resource)
+typedef struct pe_node_s pcmk_node_t;
+
+//! Resource object
+typedef struct pe_resource_s pcmk_resource_t;
+
+//! Action object
+typedef struct pe_action_s pcmk_action_t;
+
+//! Scheduler object
+typedef struct pe_working_set_s pcmk_scheduler_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // PCMK__CRM_COMMON_SCHEDULER_TYPES__H
diff --git a/include/crm/common/tags.h b/include/crm/common/tags.h
new file mode 100644
index 0000000..3f4861d
--- /dev/null
+++ b/include/crm/common/tags.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef PCMK__CRM_COMMON_TAGS__H
+# define PCMK__CRM_COMMON_TAGS__H
+
+#include <glib.h> // GList
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*!
+ * \file
+ * \brief Scheduler API for configuration tags
+ * \ingroup core
+ */
+
+//! Configuration tag object
+typedef struct pe_tag_s {
+ char *id; //!< XML ID of tag
+ GList *refs; //!< XML IDs of objects that reference the tag
+} pcmk_tag_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // PCMK__CRM_COMMON_TAGS__H
diff --git a/include/crm/common/tickets.h b/include/crm/common/tickets.h
new file mode 100644
index 0000000..40079e9
--- /dev/null
+++ b/include/crm/common/tickets.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef PCMK__CRM_COMMON_TICKETS__H
+# define PCMK__CRM_COMMON_TICKETS__H
+
+#include <sys/types.h> // time_t
+#include <glib.h> // gboolean, GHashTable
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*!
+ * \file
+ * \brief Scheduler API for tickets
+ * \ingroup core
+ */
+
+//! Ticket constraint object
+typedef struct pe_ticket_s {
+ char *id; //!< XML ID of ticket constraint or state
+ gboolean granted; //!< Whether cluster has been granted the ticket
+ time_t last_granted; //!< When cluster was last granted the ticket
+ gboolean standby; //!< Whether ticket is temporarily suspended
+ GHashTable *state; //!< XML attributes from ticket state
+} pcmk_ticket_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // PCMK__CRM_COMMON_TICKETS__H
diff --git a/include/crm/common/unittest_internal.h b/include/crm/common/unittest_internal.h
index b8f78cf..1fc8501 100644
--- a/include/crm/common/unittest_internal.h
+++ b/include/crm/common/unittest_internal.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -65,6 +65,44 @@
} \
} while (0);
+/*!
+ * \internal
+ * \brief Assert that a statement exits with the expected exit status.
+ *
+ * \param[in] rc The expected exit status.
+ * \param[in] stmt Statement to execute; can be an expression.
+ *
+ * This functions just like \c pcmk__assert_asserts, except that it tests for
+ * an expected exit status. Abnormal termination or incorrect exit status is
+ * treated as a failure of the test.
+ *
+ * If \p stmt does not exit at all, the child process exits with the special
+ * code \c CRM_EX_NONE. That code is not expected to be used anywhere else,
+ * so it always causes a test failure.
+ */
+#define pcmk__assert_exits(rc, stmt) \
+ do { \
+ pid_t p = fork(); \
+ if (p == 0) { \
+ struct rlimit cores = { 0, 0 }; \
+ setrlimit(RLIMIT_CORE, &cores); \
+ stmt; \
+ _exit(CRM_EX_NONE); \
+ } else if (p > 0) { \
+ int wstatus = 0; \
+ if (waitpid(p, &wstatus, 0) == -1) { \
+ fail_msg("waitpid failed"); \
+ } \
+ if (!WIFEXITED(wstatus)) { \
+ fail_msg("statement terminated abnormally"); \
+ } else if (WEXITSTATUS(wstatus) != rc) { \
+ fail_msg("statement exited with %d, not expected %d", WEXITSTATUS(wstatus), rc); \
+ } \
+ } else { \
+ fail_msg("unable to fork for assert test"); \
+ } \
+ } while (0);
+
/* Generate the main function of most unit test files. Typically, group_setup
* and group_teardown will be NULL. The rest of the arguments are a list of
* calls to cmocka_unit_test or cmocka_unit_test_setup_teardown to run the
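A usage sketch of pcmk__assert_exits() in the usual cmocka test style, using crm_exit() as a stand-in for the code under test (a real test would call the function being exercised instead):

#include <crm/crm.h>                        // crm_exit(), CRM_EX_USAGE
#include <crm/common/unittest_internal.h>

// crm_exit() terminates the forked child with the given status, so this passes
static void
exits_with_usage(void **state)
{
    pcmk__assert_exits(CRM_EX_USAGE, crm_exit(CRM_EX_USAGE));
}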
diff --git a/include/crm/common/util.h b/include/crm/common/util.h
index 8acdff9..c75a55e 100644
--- a/include/crm/common/util.h
+++ b/include/crm/common/util.h
@@ -18,10 +18,8 @@
# include <signal.h>
# include <glib.h>
-# include <libxml/tree.h>
-
-# include <crm/lrmd.h>
# include <crm/common/acl.h>
+# include <crm/common/actions.h>
# include <crm/common/agents.h>
# include <crm/common/results.h>
@@ -59,26 +57,6 @@ char *crm_strdup_printf(char const *format, ...) G_GNUC_PRINTF(1, 2);
guint crm_parse_interval_spec(const char *input);
-/* public operation functions (from operations.c) */
-gboolean parse_op_key(const char *key, char **rsc_id, char **op_type,
- guint *interval_ms);
-gboolean decode_transition_key(const char *key, char **uuid, int *transition_id,
- int *action_id, int *target_rc);
-gboolean decode_transition_magic(const char *magic, char **uuid,
- int *transition_id, int *action_id,
- int *op_status, int *op_rc, int *target_rc);
-int rsc_op_expected_rc(const lrmd_event_data_t *event);
-gboolean did_rsc_op_fail(lrmd_event_data_t *event, int target_rc);
-bool crm_op_needs_metadata(const char *rsc_class, const char *op);
-xmlNode *crm_create_op_xml(xmlNode *parent, const char *prefix,
- const char *task, const char *interval_spec,
- const char *timeout);
-#define CRM_DEFAULT_OP_TIMEOUT_S "20s"
-
-bool pcmk_is_probe(const char *task, guint interval);
-bool pcmk_xe_is_probe(const xmlNode *xml_op);
-bool pcmk_xe_mask_probe_failure(const xmlNode *xml_op);
-
int compare_version(const char *version1, const char *version2);
/* coverity[+kill] */
diff --git a/include/crm/common/util_compat.h b/include/crm/common/util_compat.h
index 9e02e12..7a60208 100644
--- a/include/crm/common/util_compat.h
+++ b/include/crm/common/util_compat.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2021 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -11,6 +11,7 @@
# define PCMK__CRM_COMMON_UTIL_COMPAT__H
# include <glib.h>
+# include <libxml/tree.h>
# include <crm/common/util.h>
#ifdef __cplusplus
@@ -29,6 +30,9 @@ extern "C" {
//! \deprecated Use crm_parse_interval_spec() instead
#define crm_get_interval crm_parse_interval_spec
+//! \deprecated Do not use
+#define CRM_DEFAULT_OP_TIMEOUT_S "20s"
+
//! \deprecated Use !pcmk_is_set() or !pcmk_all_flags_set() instead
static inline gboolean
is_not_set(long long word, long long bit)
@@ -69,6 +73,9 @@ int pcmk_scan_nvpair(const char *input, char **name, char **value);
char *pcmk_format_nvpair(const char *name, const char *value,
const char *units);
+//! \deprecated Use \c crm_xml_add() or \c xml_remove_prop() instead
+const char *crm_xml_replace(xmlNode *node, const char *name, const char *value);
+
//! \deprecated Use a standard printf()-style function instead
char *pcmk_format_named_time(const char *name, time_t epoch_time);
diff --git a/include/crm/common/xml.h b/include/crm/common/xml.h
index 682b31c..ac839d3 100644
--- a/include/crm/common/xml.h
+++ b/include/crm/common/xml.h
@@ -52,8 +52,6 @@ typedef const xmlChar *pcmkXmlStr;
gboolean add_message_xml(xmlNode * msg, const char *field, xmlNode * xml);
xmlNode *get_message_xml(const xmlNode *msg, const char *field);
-xmlDoc *getDocPtr(xmlNode * node);
-
/*
* \brief xmlCopyPropList ACLs-sensitive replacement expanding i++ notation
*
@@ -132,12 +130,13 @@ xmlNode *stdin2xml(void);
xmlNode *string2xml(const char *input);
-int write_xml_fd(xmlNode * xml_node, const char *filename, int fd, gboolean compress);
-int write_xml_file(xmlNode * xml_node, const char *filename, gboolean compress);
+int write_xml_fd(const xmlNode *xml, const char *filename, int fd,
+ gboolean compress);
+int write_xml_file(const xmlNode *xml, const char *filename, gboolean compress);
-char *dump_xml_formatted(xmlNode * msg);
-char *dump_xml_formatted_with_text(xmlNode * msg);
-char *dump_xml_unformatted(xmlNode * msg);
+char *dump_xml_formatted(const xmlNode *xml);
+char *dump_xml_formatted_with_text(const xmlNode *xml);
+char *dump_xml_unformatted(const xmlNode *xml);
/*
* Diff related Functions
@@ -170,25 +169,17 @@ xmlNode *get_xpath_object(const char *xpath, xmlNode * xml_obj, int error_level)
xmlNode *get_xpath_object_relative(const char *xpath, xmlNode * xml_obj, int error_level);
static inline const char *
-crm_element_name(const xmlNode *xml)
-{
- return xml? (const char *)(xml->name) : NULL;
-}
-
-static inline const char *
crm_map_element_name(const xmlNode *xml)
{
- const char *name = crm_element_name(xml);
-
- if (strcmp(name, "master") == 0) {
+ if (xml == NULL) {
+ return NULL;
+ } else if (strcmp((const char *) xml->name, "master") == 0) {
return "clone";
} else {
- return name;
+ return (const char *) xml->name;
}
}
-gboolean xml_has_children(const xmlNode * root);
-
char *calculate_on_disk_digest(xmlNode * local_cib);
char *calculate_operation_digest(xmlNode * local_cib, const char *version);
char *calculate_xml_versioned_digest(xmlNode * input, gboolean sort, gboolean do_filter,
@@ -196,7 +187,7 @@ char *calculate_xml_versioned_digest(xmlNode * input, gboolean sort, gboolean do
/* schema-related functions (from schemas.c) */
gboolean validate_xml(xmlNode * xml_blob, const char *validation, gboolean to_logs);
-gboolean validate_xml_verbose(xmlNode * xml_blob);
+gboolean validate_xml_verbose(const xmlNode *xml_blob);
/*!
* \brief Update CIB XML to most recent schema version
@@ -258,7 +249,7 @@ xmlNode *first_named_child(const xmlNode *parent, const char *name);
xmlNode *crm_next_same_xml(const xmlNode *sibling);
xmlNode *sorted_xml(xmlNode * input, xmlNode * parent, gboolean recursive);
-xmlXPathObjectPtr xpath_search(xmlNode * xml_top, const char *path);
+xmlXPathObjectPtr xpath_search(const xmlNode *xml_top, const char *path);
void crm_foreach_xpath_result(xmlNode *xml, const char *xpath,
void (*helper)(xmlNode*, void*), void *user_data);
xmlNode *expand_idref(xmlNode * input, xmlNode * top);
@@ -289,7 +280,8 @@ int xml_apply_patchset(xmlNode *xml, xmlNode *patchset, bool check_version);
void patchset_process_digest(xmlNode *patch, xmlNode *source, xmlNode *target, bool with_digest);
-void save_xml_to_file(xmlNode * xml, const char *desc, const char *filename);
+void save_xml_to_file(const xmlNode *xml, const char *desc,
+ const char *filename);
char * crm_xml_escape(const char *text);
void crm_xml_sanitize_id(char *id);
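With the const-qualified signatures above, a read-only tree can be serialized without casts; a minimal sketch with error handling abbreviated:

#include <stdio.h>
#include <stdlib.h>
#include <glib.h>                 // FALSE
#include <libxml/tree.h>
#include <crm/common/xml.h>

// Dump a tree to stderr and also write it (uncompressed) to a file
static void
save_copy(const xmlNode *cib, const char *path)
{
    char *text = dump_xml_formatted(cib);

    if (text != NULL) {
        fprintf(stderr, "%s", text);
        free(text);
    }
    write_xml_file(cib, path, FALSE);
}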
diff --git a/include/crm/common/xml_compat.h b/include/crm/common/xml_compat.h
index bb49b68..85e39ff 100644
--- a/include/crm/common/xml_compat.h
+++ b/include/crm/common/xml_compat.h
@@ -31,6 +31,9 @@ extern "C" {
#define XML_PARANOIA_CHECKS 0
//! \deprecated This function will be removed in a future release
+xmlDoc *getDocPtr(xmlNode *node);
+
+//! \deprecated This function will be removed in a future release
int add_node_nocopy(xmlNode * parent, const char *name, xmlNode * child);
//! \deprecated This function will be removed in a future release
@@ -51,13 +54,23 @@ gboolean apply_xml_diff(xmlNode *old_xml, xmlNode *diff, xmlNode **new_xml);
//! \deprecated Do not use (will be removed in a future release)
void crm_destroy_xml(gpointer data);
-//! \deprecated Use crm_xml_add() with "true" or "false" instead
+//! \deprecated Check children member directly
+gboolean xml_has_children(const xmlNode *root);
+
+//! \deprecated Use crm_xml_add() with "true" or "false" instead
static inline const char *
crm_xml_add_boolean(xmlNode *node, const char *name, gboolean value)
{
return crm_xml_add(node, name, (value? "true" : "false"));
}
+//! \deprecated Use name member directly
+static inline const char *
+crm_element_name(const xmlNode *xml)
+{
+ return (xml == NULL)? NULL : (const char *) xml->name;
+}
+
#ifdef __cplusplus
}
#endif
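A migration sketch for the deprecations above: the libxml2 members can be read directly instead of calling the wrappers.

#include <libxml/tree.h>

// Equivalent to the deprecated crm_element_name()
static const char *
element_name(const xmlNode *xml)
{
    return (xml == NULL)? NULL : (const char *) xml->name;
}

// Equivalent to the deprecated xml_has_children()
static int
has_children(const xmlNode *xml)
{
    return (xml != NULL) && (xml->children != NULL);
}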
diff --git a/include/crm/common/xml_internal.h b/include/crm/common/xml_internal.h
index 43b3b8c..ddb4384 100644
--- a/include/crm/common/xml_internal.h
+++ b/include/crm/common/xml_internal.h
@@ -21,6 +21,7 @@
# include <crm/crm.h> /* transitively imports qblog.h */
# include <crm/common/output_internal.h>
+# include <libxml/relaxng.h>
/*!
* \brief Base for directing lib{xml2,xslt} log into standard libqb backend
@@ -135,9 +136,6 @@ enum pcmk__xml_fmt_options {
//! Include indentation and newlines
pcmk__xml_fmt_pretty = (1 << 1),
- //! Include full XML subtree (with any text), using libxml serialization
- pcmk__xml_fmt_full = (1 << 2),
-
//! Include the opening tag of an XML element, and include XML comments
pcmk__xml_fmt_open = (1 << 3),
@@ -147,7 +145,6 @@ enum pcmk__xml_fmt_options {
//! Include the closing tag of an XML element
pcmk__xml_fmt_close = (1 << 5),
- // @COMPAT Remove when log_data_element() is removed
//! Include XML text nodes
pcmk__xml_fmt_text = (1 << 6),
@@ -190,6 +187,16 @@ int pcmk__xml_show_changes(pcmk__output_t *out, const xmlNode *xml);
#define PCMK__XP_REMOTE_NODE_STATUS \
"//" XML_TAG_CIB "//" XML_CIB_TAG_STATUS "//" XML_CIB_TAG_STATE \
"[@" XML_NODE_IS_REMOTE "='true']"
+/*!
+ * \internal
+ * \brief Serialize XML (using libxml) to a given file descriptor
+ *
+ * \param[in] fd File descriptor to write to (piece-wise)
+ * \param[in] cur XML subtree to serialize
+ *
+ * \return Standard Pacemaker return code
+ */
+int pcmk__xml2fd(int fd, xmlNode *cur);
enum pcmk__xml_artefact_ns {
pcmk__xml_artefact_ns_legacy_rng = 1,
@@ -235,6 +242,22 @@ char *pcmk__xml_artefact_path(enum pcmk__xml_artefact_ns ns,
/*!
* \internal
+ * \brief Check whether an XML element is of a particular type
+ *
+ * \param[in] xml XML element to compare
+ * \param[in] name XML element name to compare
+ *
+ * \return \c true if \p xml is of type \p name, otherwise \c false
+ */
+static inline bool
+pcmk__xe_is(const xmlNode *xml, const char *name)
+{
+ return (xml != NULL) && (xml->name != NULL) && (name != NULL)
+ && (strcmp((const char *) xml->name, name) == 0);
+}
+
+/*!
+ * \internal
* \brief Return first non-text child node of an XML node
*
* \param[in] parent XML node to check
@@ -411,4 +434,15 @@ pcmk__xe_foreach_child(xmlNode *xml, const char *child_element_name,
int (*handler)(xmlNode *xml, void *userdata),
void *userdata);
+static inline const char *
+pcmk__xml_attr_value(const xmlAttr *attr)
+{
+ return ((attr == NULL) || (attr->children == NULL))? NULL
+ : (const char *) attr->children->content;
+}
+
+gboolean pcmk__validate_xml(xmlNode *xml_blob, const char *validation,
+ xmlRelaxNGValidityErrorFunc error_handler,
+ void *error_handler_context);
+
#endif // PCMK__XML_INTERNAL__H
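For illustration only, a sketch of the new inline helpers declared above; the element name "primitive" is an assumption for the example:

    static void
    dump_element_attrs(const xmlNode *xml)
    {
        /* pcmk__xe_is() replaces comparing crm_element_name() with strcmp() */
        if (!pcmk__xe_is(xml, "primitive")) {
            return;
        }

        /* pcmk__xml_attr_value() NULL-safely dereferences an attribute's text node */
        for (const xmlAttr *attr = xml->properties; attr != NULL; attr = attr->next) {
            const char *value = pcmk__xml_attr_value(attr);

            crm_trace("%s=%s", (const char *) attr->name, (value? value : ""));
        }
    }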
diff --git a/include/crm/compatibility.h b/include/crm/compatibility.h
index 1281a3c..f8502cc 100644
--- a/include/crm/compatibility.h
+++ b/include/crm/compatibility.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2021 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -196,41 +196,41 @@ extern "C" {
/* Clone terminology definitions */
// These can no longer be used in a switch together
-#define pe_master pe_clone
+#define pe_master pcmk_rsc_variant_clone
static inline enum pe_obj_types
get_resource_type(const char *name)
{
if (safe_str_eq(name, XML_CIB_TAG_RESOURCE)) {
- return pe_native;
+ return pcmk_rsc_variant_primitive;
} else if (safe_str_eq(name, XML_CIB_TAG_GROUP)) {
- return pe_group;
+ return pcmk_rsc_variant_group;
} else if (safe_str_eq(name, XML_CIB_TAG_INCARNATION)
|| safe_str_eq(name, PCMK_XE_PROMOTABLE_LEGACY)) {
- return pe_clone;
+ return pcmk_rsc_variant_clone;
} else if (safe_str_eq(name, XML_CIB_TAG_CONTAINER)) {
- return pe_container;
+ return pcmk_rsc_variant_bundle;
}
- return pe_unknown;
+ return pcmk_rsc_variant_unknown;
}
static inline const char *
get_resource_typename(enum pe_obj_types type)
{
switch (type) {
- case pe_native:
+ case pcmk_rsc_variant_primitive:
return XML_CIB_TAG_RESOURCE;
- case pe_group:
+ case pcmk_rsc_variant_group:
return XML_CIB_TAG_GROUP;
- case pe_clone:
+ case pcmk_rsc_variant_clone:
return XML_CIB_TAG_INCARNATION;
- case pe_container:
+ case pcmk_rsc_variant_bundle:
return XML_CIB_TAG_CONTAINER;
- case pe_unknown:
+ case pcmk_rsc_variant_unknown:
return "unknown";
}
return "<unknown>";
diff --git a/include/crm/crm.h b/include/crm/crm.h
index e824825..aecfcc8 100644
--- a/include/crm/crm.h
+++ b/include/crm/crm.h
@@ -65,8 +65,9 @@ extern "C" {
* XML v2 patchsets are created by default
* >=3.0.13: Fail counts include operation name and interval
* >=3.2.0: DC supports PCMK_EXEC_INVALID and PCMK_EXEC_NOT_CONNECTED
+ * >=3.19.0: DC supports PCMK__CIB_REQUEST_COMMIT_TRANSACT
*/
-# define CRM_FEATURE_SET "3.17.4"
+# define CRM_FEATURE_SET "3.19.0"
/* Pacemaker's CPG protocols use fixed-width binary fields for the sender and
* recipient of a CPG message. This imposes an arbitrary limit on cluster node
@@ -79,8 +80,6 @@ extern "C" {
extern char *crm_system_name;
-/* *INDENT-OFF* */
-
// How we represent "infinite" scores
# define CRM_SCORE_INFINITY 1000000
# define CRM_INFINITY_S "INFINITY"
@@ -110,6 +109,7 @@ extern char *crm_system_name;
# define CRM_SYSTEM_MCP "pacemakerd"
// Names of internally generated node attributes
+// @TODO Replace these with PCMK_NODE_ATTR_*
# define CRM_ATTR_UNAME "#uname"
# define CRM_ATTR_ID "#id"
# define CRM_ATTR_KIND "#kind"
@@ -140,22 +140,19 @@ extern char *crm_system_name;
# define CRM_OP_QUIT "quit"
# define CRM_OP_LOCAL_SHUTDOWN "start_shutdown"
# define CRM_OP_SHUTDOWN_REQ "req_shutdown"
-# define CRM_OP_SHUTDOWN "do_shutdown"
-# define CRM_OP_FENCE "stonith"
+# define CRM_OP_SHUTDOWN PCMK_ACTION_DO_SHUTDOWN
# define CRM_OP_REGISTER "register"
# define CRM_OP_IPC_FWD "ipc_fwd"
# define CRM_OP_INVOKE_LRM "lrm_invoke"
# define CRM_OP_LRM_REFRESH "lrm_refresh" //!< Deprecated since 1.1.10
-# define CRM_OP_LRM_DELETE "lrm_delete"
+# define CRM_OP_LRM_DELETE PCMK_ACTION_LRM_DELETE
# define CRM_OP_LRM_FAIL "lrm_fail"
# define CRM_OP_PROBED "probe_complete"
# define CRM_OP_REPROBE "probe_again"
-# define CRM_OP_CLEAR_FAILCOUNT "clear_failcount"
+# define CRM_OP_CLEAR_FAILCOUNT PCMK_ACTION_CLEAR_FAILCOUNT
# define CRM_OP_REMOTE_STATE "remote_state"
-# define CRM_OP_RELAXED_SET "one-or-more"
-# define CRM_OP_RELAXED_CLONE "clone-one-or-more"
# define CRM_OP_RM_NODE_CACHE "rm_node_cache"
-# define CRM_OP_MAINTENANCE_NODES "maintenance_nodes"
+# define CRM_OP_MAINTENANCE_NODES PCMK_ACTION_MAINTENANCE_NODES
/* Possible cluster membership states */
# define CRMD_JOINSTATE_DOWN "down"
@@ -163,70 +160,11 @@ extern char *crm_system_name;
# define CRMD_JOINSTATE_MEMBER "member"
# define CRMD_JOINSTATE_NACK "banned"
-# define CRMD_ACTION_DELETE "delete"
-# define CRMD_ACTION_CANCEL "cancel"
-
-# define CRMD_ACTION_RELOAD "reload"
-# define CRMD_ACTION_RELOAD_AGENT "reload-agent"
-# define CRMD_ACTION_MIGRATE "migrate_to"
-# define CRMD_ACTION_MIGRATED "migrate_from"
-
-# define CRMD_ACTION_START "start"
-# define CRMD_ACTION_STARTED "running"
-
-# define CRMD_ACTION_STOP "stop"
-# define CRMD_ACTION_STOPPED "stopped"
-
-# define CRMD_ACTION_PROMOTE "promote"
-# define CRMD_ACTION_PROMOTED "promoted"
-# define CRMD_ACTION_DEMOTE "demote"
-# define CRMD_ACTION_DEMOTED "demoted"
-
-# define CRMD_ACTION_NOTIFY "notify"
-# define CRMD_ACTION_NOTIFIED "notified"
-
-# define CRMD_ACTION_STATUS "monitor"
-# define CRMD_ACTION_METADATA "meta-data"
-# define CRMD_METADATA_CALL_TIMEOUT 30000
-
-/* short names */
-# define RSC_DELETE CRMD_ACTION_DELETE
-# define RSC_CANCEL CRMD_ACTION_CANCEL
-
-# define RSC_MIGRATE CRMD_ACTION_MIGRATE
-# define RSC_MIGRATED CRMD_ACTION_MIGRATED
-
-# define RSC_START CRMD_ACTION_START
-# define RSC_STARTED CRMD_ACTION_STARTED
-
-# define RSC_STOP CRMD_ACTION_STOP
-# define RSC_STOPPED CRMD_ACTION_STOPPED
-
-# define RSC_PROMOTE CRMD_ACTION_PROMOTE
-# define RSC_PROMOTED CRMD_ACTION_PROMOTED
-# define RSC_DEMOTE CRMD_ACTION_DEMOTE
-# define RSC_DEMOTED CRMD_ACTION_DEMOTED
-
-# define RSC_NOTIFY CRMD_ACTION_NOTIFY
-# define RSC_NOTIFIED CRMD_ACTION_NOTIFIED
-
-# define RSC_STATUS CRMD_ACTION_STATUS
-# define RSC_METADATA CRMD_ACTION_METADATA
-/* *INDENT-ON* */
-
+# include <crm/common/actions.h>
# include <crm/common/cib.h>
# include <crm/common/logging.h>
# include <crm/common/util.h>
-static inline const char *
-crm_action_str(const char *task, guint interval_ms) {
- if ((task != NULL) && (interval_ms == 0)
- && (strcasecmp(task, RSC_STATUS) == 0)) {
- return "probe";
- }
- return task;
-}
-
#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
#include <crm/crm_compat.h>
#endif
diff --git a/include/crm/crm_compat.h b/include/crm/crm_compat.h
index 2c0a3dd..bfe1098 100644
--- a/include/crm/crm_compat.h
+++ b/include/crm/crm_compat.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -10,8 +10,11 @@
#ifndef PCMK__CRM_CRM_COMPAT__H
# define PCMK__CRM_CRM_COMPAT__H
+#include <strings.h>
#include <glib.h>
+#include <crm/common/actions.h>
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -31,12 +34,126 @@ extern "C" {
//! \deprecated This defined constant will be removed in a future release
#define MAX_IPC_DELAY 120
+//! \deprecated Use PCMK_ACTION_STONITH instead
+#define CRM_OP_FENCE PCMK_ACTION_STONITH
+
//! \deprecated This defined constant will be removed in a future release
#define CRM_OP_LRM_QUERY "lrm_query"
+//! \deprecated Use PCMK_ACTION_CLONE_ONE_OR_MORE instead
+#define CRM_OP_RELAXED_CLONE PCMK_ACTION_CLONE_ONE_OR_MORE
+
+//! \deprecated Use PCMK_ACTION_ONE_OR_MORE instead
+#define CRM_OP_RELAXED_SET PCMK_ACTION_ONE_OR_MORE
+
//! \deprecated This defined constant will be removed in a future release
#define CRM_ATTR_RA_VERSION "#ra-version"
+//! \deprecated Use PCMK_ACTION_CANCEL instead
+#define CRMD_ACTION_CANCEL PCMK_ACTION_CANCEL
+
+//! \deprecated Use PCMK_ACTION_DELETE instead
+#define CRMD_ACTION_DELETE PCMK_ACTION_DELETE
+
+//! \deprecated Use PCMK_ACTION_DEMOTE instead
+#define CRMD_ACTION_DEMOTE PCMK_ACTION_DEMOTE
+
+//! \deprecated Use PCMK_ACTION_DEMOTED instead
+#define CRMD_ACTION_DEMOTED PCMK_ACTION_DEMOTED
+
+//! \deprecated Use PCMK_ACTION_META_DATA instead
+#define CRMD_ACTION_METADATA PCMK_ACTION_META_DATA
+
+//! \deprecated Use PCMK_ACTION_MIGRATE_TO instead
+#define CRMD_ACTION_MIGRATE PCMK_ACTION_MIGRATE_TO
+
+//! \deprecated Use PCMK_ACTION_MIGRATE_FROM instead
+#define CRMD_ACTION_MIGRATED PCMK_ACTION_MIGRATE_FROM
+
+//! \deprecated Use PCMK_ACTION_NOTIFIED instead
+#define CRMD_ACTION_NOTIFIED PCMK_ACTION_NOTIFIED
+
+//! \deprecated Use PCMK_ACTION_NOTIFY instead
+#define CRMD_ACTION_NOTIFY PCMK_ACTION_NOTIFY
+
+//! \deprecated Use PCMK_ACTION_PROMOTE instead
+#define CRMD_ACTION_PROMOTE PCMK_ACTION_PROMOTE
+
+//! \deprecated Use PCMK_ACTION_PROMOTED instead
+#define CRMD_ACTION_PROMOTED PCMK_ACTION_PROMOTED
+
+//! \deprecated Use PCMK_ACTION_RELOAD instead
+#define CRMD_ACTION_RELOAD PCMK_ACTION_RELOAD
+
+//! \deprecated Use PCMK_ACTION_RELOAD_AGENT instead
+#define CRMD_ACTION_RELOAD_AGENT PCMK_ACTION_RELOAD_AGENT
+
+//! \deprecated Use PCMK_ACTION_START instead
+#define CRMD_ACTION_START PCMK_ACTION_START
+
+//! \deprecated Use PCMK_ACTION_RUNNING instead
+#define CRMD_ACTION_STARTED PCMK_ACTION_RUNNING
+
+//! \deprecated Use PCMK_ACTION_MONITOR instead
+#define CRMD_ACTION_STATUS PCMK_ACTION_MONITOR
+
+//! \deprecated Use PCMK_ACTION_STOP instead
+#define CRMD_ACTION_STOP PCMK_ACTION_STOP
+
+//! \deprecated Use PCMK_ACTION_STOPPED instead
+#define CRMD_ACTION_STOPPED PCMK_ACTION_STOPPED
+
+//! \deprecated Do not use
+#define CRMD_METADATA_CALL_TIMEOUT PCMK_DEFAULT_METADATA_TIMEOUT_MS
+
+//! \deprecated Use PCMK_ACTION_CANCEL instead
+#define RSC_CANCEL PCMK_ACTION_CANCEL
+
+//! \deprecated Use PCMK_ACTION_DELETE instead
+#define RSC_DELETE PCMK_ACTION_DELETE
+
+//! \deprecated Use PCMK_ACTION_DEMOTE instead
+#define RSC_DEMOTE PCMK_ACTION_DEMOTE
+
+//! \deprecated Use PCMK_ACTION_DEMOTED instead
+#define RSC_DEMOTED PCMK_ACTION_DEMOTED
+
+//! \deprecated Use PCMK_ACTION_META_DATA instead
+#define RSC_METADATA PCMK_ACTION_META_DATA
+
+//! \deprecated Use PCMK_ACTION_MIGRATE_TO instead
+#define RSC_MIGRATE PCMK_ACTION_MIGRATE_TO
+
+//! \deprecated Use PCMK_ACTION_MIGRATE_FROM instead
+#define RSC_MIGRATED PCMK_ACTION_MIGRATE_FROM
+
+//! \deprecated Use PCMK_ACTION_NOTIFIED instead
+#define RSC_NOTIFIED PCMK_ACTION_NOTIFIED
+
+//! \deprecated Use PCMK_ACTION_NOTIFY instead
+#define RSC_NOTIFY PCMK_ACTION_NOTIFY
+
+//! \deprecated Use PCMK_ACTION_PROMOTE instead
+#define RSC_PROMOTE PCMK_ACTION_PROMOTE
+
+//! \deprecated Use PCMK_ACTION_PROMOTED instead
+#define RSC_PROMOTED PCMK_ACTION_PROMOTED
+
+//! \deprecated Use PCMK_ACTION_START instead
+#define RSC_START PCMK_ACTION_START
+
+//! \deprecated Use PCMK_ACTION_RUNNING instead
+#define RSC_STARTED PCMK_ACTION_RUNNING
+
+//! \deprecated Use PCMK_ACTION_MONITOR instead
+#define RSC_STATUS PCMK_ACTION_MONITOR
+
+//! \deprecated Use PCMK_ACTION_STOP instead
+#define RSC_STOP PCMK_ACTION_STOP
+
+//! \deprecated Use PCMK_ACTION_STOPPED instead
+#define RSC_STOPPED PCMK_ACTION_STOPPED
+
//!@{
//! \deprecated This macro will be removed in a future release
@@ -54,6 +171,16 @@ extern "C" {
//! \deprecated Use GList * instead
typedef GList *GListPtr;
+//! \deprecated Do not use
+static inline const char *
+crm_action_str(const char *task, guint interval_ms) {
+ if ((task != NULL) && (interval_ms == 0)
+ && (strcasecmp(task, PCMK_ACTION_MONITOR) == 0)) {
+ return "probe";
+ }
+ return task;
+}
+
#ifdef __cplusplus
}
#endif
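A small sketch, for illustration only, of the deprecated crm_action_str() helper retained above, which only special-cases a zero-interval monitor:

    const char *label = crm_action_str(PCMK_ACTION_MONITOR, 0);      /* "probe" */

    label = crm_action_str(PCMK_ACTION_MONITOR, 10000);              /* "monitor" */
    label = crm_action_str(PCMK_ACTION_START, 0);                    /* "start" */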
diff --git a/include/crm/lrmd.h b/include/crm/lrmd.h
index dfc2f25..0c5a40b 100644
--- a/include/crm/lrmd.h
+++ b/include/crm/lrmd.h
@@ -13,6 +13,7 @@
#include <stdbool.h> // bool
#include <glib.h> // guint, GList
#include <crm_config.h>
+#include <crm/lrmd_events.h>
#include <crm/services.h>
#ifdef __cplusplus
@@ -203,74 +204,6 @@ enum lrmd_call_options {
lrmd_opt_notify_changes_only = (1 << 2),
};
-enum lrmd_callback_event {
- lrmd_event_register,
- lrmd_event_unregister,
- lrmd_event_exec_complete,
- lrmd_event_disconnect,
- lrmd_event_connect,
- lrmd_event_poke,
- lrmd_event_new_client,
-};
-
-typedef struct lrmd_event_data_s {
- /*! Type of event, register, unregister, call_completed... */
- enum lrmd_callback_event type;
-
- /*! The resource this event occurred on. */
- const char *rsc_id;
- /*! The action performed, start, stop, monitor... */
- const char *op_type;
- /*! The user data passed by caller of exec() API function */
- const char *user_data;
-
- /*! The client api call id associated with this event */
- int call_id;
- /*! The operation's timeout period in ms. */
- int timeout;
- /*! The operation's recurring interval in ms. */
- guint interval_ms;
- /*! The operation's start delay value in ms. */
- int start_delay;
- /*! This operation that just completed is on a deleted rsc. */
- int rsc_deleted;
-
- /*! The executed ra return code mapped to OCF */
- enum ocf_exitcode rc;
- /*! The executor status returned for exec_complete events */
- int op_status;
- /*! stdout from resource agent operation */
- const char *output;
- /*! Timestamp of when op ran */
- unsigned int t_run;
- /*! Timestamp of last rc change */
- unsigned int t_rcchange;
- /*! Time in length op took to execute */
- unsigned int exec_time;
- /*! Time in length spent in queue */
- unsigned int queue_time;
-
- /*! int connection result. Used for connection and poke events */
- int connection_rc;
-
- /* This is a GHashTable containing the
- * parameters given to the operation */
- void *params;
-
- /*! client node name associated with this connection
- * (used to match actions to the proper client when there are multiple)
- */
- const char *remote_nodename;
-
- /*! exit failure reason string from resource agent operation */
- const char *exit_reason;
-} lrmd_event_data_t;
-
-lrmd_event_data_t *lrmd_new_event(const char *rsc_id, const char *task,
- guint interval_ms);
-lrmd_event_data_t *lrmd_copy_event(lrmd_event_data_t * event);
-void lrmd_free_event(lrmd_event_data_t * event);
-
typedef struct lrmd_rsc_info_s {
char *id;
char *type;
diff --git a/include/crm/lrmd_events.h b/include/crm/lrmd_events.h
new file mode 100644
index 0000000..3a1c500
--- /dev/null
+++ b/include/crm/lrmd_events.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2012-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef PCMK__CRM_LRMD_EVENTS__H
+# define PCMK__CRM_LRMD_EVENTS__H
+
+#include <glib.h> // guint
+#include <crm/common/results.h> // enum ocf_exitcode
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * \file
+ * \brief Resource agent executor events
+ * \ingroup lrmd
+ */
+
+enum lrmd_callback_event {
+ lrmd_event_register,
+ lrmd_event_unregister,
+ lrmd_event_exec_complete,
+ lrmd_event_disconnect,
+ lrmd_event_connect,
+ lrmd_event_poke,
+ lrmd_event_new_client,
+};
+
+typedef struct lrmd_event_data_s {
+ /*! Type of event, register, unregister, call_completed... */
+ enum lrmd_callback_event type;
+
+ /*! The resource this event occurred on. */
+ const char *rsc_id;
+ /*! The action performed, start, stop, monitor... */
+ const char *op_type;
+ /*! The user data passed by caller of exec() API function */
+ const char *user_data;
+
+ /*! The client api call id associated with this event */
+ int call_id;
+
+ /*! The operation's timeout period in ms. */
+ int timeout;
+
+ /*! The operation's recurring interval in ms. */
+ guint interval_ms;
+
+ /*! The operation's start delay value in ms. */
+ int start_delay;
+
+ /*! Whether the operation that just completed was on a deleted resource */
+ int rsc_deleted;
+
+ /*! The executed ra return code mapped to OCF */
+ enum ocf_exitcode rc;
+
+ /*! The executor status returned for exec_complete events */
+ int op_status;
+
+ /*! stdout from resource agent operation */
+ const char *output;
+
+ /*! Timestamp of when op ran */
+ unsigned int t_run;
+
+ /*! Timestamp of last rc change */
+ unsigned int t_rcchange;
+
+ /*! Length of time the operation took to execute */
+ unsigned int exec_time;
+
+ /*! Length of time the operation spent in the execution queue */
+ unsigned int queue_time;
+
+ /*! Connection result (used for connection and poke events) */
+ int connection_rc;
+
+ /* This is a GHashTable containing the
+ * parameters given to the operation */
+ void *params;
+
+ /*! client node name associated with this connection
+ * (used to match actions to the proper client when there are multiple)
+ */
+ const char *remote_nodename;
+
+ /*! exit failure reason string from resource agent operation */
+ const char *exit_reason;
+} lrmd_event_data_t;
+
+lrmd_event_data_t *lrmd_new_event(const char *rsc_id, const char *task,
+ guint interval_ms);
+lrmd_event_data_t *lrmd_copy_event(lrmd_event_data_t *event);
+void lrmd_free_event(lrmd_event_data_t *event);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // PCMK__CRM_LRMD_EVENTS__H
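A minimal usage sketch of the relocated event API, assuming the caller links against liblrmd; the resource ID is hypothetical:

    #include <crm/lrmd_events.h>

    /* Allocate an event describing a recurring 10-second monitor */
    lrmd_event_data_t *event = lrmd_new_event("dummy-rsc", "monitor", 10000);

    if (event != NULL) {
        /* ... inspect or populate event->rc, event->op_status, etc. ... */
        lrmd_free_event(event);
    }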
diff --git a/include/crm/lrmd_internal.h b/include/crm/lrmd_internal.h
index 5810554..d1cd25d 100644
--- a/include/crm/lrmd_internal.h
+++ b/include/crm/lrmd_internal.h
@@ -47,6 +47,7 @@ void lrmd__set_result(lrmd_event_data_t *event, enum ocf_exitcode rc,
void lrmd__reset_result(lrmd_event_data_t *event);
time_t lrmd__uptime(lrmd_t *lrmd);
+const char *lrmd__node_start_state(lrmd_t *lrmd);
/* Shared functions for IPC proxy back end */
diff --git a/include/crm/msg_xml.h b/include/crm/msg_xml.h
index 2e50adb..c616182 100644
--- a/include/crm/msg_xml.h
+++ b/include/crm/msg_xml.h
@@ -48,6 +48,8 @@ extern "C" {
* XML attributes
*/
+#define PCMK_XA_FORMAT "format"
+
/* These have been deprecated as CIB <clone> element attributes (aliases for
* "promoted-max" and "promoted-node-max") since 2.0.0.
*/
@@ -59,7 +61,14 @@ extern "C" {
* Meta attributes
*/
+#define PCMK_META_CLONE_MAX "clone-max"
+#define PCMK_META_CLONE_MIN "clone-min"
+#define PCMK_META_CLONE_NODE_MAX "clone-node-max"
#define PCMK_META_ENABLED "enabled"
+#define PCMK_META_FAILURE_TIMEOUT "failure-timeout"
+#define PCMK_META_MIGRATION_THRESHOLD "migration-threshold"
+#define PCMK_META_PROMOTED_MAX "promoted-max"
+#define PCMK_META_PROMOTED_NODE_MAX "promoted-node-max"
/*
@@ -149,7 +158,6 @@ extern "C" {
# define XML_ATTR_IDREF "id-ref"
# define XML_ATTR_ID_LONG "long-id"
# define XML_ATTR_TYPE "type"
-# define XML_ATTR_VERBOSE "verbose"
# define XML_ATTR_OP "op"
# define XML_ATTR_DC_UUID "dc-uuid"
# define XML_ATTR_UPDATE_ORIG "update-origin"
@@ -183,8 +191,6 @@ extern "C" {
# define XML_PING_ATTR_PACEMAKERDSTATE_SHUTDOWNCOMPLETE "shutdown_complete"
# define XML_PING_ATTR_PACEMAKERDSTATE_REMOTE "remote"
-# define XML_TAG_FRAGMENT "cib_fragment"
-
# define XML_FAIL_TAG_CIB "failed_update"
# define XML_FAILCIB_ATTR_ID "id"
@@ -198,7 +204,6 @@ extern "C" {
# define XML_CIB_TAG_STATUS "status"
# define XML_CIB_TAG_RESOURCES "resources"
# define XML_CIB_TAG_NODES "nodes"
-# define XML_CIB_TAG_DOMAINS "domains"
# define XML_CIB_TAG_CONSTRAINTS "constraints"
# define XML_CIB_TAG_CRMCONFIG "crm_config"
# define XML_CIB_TAG_OPCONFIG "op_defaults"
@@ -239,19 +244,12 @@ extern "C" {
# define XML_RSC_ATTR_ORDERED "ordered"
# define XML_RSC_ATTR_INTERLEAVE "interleave"
# define XML_RSC_ATTR_INCARNATION "clone"
-# define XML_RSC_ATTR_INCARNATION_MAX "clone-max"
-# define XML_RSC_ATTR_INCARNATION_MIN "clone-min"
-# define XML_RSC_ATTR_INCARNATION_NODEMAX "clone-node-max"
# define XML_RSC_ATTR_PROMOTABLE "promotable"
-# define XML_RSC_ATTR_PROMOTED_MAX "promoted-max"
-# define XML_RSC_ATTR_PROMOTED_NODEMAX "promoted-node-max"
# define XML_RSC_ATTR_MANAGED "is-managed"
# define XML_RSC_ATTR_TARGET_ROLE "target-role"
# define XML_RSC_ATTR_UNIQUE "globally-unique"
# define XML_RSC_ATTR_NOTIFY "notify"
# define XML_RSC_ATTR_STICKINESS "resource-stickiness"
-# define XML_RSC_ATTR_FAIL_STICKINESS "migration-threshold"
-# define XML_RSC_ATTR_FAIL_TIMEOUT "failure-timeout"
# define XML_RSC_ATTR_MULTIPLE "multiple-active"
# define XML_RSC_ATTR_REQUIRES "requires"
# define XML_RSC_ATTR_CONTAINER "container"
@@ -285,15 +283,8 @@ extern "C" {
//! \deprecated Do not use (will be removed in a future release)
# define XML_CIB_ATTR_REPLACE "replace"
-# define XML_CIB_ATTR_SOURCE "source"
-
# define XML_CIB_ATTR_PRIORITY "priority"
-# define XML_CIB_ATTR_SOURCE "source"
-# define XML_NODE_JOIN_STATE "join"
-# define XML_NODE_EXPECTED "expected"
-# define XML_NODE_IN_CLUSTER "in_ccm"
-# define XML_NODE_IS_PEER "crmd"
# define XML_NODE_IS_REMOTE "remote_node"
# define XML_NODE_IS_FENCED "node_fenced"
# define XML_NODE_IS_MAINTENANCE "node_in_maintenance"
@@ -333,7 +324,6 @@ extern "C" {
# define XML_LRM_ATTR_EXIT_REASON "exit-reason"
# define XML_RSC_OP_LAST_CHANGE "last-rc-change"
-# define XML_RSC_OP_LAST_RUN "last-run" // deprecated since 2.0.3
# define XML_RSC_OP_T_EXEC "exec-time"
# define XML_RSC_OP_T_QUEUE "queue-time"
@@ -413,6 +403,7 @@ extern "C" {
# define XML_CONFIG_ATTR_SHUTDOWN_LOCK "shutdown-lock"
# define XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT "shutdown-lock-limit"
# define XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY "priority-fencing-delay"
+# define XML_CONFIG_ATTR_NODE_PENDING_TIMEOUT "node-pending-timeout"
# define XML_ALERT_ATTR_PATH "path"
# define XML_ALERT_ATTR_TIMEOUT "timeout"
@@ -428,7 +419,10 @@ extern "C" {
# define XML_ATTR_TE_TARGET_RC "op_target_rc"
# define XML_TAG_TRANSIENT_NODEATTRS "transient_attributes"
+//! \deprecated Do not use (will be removed in a future release)
# define XML_TAG_DIFF_ADDED "diff-added"
+
+//! \deprecated Do not use (will be removed in a future release)
# define XML_TAG_DIFF_REMOVED "diff-removed"
# define XML_ACL_TAG_USER "acl_target"
@@ -478,7 +472,6 @@ extern "C" {
# define XML_DIFF_POSITION "position"
# define ID(x) crm_element_value(x, XML_ATTR_ID)
-# define TYPE(x) crm_element_name(x)
#ifdef __cplusplus
}
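A hedged sketch of the new PCMK_META_* names in use; the meta hash table here is assumed to have been populated by get_meta_attributes() or similar:

    /* Look up meta-attributes by their canonical names */
    const char *fail_timeout = g_hash_table_lookup(meta, PCMK_META_FAILURE_TIMEOUT);
    const char *threshold = g_hash_table_lookup(meta, PCMK_META_MIGRATION_THRESHOLD);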
diff --git a/include/crm/msg_xml_compat.h b/include/crm/msg_xml_compat.h
index aad98e8..612eebf 100644
--- a/include/crm/msg_xml_compat.h
+++ b/include/crm/msg_xml_compat.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -25,6 +25,21 @@ extern "C" {
* release.
*/
+//! \deprecated Use PCMK_META_CLONE_MAX instead
+#define XML_RSC_ATTR_INCARNATION_MAX PCMK_META_CLONE_MAX
+
+//! \deprecated Use PCMK_META_CLONE_MIN instead
+#define XML_RSC_ATTR_INCARNATION_MIN PCMK_META_CLONE_MIN
+
+//! \deprecated Use PCMK_META_CLONE_NODE_MAX instead
+#define XML_RSC_ATTR_INCARNATION_NODEMAX PCMK_META_CLONE_NODE_MAX
+
+//! \deprecated Use PCMK_META_PROMOTED_MAX instead
+#define XML_RSC_ATTR_PROMOTED_MAX PCMK_META_PROMOTED_MAX
+
+//! \deprecated Use PCMK_META_PROMOTED_NODE_MAX instead
+#define XML_RSC_ATTR_PROMOTED_NODEMAX PCMK_META_PROMOTED_NODE_MAX
+
//! \deprecated Use PCMK_STONITH_PROVIDES instead
#define XML_RSC_ATTR_PROVIDES PCMK_STONITH_PROVIDES
@@ -40,6 +55,12 @@ extern "C" {
//! \deprecated Use PCMK_XA_PROMOTED_NODE_MAX_LEGACY instead
#define PCMK_XE_PROMOTED_NODE_MAX_LEGACY PCMK_XA_PROMOTED_NODE_MAX_LEGACY
+//! \deprecated Use PCMK_META_MIGRATION_THRESHOLD instead
+#define XML_RSC_ATTR_FAIL_STICKINESS PCMK_META_MIGRATION_THRESHOLD
+
+//! \deprecated Use PCMK_META_FAILURE_TIMEOUT instead
+#define XML_RSC_ATTR_FAIL_TIMEOUT PCMK_META_FAILURE_TIMEOUT
+
//! \deprecated Use PCMK_XA_PROMOTED_NODE_MAX_LEGACY instead
#define XML_RSC_ATTR_MASTER_NODEMAX PCMK_XA_PROMOTED_NODE_MAX_LEGACY
@@ -47,6 +68,9 @@ extern "C" {
#define XML_ATTR_RA_VERSION "ra-version"
//! \deprecated Do not use (will be removed in a future release)
+#define XML_TAG_FRAGMENT "cib_fragment"
+
+//! \deprecated Do not use (will be removed in a future release)
#define XML_TAG_RSC_VER_ATTRS "rsc_versioned_attrs"
//! \deprecated Do not use (will be removed in a future release)
@@ -58,6 +82,33 @@ extern "C" {
//! \deprecated Use \p XML_ATTR_ID instead
#define XML_ATTR_UUID "id"
+//! \deprecated Do not use (will be removed in a future release)
+#define XML_ATTR_VERBOSE "verbose"
+
+//! \deprecated Do not use (will be removed in a future release)
+#define XML_CIB_TAG_DOMAINS "domains"
+
+//! \deprecated Do not use (will be removed in a future release)
+#define XML_CIB_ATTR_SOURCE "source"
+
+//! \deprecated Do not use
+#define XML_NODE_EXPECTED "expected"
+
+//! \deprecated Do not use
+#define XML_NODE_IN_CLUSTER "in_ccm"
+
+//! \deprecated Do not use
+#define XML_NODE_IS_PEER "crmd"
+
+//! \deprecated Do not use
+#define XML_NODE_JOIN_STATE "join"
+
+//! \deprecated Do not use (will be removed in a future release)
+#define XML_RSC_OP_LAST_RUN "last-run"
+
+//! \deprecated Use name member directly
+#define TYPE(x) (((x) == NULL)? NULL : (const char *) ((x)->name))
+
#ifdef __cplusplus
}
#endif
diff --git a/include/crm/pengine/Makefile.am b/include/crm/pengine/Makefile.am
index fac6031..3560d24 100644
--- a/include/crm/pengine/Makefile.am
+++ b/include/crm/pengine/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2006-2021 the Pacemaker project contributors
+# Copyright 2006-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -10,8 +10,13 @@ MAINTAINERCLEANFILES = Makefile.in
headerdir=$(pkgincludedir)/crm/pengine
-noinst_HEADERS = internal.h remote_internal.h rules_internal.h
-header_HEADERS = common.h complex.h pe_types.h rules.h status.h \
+noinst_HEADERS = internal.h \
+ $(wildcard *_internal.h)
+header_HEADERS = common.h \
+ complex.h \
+ pe_types.h \
+ rules.h \
+ status.h \
common_compat.h \
pe_types_compat.h \
rules_compat.h
diff --git a/include/crm/pengine/common.h b/include/crm/pengine/common.h
index 9fe05bd..2feac8a 100644
--- a/include/crm/pengine/common.h
+++ b/include/crm/pengine/common.h
@@ -13,6 +13,7 @@
# include <glib.h>
# include <regex.h>
# include <crm/common/iso8601.h>
+# include <crm/common/scheduler.h>
#ifdef __cplusplus
extern "C" {
@@ -21,120 +22,6 @@ extern "C" {
extern gboolean was_processing_error;
extern gboolean was_processing_warning;
-/* The order is (partially) significant here; the values from action_fail_ignore
- * through action_fail_fence are in order of increasing severity.
- *
- * @COMPAT The values should be ordered and numbered per the "TODO" comments
- * below, so all values are in order of severity and there is room for
- * future additions, but that would break API compatibility.
- * @TODO For now, we just use a function to compare the values specially, but
- * at the next compatibility break, we should arrange things properly.
- */
-enum action_fail_response {
- action_fail_ignore, // @TODO = 10
- // @TODO action_fail_demote = 20,
- action_fail_recover, // @TODO = 30
- // @TODO action_fail_reset_remote = 40,
- // @TODO action_fail_restart_container = 50,
- action_fail_migrate, // @TODO = 60
- action_fail_block, // @TODO = 70
- action_fail_stop, // @TODO = 80
- action_fail_standby, // @TODO = 90
- action_fail_fence, // @TODO = 100
-
- // @COMPAT Values below here are out of order for API compatibility
-
- action_fail_restart_container,
-
- /* This is reserved for internal use for remote node connection resources.
- * Fence the remote node if stonith is enabled, otherwise attempt to recover
- * the connection resource. This allows us to specify types of connection
- * resource failures that should result in fencing the remote node
- * (for example, recurring monitor failures).
- */
- action_fail_reset_remote,
-
- action_fail_demote,
-};
-
-/* the "done" action must be the "pre" action +1 */
-enum action_tasks {
- no_action,
- monitor_rsc,
- stop_rsc,
- stopped_rsc,
- start_rsc,
- started_rsc,
- action_notify,
- action_notified,
- action_promote,
- action_promoted,
- action_demote,
- action_demoted,
- shutdown_crm,
- stonith_node
-};
-
-enum rsc_recovery_type {
- recovery_stop_start,
- recovery_stop_only,
- recovery_block,
- recovery_stop_unexpected,
-};
-
-enum rsc_start_requirement {
- rsc_req_nothing, /* Allowed by custom_action() */
- rsc_req_quorum, /* Enforced by custom_action() */
- rsc_req_stonith /* Enforced by native_start_constraints() */
-};
-
-//! Possible roles that a resource can be in
-enum rsc_role_e {
- RSC_ROLE_UNKNOWN = 0,
- RSC_ROLE_STOPPED = 1,
- RSC_ROLE_STARTED = 2,
- RSC_ROLE_UNPROMOTED = 3,
- RSC_ROLE_PROMOTED = 4,
-
-#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
- //! \deprecated Use RSC_ROLE_UNPROMOTED instead
- RSC_ROLE_SLAVE = RSC_ROLE_UNPROMOTED,
-
- //! \deprecated Use RSC_ROLE_PROMOTED instead
- RSC_ROLE_MASTER = RSC_ROLE_PROMOTED,
-#endif
-};
-
-# define RSC_ROLE_MAX (RSC_ROLE_PROMOTED + 1)
-
-# define RSC_ROLE_UNKNOWN_S "Unknown"
-# define RSC_ROLE_STOPPED_S "Stopped"
-# define RSC_ROLE_STARTED_S "Started"
-# define RSC_ROLE_UNPROMOTED_S "Unpromoted"
-# define RSC_ROLE_PROMOTED_S "Promoted"
-# define RSC_ROLE_UNPROMOTED_LEGACY_S "Slave"
-# define RSC_ROLE_PROMOTED_LEGACY_S "Master"
-
-//! Deprecated
-enum pe_print_options {
- pe_print_log = (1 << 0),
- pe_print_html = (1 << 1),
- pe_print_ncurses = (1 << 2),
- pe_print_printf = (1 << 3),
- pe_print_dev = (1 << 4), //! Ignored
- pe_print_details = (1 << 5), //! Ignored
- pe_print_max_details = (1 << 6), //! Ignored
- pe_print_rsconly = (1 << 7),
- pe_print_ops = (1 << 8),
- pe_print_suppres_nl = (1 << 9),
- pe_print_xml = (1 << 10),
- pe_print_brief = (1 << 11),
- pe_print_pending = (1 << 12),
- pe_print_clone_details = (1 << 13),
- pe_print_clone_active = (1 << 14), // Print clone instances only if active
- pe_print_implicit = (1 << 15) // Print implicitly created resources
-};
-
const char *task2text(enum action_tasks task);
enum action_tasks text2task(const char *task);
enum rsc_role_e text2role(const char *role);
@@ -154,13 +41,13 @@ static inline const char *
recovery2text(enum rsc_recovery_type type)
{
switch (type) {
- case recovery_stop_only:
+ case pcmk_multiply_active_stop:
return "shutting it down";
- case recovery_stop_start:
+ case pcmk_multiply_active_restart:
return "attempting recovery";
- case recovery_block:
+ case pcmk_multiply_active_block:
return "waiting for an administrator";
- case recovery_stop_unexpected:
+ case pcmk_multiply_active_unexpected:
return "stopping unexpected instances";
}
return "Unknown";
diff --git a/include/crm/pengine/common_compat.h b/include/crm/pengine/common_compat.h
index 773bb3d..4330ccf 100644
--- a/include/crm/pengine/common_compat.h
+++ b/include/crm/pengine/common_compat.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2021 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -10,6 +10,8 @@
#ifndef PCMK__CRM_PENGINE_COMMON_COMPAT__H
# define PCMK__CRM_PENGINE_COMMON_COMPAT__H
+#include <crm/common/scheduler.h>
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -23,12 +25,35 @@ extern "C" {
* release.
*/
-//! \deprecated Use RSC_ROLE_UNPROMOTED_LEGACY_S instead
-# define RSC_ROLE_SLAVE_S RSC_ROLE_UNPROMOTED_LEGACY_S
+//! \deprecated Use (pcmk_role_promoted + 1) instead
+#define RSC_ROLE_MAX (pcmk_role_promoted + 1)
+
+//! \deprecated Use role2text(pcmk_role_unknown) instead
+#define RSC_ROLE_UNKNOWN_S role2text(pcmk_role_unknown)
+
+//! \deprecated Use role2text(pcmk_role_stopped) instead
+#define RSC_ROLE_STOPPED_S role2text(pcmk_role_stopped)
+
+//! \deprecated Use role2text(pcmk_role_started) instead
+#define RSC_ROLE_STARTED_S role2text(pcmk_role_started)
+
+//! \deprecated Use role2text(pcmk_role_unpromoted) instead
+#define RSC_ROLE_UNPROMOTED_S role2text(pcmk_role_unpromoted)
+
+//! \deprecated Use role2text(pcmk_role_promoted) instead
+#define RSC_ROLE_PROMOTED_S role2text(pcmk_role_promoted)
+
+//! \deprecated Do not use
+#define RSC_ROLE_UNPROMOTED_LEGACY_S "Slave"
+
+//! \deprecated Do not use
+#define RSC_ROLE_SLAVE_S RSC_ROLE_UNPROMOTED_LEGACY_S
-//! \deprecated Use RSC_ROLE_PROMOTED_LEGACY_S instead
-# define RSC_ROLE_MASTER_S RSC_ROLE_PROMOTED_LEGACY_S
+//! \deprecated Do not use
+#define RSC_ROLE_PROMOTED_LEGACY_S "Master"
+//! \deprecated Do not use
+#define RSC_ROLE_MASTER_S RSC_ROLE_PROMOTED_LEGACY_S
#ifdef __cplusplus
}
diff --git a/include/crm/pengine/complex.h b/include/crm/pengine/complex.h
index 929e4da..9b6ad1b 100644
--- a/include/crm/pengine/complex.h
+++ b/include/crm/pengine/complex.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -12,23 +12,23 @@
#include <glib.h> // gboolean, GHashTable
#include <libxml/tree.h> // xmlNode
-#include <crm/pengine/pe_types.h> // pe_node_t, pe_resource_t, etc.
+#include <crm/pengine/pe_types.h> // pcmk_node_t, pcmk_resource_t, etc.
#ifdef __cplusplus
extern "C" {
#endif
-extern resource_object_functions_t resource_class_functions[];
+extern pcmk_rsc_methods_t resource_class_functions[];
-GHashTable *pe_rsc_params(pe_resource_t *rsc, const pe_node_t *node,
- pe_working_set_t *data_set);
-void get_meta_attributes(GHashTable * meta_hash, pe_resource_t *rsc,
- pe_node_t *node, pe_working_set_t *data_set);
-void get_rsc_attributes(GHashTable *meta_hash, const pe_resource_t *rsc,
- const pe_node_t *node, pe_working_set_t *data_set);
+GHashTable *pe_rsc_params(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ pcmk_scheduler_t *scheduler);
+void get_meta_attributes(GHashTable * meta_hash, pcmk_resource_t *rsc,
+ pcmk_node_t *node, pcmk_scheduler_t *scheduler);
+void get_rsc_attributes(GHashTable *meta_hash, const pcmk_resource_t *rsc,
+ const pcmk_node_t *node, pcmk_scheduler_t *scheduler);
-gboolean is_parent(pe_resource_t *child, pe_resource_t *rsc);
-pe_resource_t *uber_parent(pe_resource_t *rsc);
+gboolean is_parent(pcmk_resource_t *child, pcmk_resource_t *rsc);
+pcmk_resource_t *uber_parent(pcmk_resource_t *rsc);
#ifdef __cplusplus
}
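A sketch of the renamed API, assuming rsc, node, and scheduler variables already exist in the caller:

    /* Topmost ancestor, e.g. the clone wrapping an instance */
    pcmk_resource_t *top = uber_parent(rsc);

    /* Evaluated instance parameters of the resource on the given node */
    GHashTable *params = pe_rsc_params(rsc, node, scheduler);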
diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
index 1b5f6f1..9c8068f 100644
--- a/include/crm/pengine/internal.h
+++ b/include/crm/pengine/internal.h
@@ -10,6 +10,7 @@
#ifndef PE_INTERNAL__H
# define PE_INTERNAL__H
+# include <stdbool.h>
# include <stdint.h>
# include <string.h>
# include <crm/msg_xml.h>
@@ -18,31 +19,17 @@
# include <crm/common/internal.h>
# include <crm/common/options_internal.h>
# include <crm/common/output_internal.h>
+# include <crm/common/scheduler_internal.h>
-const char *pe__resource_description(const pe_resource_t *rsc, uint32_t show_opts);
+const char *pe__resource_description(const pcmk_resource_t *rsc,
+ uint32_t show_opts);
-enum pe__clone_flags {
- // Whether instances should be started sequentially
- pe__clone_ordered = (1 << 0),
+bool pe__clone_is_ordered(const pcmk_resource_t *clone);
+int pe__set_clone_flag(pcmk_resource_t *clone, enum pcmk__clone_flags flag);
+bool pe__clone_flag_is_set(const pcmk_resource_t *clone, uint32_t flags);
- // Whether promotion scores have been added
- pe__clone_promotion_added = (1 << 1),
-
- // Whether promotion constraints have been added
- pe__clone_promotion_constrained = (1 << 2),
-};
-
-bool pe__clone_is_ordered(const pe_resource_t *clone);
-int pe__set_clone_flag(pe_resource_t *clone, enum pe__clone_flags flag);
-
-
-enum pe__group_flags {
- pe__group_ordered = (1 << 0), // Members start sequentially
- pe__group_colocated = (1 << 1), // Members must be on same node
-};
-
-bool pe__group_flag_is_set(const pe_resource_t *group, uint32_t flags);
-pe_resource_t *pe__last_group_member(const pe_resource_t *group);
+bool pe__group_flag_is_set(const pcmk_resource_t *group, uint32_t flags);
+pcmk_resource_t *pe__last_group_member(const pcmk_resource_t *group);
# define pe_rsc_info(rsc, fmt, args...) crm_log_tag(LOG_INFO, rsc ? rsc->id : "<NULL>", fmt, ##args)
@@ -62,16 +49,16 @@ pe_resource_t *pe__last_group_member(const pe_resource_t *group);
# define pe_proc_err(fmt...) { was_processing_error = TRUE; crm_err(fmt); }
# define pe_proc_warn(fmt...) { was_processing_warning = TRUE; crm_warn(fmt); }
-#define pe__set_working_set_flags(working_set, flags_to_set) do { \
- (working_set)->flags = pcmk__set_flags_as(__func__, __LINE__, \
- LOG_TRACE, "Working set", crm_system_name, \
- (working_set)->flags, (flags_to_set), #flags_to_set); \
+#define pe__set_working_set_flags(scheduler, flags_to_set) do { \
+ (scheduler)->flags = pcmk__set_flags_as(__func__, __LINE__, \
+ LOG_TRACE, "Scheduler", crm_system_name, \
+ (scheduler)->flags, (flags_to_set), #flags_to_set); \
} while (0)
-#define pe__clear_working_set_flags(working_set, flags_to_clear) do { \
- (working_set)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
- LOG_TRACE, "Working set", crm_system_name, \
- (working_set)->flags, (flags_to_clear), #flags_to_clear); \
+#define pe__clear_working_set_flags(scheduler, flags_to_clear) do { \
+ (scheduler)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
+ LOG_TRACE, "Scheduler", crm_system_name, \
+ (scheduler)->flags, (flags_to_clear), #flags_to_clear); \
} while (0)
#define pe__set_resource_flags(resource, flags_to_set) do { \
@@ -152,144 +139,127 @@ pe_resource_t *pe__last_group_member(const pe_resource_t *group);
#flags_to_clear); \
} while (0)
-// Some warnings we don't want to print every transition
-
-enum pe_warn_once_e {
- pe_wo_blind = (1 << 0),
- pe_wo_restart_type = (1 << 1),
- pe_wo_role_after = (1 << 2),
- pe_wo_poweroff = (1 << 3),
- pe_wo_require_all = (1 << 4),
- pe_wo_order_score = (1 << 5),
- pe_wo_neg_threshold = (1 << 6),
- pe_wo_remove_after = (1 << 7),
- pe_wo_ping_node = (1 << 8),
- pe_wo_order_inst = (1 << 9),
- pe_wo_coloc_inst = (1 << 10),
- pe_wo_group_order = (1 << 11),
- pe_wo_group_coloc = (1 << 12),
- pe_wo_upstart = (1 << 13),
- pe_wo_nagios = (1 << 14),
-};
-
-extern uint32_t pe_wo;
-
#define pe_warn_once(pe_wo_bit, fmt...) do { \
- if (!pcmk_is_set(pe_wo, pe_wo_bit)) { \
- if (pe_wo_bit == pe_wo_blind) { \
+ if (!pcmk_is_set(pcmk__warnings, pe_wo_bit)) { \
+ if (pe_wo_bit == pcmk__wo_blind) { \
crm_warn(fmt); \
} else { \
pe_warn(fmt); \
} \
- pe_wo = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \
- "Warn-once", "logging", pe_wo, \
- (pe_wo_bit), #pe_wo_bit); \
- } \
+ pcmk__warnings = pcmk__set_flags_as(__func__, __LINE__, \
+ LOG_TRACE, \
+ "Warn-once", "logging", \
+ pcmk__warnings, \
+ (pe_wo_bit), #pe_wo_bit); \
+ } \
} while (0);
typedef struct pe__location_constraint_s {
char *id; // Constraint XML ID
- pe_resource_t *rsc_lh; // Resource being located
+ pcmk_resource_t *rsc_lh; // Resource being located
enum rsc_role_e role_filter; // Role to locate
enum pe_discover_e discover_mode; // Resource discovery
- GList *node_list_rh; // List of pe_node_t*
+ GList *node_list_rh; // List of pcmk_node_t*
} pe__location_t;
typedef struct pe__order_constraint_s {
int id;
- uint32_t flags; // Group of enum pe_ordering flags
+ uint32_t flags; // Group of enum pcmk__action_relation_flags
void *lh_opaque;
- pe_resource_t *lh_rsc;
- pe_action_t *lh_action;
+ pcmk_resource_t *lh_rsc;
+ pcmk_action_t *lh_action;
char *lh_action_task;
void *rh_opaque;
- pe_resource_t *rh_rsc;
- pe_action_t *rh_action;
+ pcmk_resource_t *rh_rsc;
+ pcmk_action_t *rh_action;
char *rh_action_task;
} pe__ordering_t;
-const pe_resource_t *pe__const_top_resource(const pe_resource_t *rsc,
- bool include_bundle);
+const pcmk_resource_t *pe__const_top_resource(const pcmk_resource_t *rsc,
+ bool include_bundle);
-int pe__clone_max(const pe_resource_t *clone);
-int pe__clone_node_max(const pe_resource_t *clone);
-int pe__clone_promoted_max(const pe_resource_t *clone);
-int pe__clone_promoted_node_max(const pe_resource_t *clone);
-void pe__create_clone_notifications(pe_resource_t *clone);
-void pe__free_clone_notification_data(pe_resource_t *clone);
-void pe__create_clone_notif_pseudo_ops(pe_resource_t *clone,
- pe_action_t *start, pe_action_t *started,
- pe_action_t *stop, pe_action_t *stopped);
+int pe__clone_max(const pcmk_resource_t *clone);
+int pe__clone_node_max(const pcmk_resource_t *clone);
+int pe__clone_promoted_max(const pcmk_resource_t *clone);
+int pe__clone_promoted_node_max(const pcmk_resource_t *clone);
+void pe__create_clone_notifications(pcmk_resource_t *clone);
+void pe__free_clone_notification_data(pcmk_resource_t *clone);
+void pe__create_clone_notif_pseudo_ops(pcmk_resource_t *clone,
+ pcmk_action_t *start,
+ pcmk_action_t *started,
+ pcmk_action_t *stop,
+ pcmk_action_t *stopped);
+pcmk_action_t *pe__new_rsc_pseudo_action(pcmk_resource_t *rsc, const char *task,
+ bool optional, bool runnable);
-pe_action_t *pe__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task,
- bool optional, bool runnable);
+void pe__create_promotable_pseudo_ops(pcmk_resource_t *clone,
+ bool any_promoting, bool any_demoting);
-void pe__create_promotable_pseudo_ops(pe_resource_t *clone, bool any_promoting,
- bool any_demoting);
-
-bool pe_can_fence(const pe_working_set_t *data_set, const pe_node_t *node);
+bool pe_can_fence(const pcmk_scheduler_t *scheduler, const pcmk_node_t *node);
void add_hash_param(GHashTable * hash, const char *name, const char *value);
-char *native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name,
- pe_working_set_t * data_set);
-pe_node_t *native_location(const pe_resource_t *rsc, GList **list, int current);
+char *native_parameter(pcmk_resource_t *rsc, pcmk_node_t *node, gboolean create,
+ const char *name, pcmk_scheduler_t *scheduler);
+pcmk_node_t *native_location(const pcmk_resource_t *rsc, GList **list,
+ int current);
void pe_metadata(pcmk__output_t *out);
void verify_pe_options(GHashTable * options);
-void native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set, gboolean failed);
+void native_add_running(pcmk_resource_t *rsc, pcmk_node_t *node,
+ pcmk_scheduler_t *scheduler, gboolean failed);
-gboolean native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
-gboolean group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
-gboolean clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
-gboolean pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set);
+gboolean native_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler);
+gboolean group_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler);
+gboolean clone_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler);
+gboolean pe__unpack_bundle(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler);
-pe_resource_t *native_find_rsc(pe_resource_t *rsc, const char *id, const pe_node_t *node,
- int flags);
+pcmk_resource_t *native_find_rsc(pcmk_resource_t *rsc, const char *id,
+ const pcmk_node_t *node, int flags);
-gboolean native_active(pe_resource_t * rsc, gboolean all);
-gboolean group_active(pe_resource_t * rsc, gboolean all);
-gboolean clone_active(pe_resource_t * rsc, gboolean all);
-gboolean pe__bundle_active(pe_resource_t *rsc, gboolean all);
+gboolean native_active(pcmk_resource_t *rsc, gboolean all);
+gboolean group_active(pcmk_resource_t *rsc, gboolean all);
+gboolean clone_active(pcmk_resource_t *rsc, gboolean all);
+gboolean pe__bundle_active(pcmk_resource_t *rsc, gboolean all);
//! \deprecated This function will be removed in a future release
-void native_print(pe_resource_t *rsc, const char *pre_text, long options,
+void native_print(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data);
//! \deprecated This function will be removed in a future release
-void group_print(pe_resource_t *rsc, const char *pre_text, long options,
+void group_print(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data);
//! \deprecated This function will be removed in a future release
-void clone_print(pe_resource_t *rsc, const char *pre_text, long options,
+void clone_print(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data);
//! \deprecated This function will be removed in a future release
-void pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
+void pe__print_bundle(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data);
-gchar *pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
- const pe_node_t *node, uint32_t show_opts,
+gchar *pcmk__native_output_string(const pcmk_resource_t *rsc, const char *name,
+ const pcmk_node_t *node, uint32_t show_opts,
const char *target_role, bool show_nodes);
int pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name
, size_t pairs_count, ...);
-char *pe__node_display_name(pe_node_t *node, bool print_detail);
+char *pe__node_display_name(pcmk_node_t *node, bool print_detail);
// Clone notifications (pe_notif.c)
-void pe__order_notifs_after_fencing(const pe_action_t *action,
- pe_resource_t *rsc,
- pe_action_t *stonith_op);
+void pe__order_notifs_after_fencing(const pcmk_action_t *action,
+ pcmk_resource_t *rsc,
+ pcmk_action_t *stonith_op);
static inline const char *
-pe__rsc_bool_str(const pe_resource_t *rsc, uint64_t rsc_flag)
+pe__rsc_bool_str(const pcmk_resource_t *rsc, uint64_t rsc_flag)
{
return pcmk__btoa(pcmk_is_set(rsc->flags, rsc_flag));
}
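A tiny sketch of pe__rsc_bool_str(); the flag name pcmk_rsc_managed is an assumption about the renamed flag set:

    /* Logs "true" or "false" depending on whether the flag is set */
    crm_info("Resource %s is managed: %s",
             rsc->id, pe__rsc_bool_str(rsc, pcmk_rsc_managed));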
@@ -308,167 +278,156 @@ int pe__resource_xml(pcmk__output_t *out, va_list args);
int pe__resource_html(pcmk__output_t *out, va_list args);
int pe__resource_text(pcmk__output_t *out, va_list args);
-void native_free(pe_resource_t * rsc);
-void group_free(pe_resource_t * rsc);
-void clone_free(pe_resource_t * rsc);
-void pe__free_bundle(pe_resource_t *rsc);
-
-enum rsc_role_e native_resource_state(const pe_resource_t * rsc, gboolean current);
-enum rsc_role_e group_resource_state(const pe_resource_t * rsc, gboolean current);
-enum rsc_role_e clone_resource_state(const pe_resource_t * rsc, gboolean current);
-enum rsc_role_e pe__bundle_resource_state(const pe_resource_t *rsc,
+void native_free(pcmk_resource_t *rsc);
+void group_free(pcmk_resource_t *rsc);
+void clone_free(pcmk_resource_t *rsc);
+void pe__free_bundle(pcmk_resource_t *rsc);
+
+enum rsc_role_e native_resource_state(const pcmk_resource_t *rsc,
+ gboolean current);
+enum rsc_role_e group_resource_state(const pcmk_resource_t *rsc,
+ gboolean current);
+enum rsc_role_e clone_resource_state(const pcmk_resource_t *rsc,
+ gboolean current);
+enum rsc_role_e pe__bundle_resource_state(const pcmk_resource_t *rsc,
gboolean current);
-void pe__count_common(pe_resource_t *rsc);
-void pe__count_bundle(pe_resource_t *rsc);
+void pe__count_common(pcmk_resource_t *rsc);
+void pe__count_bundle(pcmk_resource_t *rsc);
-void common_free(pe_resource_t * rsc);
+void common_free(pcmk_resource_t *rsc);
-pe_node_t *pe__copy_node(const pe_node_t *this_node);
-extern time_t get_effective_time(pe_working_set_t * data_set);
+pcmk_node_t *pe__copy_node(const pcmk_node_t *this_node);
+time_t get_effective_time(pcmk_scheduler_t *scheduler);
/* Failure handling utilities (from failcounts.c) */
-// bit flags for fail count handling options
-enum pe_fc_flags_e {
- pe_fc_default = (1 << 0),
- pe_fc_effective = (1 << 1), // don't count expired failures
- pe_fc_fillers = (1 << 2), // if container, include filler failures in count
-};
-
-int pe_get_failcount(const pe_node_t *node, pe_resource_t *rsc,
+int pe_get_failcount(const pcmk_node_t *node, pcmk_resource_t *rsc,
time_t *last_failure, uint32_t flags,
const xmlNode *xml_op);
-pe_action_t *pe__clear_failcount(pe_resource_t *rsc, const pe_node_t *node,
- const char *reason,
- pe_working_set_t *data_set);
+pcmk_action_t *pe__clear_failcount(pcmk_resource_t *rsc,
+ const pcmk_node_t *node, const char *reason,
+ pcmk_scheduler_t *scheduler);
/* Functions for finding/counting a resource's active nodes */
-bool pe__count_active_node(const pe_resource_t *rsc, pe_node_t *node,
- pe_node_t **active, unsigned int *count_all,
+bool pe__count_active_node(const pcmk_resource_t *rsc, pcmk_node_t *node,
+ pcmk_node_t **active, unsigned int *count_all,
unsigned int *count_clean);
-pe_node_t *pe__find_active_requires(const pe_resource_t *rsc,
+pcmk_node_t *pe__find_active_requires(const pcmk_resource_t *rsc,
unsigned int *count);
-static inline pe_node_t *
-pe__current_node(const pe_resource_t *rsc)
+static inline pcmk_node_t *
+pe__current_node(const pcmk_resource_t *rsc)
{
return (rsc == NULL)? NULL : rsc->fns->active_node(rsc, NULL, NULL);
}
/* Binary like operators for lists of nodes */
-extern void node_list_exclude(GHashTable * list, GList *list2, gboolean merge_scores);
-
GHashTable *pe__node_list2table(const GList *list);
-static inline gpointer
-pe_hash_table_lookup(GHashTable * hash, gconstpointer key)
-{
- if (hash) {
- return g_hash_table_lookup(hash, key);
- }
- return NULL;
-}
-
-extern pe_action_t *get_pseudo_op(const char *name, pe_working_set_t * data_set);
-extern gboolean order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order);
-
-void pe__show_node_weights_as(const char *file, const char *function,
- int line, bool to_log, const pe_resource_t *rsc,
- const char *comment, GHashTable *nodes,
- pe_working_set_t *data_set);
-
-#define pe__show_node_weights(level, rsc, text, nodes, data_set) \
- pe__show_node_weights_as(__FILE__, __func__, __LINE__, \
- (level), (rsc), (text), (nodes), (data_set))
-
-xmlNode *find_rsc_op_entry(const pe_resource_t *rsc, const char *key);
-
-pe_action_t *custom_action(pe_resource_t *rsc, char *key, const char *task,
- const pe_node_t *on_node, gboolean optional,
- gboolean foo, pe_working_set_t *data_set);
-
-# define delete_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DELETE, 0)
+pcmk_action_t *get_pseudo_op(const char *name, pcmk_scheduler_t *scheduler);
+gboolean order_actions(pcmk_action_t *lh_action, pcmk_action_t *rh_action,
+ uint32_t flags);
+
+void pe__show_node_scores_as(const char *file, const char *function,
+ int line, bool to_log, const pcmk_resource_t *rsc,
+ const char *comment, GHashTable *nodes,
+ pcmk_scheduler_t *scheduler);
+
+#define pe__show_node_scores(level, rsc, text, nodes, scheduler) \
+ pe__show_node_scores_as(__FILE__, __func__, __LINE__, \
+ (level), (rsc), (text), (nodes), (scheduler))
+
+GHashTable *pcmk__unpack_action_meta(pcmk_resource_t *rsc,
+ const pcmk_node_t *node,
+ const char *action_name, guint interval_ms,
+ const xmlNode *action_config);
+GHashTable *pcmk__unpack_action_rsc_params(const xmlNode *action_xml,
+ GHashTable *node_attrs,
+ pcmk_scheduler_t *data_set);
+xmlNode *pcmk__find_action_config(const pcmk_resource_t *rsc,
+ const char *action_name, guint interval_ms,
+ bool include_disabled);
+
+enum rsc_start_requirement pcmk__action_requires(const pcmk_resource_t *rsc,
+ const char *action_name);
+
+enum action_fail_response pcmk__parse_on_fail(const pcmk_resource_t *rsc,
+ const char *action_name,
+ guint interval_ms,
+ const char *value);
+
+enum rsc_role_e pcmk__role_after_failure(const pcmk_resource_t *rsc,
+ const char *action_name,
+ enum action_fail_response on_fail,
+ GHashTable *meta);
+
+pcmk_action_t *custom_action(pcmk_resource_t *rsc, char *key, const char *task,
+ const pcmk_node_t *on_node, gboolean optional,
+ pcmk_scheduler_t *scheduler);
+
+# define delete_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_DELETE, 0)
# define delete_action(rsc, node, optional) custom_action( \
- rsc, delete_key(rsc), CRMD_ACTION_DELETE, node, \
- optional, TRUE, rsc->cluster);
-
-# define stopped_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOPPED, 0)
-# define stopped_action(rsc, node, optional) custom_action( \
- rsc, stopped_key(rsc), CRMD_ACTION_STOPPED, node, \
- optional, TRUE, rsc->cluster);
+ rsc, delete_key(rsc), PCMK_ACTION_DELETE, node, \
+ optional, rsc->cluster);
-# define stop_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOP, 0)
+# define stop_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0)
# define stop_action(rsc, node, optional) custom_action( \
- rsc, stop_key(rsc), CRMD_ACTION_STOP, node, \
- optional, TRUE, rsc->cluster);
+ rsc, stop_key(rsc), PCMK_ACTION_STOP, node, \
+ optional, rsc->cluster);
-# define reload_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_RELOAD_AGENT, 0)
-# define start_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_START, 0)
+# define reload_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_RELOAD_AGENT, 0)
+# define start_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_START, 0)
# define start_action(rsc, node, optional) custom_action( \
- rsc, start_key(rsc), CRMD_ACTION_START, node, \
- optional, TRUE, rsc->cluster)
+ rsc, start_key(rsc), PCMK_ACTION_START, node, \
+ optional, rsc->cluster)
-# define started_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STARTED, 0)
-# define started_action(rsc, node, optional) custom_action( \
- rsc, started_key(rsc), CRMD_ACTION_STARTED, node, \
- optional, TRUE, rsc->cluster)
-
-# define promote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTE, 0)
+# define promote_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_PROMOTE, 0)
# define promote_action(rsc, node, optional) custom_action( \
- rsc, promote_key(rsc), CRMD_ACTION_PROMOTE, node, \
- optional, TRUE, rsc->cluster)
-
-# define promoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTED, 0)
-# define promoted_action(rsc, node, optional) custom_action( \
- rsc, promoted_key(rsc), CRMD_ACTION_PROMOTED, node, \
- optional, TRUE, rsc->cluster)
+ rsc, promote_key(rsc), PCMK_ACTION_PROMOTE, node, \
+ optional, rsc->cluster)
-# define demote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTE, 0)
+# define demote_key(rsc) pcmk__op_key(rsc->id, PCMK_ACTION_DEMOTE, 0)
# define demote_action(rsc, node, optional) custom_action( \
- rsc, demote_key(rsc), CRMD_ACTION_DEMOTE, node, \
- optional, TRUE, rsc->cluster)
+ rsc, demote_key(rsc), PCMK_ACTION_DEMOTE, node, \
+ optional, rsc->cluster)
-# define demoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTED, 0)
-# define demoted_action(rsc, node, optional) custom_action( \
- rsc, demoted_key(rsc), CRMD_ACTION_DEMOTED, node, \
- optional, TRUE, rsc->cluster)
+extern int pe_get_configured_timeout(pcmk_resource_t *rsc, const char *action,
+ pcmk_scheduler_t *scheduler);
-extern int pe_get_configured_timeout(pe_resource_t *rsc, const char *action,
- pe_working_set_t *data_set);
+pcmk_action_t *find_first_action(const GList *input, const char *uuid,
+ const char *task, const pcmk_node_t *on_node);
-pe_action_t *find_first_action(const GList *input, const char *uuid,
- const char *task, const pe_node_t *on_node);
+enum action_tasks get_complex_task(const pcmk_resource_t *rsc,
+ const char *name);
-enum action_tasks get_complex_task(const pe_resource_t *rsc, const char *name);
-
-extern GList *find_actions(GList *input, const char *key, const pe_node_t *on_node);
+GList *find_actions(GList *input, const char *key, const pcmk_node_t *on_node);
GList *find_actions_exact(GList *input, const char *key,
- const pe_node_t *on_node);
-GList *pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
+ const pcmk_node_t *on_node);
+GList *pe__resource_actions(const pcmk_resource_t *rsc, const pcmk_node_t *node,
const char *task, bool require_node);
-extern void pe_free_action(pe_action_t * action);
+extern void pe_free_action(pcmk_action_t *action);
-void resource_location(pe_resource_t *rsc, const pe_node_t *node, int score,
- const char *tag, pe_working_set_t *data_set);
+void resource_location(pcmk_resource_t *rsc, const pcmk_node_t *node, int score,
+ const char *tag, pcmk_scheduler_t *scheduler);
extern int pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b,
bool same_node_default);
extern gint sort_op_by_callid(gconstpointer a, gconstpointer b);
-gboolean get_target_role(const pe_resource_t *rsc, enum rsc_role_e *role);
-void pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role,
+gboolean get_target_role(const pcmk_resource_t *rsc, enum rsc_role_e *role);
+void pe__set_next_role(pcmk_resource_t *rsc, enum rsc_role_e role,
const char *why);
-pe_resource_t *find_clone_instance(const pe_resource_t *rsc,
- const char *sub_id);
+pcmk_resource_t *find_clone_instance(const pcmk_resource_t *rsc,
+ const char *sub_id);
extern void destroy_ticket(gpointer data);
-extern pe_ticket_t *ticket_new(const char *ticket_id, pe_working_set_t * data_set);
+pcmk_ticket_t *ticket_new(const char *ticket_id, pcmk_scheduler_t *scheduler);
// Resources for manipulating resource names
const char *pe_base_name_end(const char *id);
@@ -476,7 +435,7 @@ char *clone_strip(const char *last_rsc_id);
char *clone_zero(const char *last_rsc_id);
static inline bool
-pe_base_name_eq(const pe_resource_t *rsc, const char *id)
+pe_base_name_eq(const pcmk_resource_t *rsc, const char *id)
{
if (id && rsc && rsc->id) {
// Number of characters in rsc->id before any clone suffix
@@ -490,22 +449,10 @@ pe_base_name_eq(const pe_resource_t *rsc, const char *id)
int pe__target_rc_from_xml(const xmlNode *xml_op);
gint pe__cmp_node_name(gconstpointer a, gconstpointer b);
-bool is_set_recursive(const pe_resource_t *rsc, long long flag, bool any);
-
-enum rsc_digest_cmp_val {
- /*! Digests are the same */
- RSC_DIGEST_MATCH = 0,
- /*! Params that require a restart changed */
- RSC_DIGEST_RESTART,
- /*! Some parameter changed. */
- RSC_DIGEST_ALL,
- /*! rsc op didn't have a digest associated with it, so
- * it is unknown if parameters changed or not. */
- RSC_DIGEST_UNKNOWN,
-};
+bool is_set_recursive(const pcmk_resource_t *rsc, long long flag, bool any);
typedef struct op_digest_cache_s {
- enum rsc_digest_cmp_val rc;
+ enum pcmk__digest_result rc;
xmlNode *params_all;
xmlNode *params_secure;
xmlNode *params_restart;
@@ -514,35 +461,37 @@ typedef struct op_digest_cache_s {
char *digest_restart_calc;
} op_digest_cache_t;
-op_digest_cache_t *pe__calculate_digests(pe_resource_t *rsc, const char *task,
+op_digest_cache_t *pe__calculate_digests(pcmk_resource_t *rsc, const char *task,
guint *interval_ms,
- const pe_node_t *node,
+ const pcmk_node_t *node,
const xmlNode *xml_op,
GHashTable *overrides,
bool calc_secure,
- pe_working_set_t *data_set);
+ pcmk_scheduler_t *scheduler);
void pe__free_digests(gpointer ptr);
-op_digest_cache_t *rsc_action_digest_cmp(pe_resource_t *rsc,
+op_digest_cache_t *rsc_action_digest_cmp(pcmk_resource_t *rsc,
const xmlNode *xml_op,
- pe_node_t *node,
- pe_working_set_t *data_set);
-
-pe_action_t *pe_fence_op(pe_node_t *node, const char *op, bool optional,
- const char *reason, bool priority_delay,
- pe_working_set_t *data_set);
-void trigger_unfencing(pe_resource_t *rsc, pe_node_t *node,
- const char *reason, pe_action_t *dependency,
- pe_working_set_t *data_set);
-
-char *pe__action2reason(const pe_action_t *action, enum pe_action_flags flag);
-void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite);
-void pe__add_action_expected_result(pe_action_t *action, int expected_result);
-
-void pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags);
-void pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags);
-void pe__clear_resource_flags_on_all(pe_working_set_t *data_set, uint64_t flag);
+ pcmk_node_t *node,
+ pcmk_scheduler_t *scheduler);
+
+pcmk_action_t *pe_fence_op(pcmk_node_t *node, const char *op, bool optional,
+ const char *reason, bool priority_delay,
+ pcmk_scheduler_t *scheduler);
+void trigger_unfencing(pcmk_resource_t *rsc, pcmk_node_t *node,
+ const char *reason, pcmk_action_t *dependency,
+ pcmk_scheduler_t *scheduler);
+
+char *pe__action2reason(const pcmk_action_t *action, enum pe_action_flags flag);
+void pe_action_set_reason(pcmk_action_t *action, const char *reason,
+ bool overwrite);
+void pe__add_action_expected_result(pcmk_action_t *action, int expected_result);
+
+void pe__set_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags);
+void pe__clear_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags);
+void pe__clear_resource_flags_on_all(pcmk_scheduler_t *scheduler,
+ uint64_t flag);
gboolean add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref);
@@ -550,49 +499,75 @@ gboolean add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj
void print_rscs_brief(GList *rsc_list, const char * pre_text, long options,
void * print_data, gboolean print_all);
int pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, unsigned int options);
-void pe_fence_node(pe_working_set_t * data_set, pe_node_t * node, const char *reason, bool priority_delay);
+void pe_fence_node(pcmk_scheduler_t *scheduler, pcmk_node_t *node,
+ const char *reason, bool priority_delay);
-pe_node_t *pe_create_node(const char *id, const char *uname, const char *type,
- const char *score, pe_working_set_t * data_set);
+pcmk_node_t *pe_create_node(const char *id, const char *uname, const char *type,
+ const char *score, pcmk_scheduler_t *scheduler);
//! \deprecated This function will be removed in a future release
-void common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
- const pe_node_t *node, long options, void *print_data);
-int pe__common_output_text(pcmk__output_t *out, const pe_resource_t *rsc,
- const char *name, const pe_node_t *node,
+void common_print(pcmk_resource_t *rsc, const char *pre_text, const char *name,
+ const pcmk_node_t *node, long options, void *print_data);
+int pe__common_output_text(pcmk__output_t *out, const pcmk_resource_t *rsc,
+ const char *name, const pcmk_node_t *node,
unsigned int options);
-int pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc,
- const char *name, const pe_node_t *node,
+int pe__common_output_html(pcmk__output_t *out, const pcmk_resource_t *rsc,
+ const char *name, const pcmk_node_t *node,
unsigned int options);
-GList *pe__bundle_containers(const pe_resource_t *bundle);
-
-int pe__bundle_max(const pe_resource_t *rsc);
-int pe__bundle_max_per_node(const pe_resource_t *rsc);
-
-pe_resource_t *pe__find_bundle_replica(const pe_resource_t *bundle,
- const pe_node_t *node);
-bool pe__bundle_needs_remote_name(pe_resource_t *rsc);
-const char *pe__add_bundle_remote_name(pe_resource_t *rsc,
- pe_working_set_t *data_set,
+//! A single instance of a bundle
+typedef struct {
+ int offset; //!< 0-origin index of this instance in bundle
+ char *ipaddr; //!< IP address associated with this instance
+ pcmk_node_t *node; //!< Node created for this instance
+ pcmk_resource_t *ip; //!< IP address resource for ipaddr
+ pcmk_resource_t *child; //!< Instance of bundled resource
+ pcmk_resource_t *container; //!< Container associated with this instance
+ pcmk_resource_t *remote; //!< Pacemaker Remote connection into container
+} pe__bundle_replica_t;
+
+GList *pe__bundle_containers(const pcmk_resource_t *bundle);
+
+int pe__bundle_max(const pcmk_resource_t *rsc);
+bool pe__node_is_bundle_instance(const pcmk_resource_t *bundle,
+ const pcmk_node_t *node);
+pcmk_resource_t *pe__bundled_resource(const pcmk_resource_t *rsc);
+const pcmk_resource_t *pe__get_rsc_in_container(const pcmk_resource_t *instance);
+pcmk_resource_t *pe__first_container(const pcmk_resource_t *bundle);
+void pe__foreach_bundle_replica(pcmk_resource_t *bundle,
+ bool (*fn)(pe__bundle_replica_t *, void *),
+ void *user_data);
+void pe__foreach_const_bundle_replica(const pcmk_resource_t *bundle,
+ bool (*fn)(const pe__bundle_replica_t *,
+ void *),
+ void *user_data);
+pcmk_resource_t *pe__find_bundle_replica(const pcmk_resource_t *bundle,
+ const pcmk_node_t *node);
+bool pe__bundle_needs_remote_name(pcmk_resource_t *rsc);
+const char *pe__add_bundle_remote_name(pcmk_resource_t *rsc,
+ pcmk_scheduler_t *scheduler,
xmlNode *xml, const char *field);
-const char *pe_node_attribute_calculated(const pe_node_t *node,
- const char *name,
- const pe_resource_t *rsc);
-const char *pe_node_attribute_raw(const pe_node_t *node, const char *name);
-bool pe__is_universal_clone(const pe_resource_t *rsc,
- const pe_working_set_t *data_set);
-void pe__add_param_check(const xmlNode *rsc_op, pe_resource_t *rsc,
- pe_node_t *node, enum pe_check_parameters,
- pe_working_set_t *data_set);
-void pe__foreach_param_check(pe_working_set_t *data_set,
- void (*cb)(pe_resource_t*, pe_node_t*,
+
+const char *pe__node_attribute_calculated(const pcmk_node_t *node,
+ const char *name,
+ const pcmk_resource_t *rsc,
+ enum pcmk__rsc_node node_type,
+ bool force_host);
+const char *pe_node_attribute_raw(const pcmk_node_t *node, const char *name);
+bool pe__is_universal_clone(const pcmk_resource_t *rsc,
+ const pcmk_scheduler_t *scheduler);
+void pe__add_param_check(const xmlNode *rsc_op, pcmk_resource_t *rsc,
+ pcmk_node_t *node, enum pcmk__check_parameters,
+ pcmk_scheduler_t *scheduler);
+void pe__foreach_param_check(pcmk_scheduler_t *scheduler,
+ void (*cb)(pcmk_resource_t*, pcmk_node_t*,
const xmlNode*,
- enum pe_check_parameters));
-void pe__free_param_checks(pe_working_set_t *data_set);
+ enum pcmk__check_parameters));
+void pe__free_param_checks(pcmk_scheduler_t *scheduler);
-bool pe__shutdown_requested(const pe_node_t *node);
-void pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set);
+bool pe__shutdown_requested(const pcmk_node_t *node);
+void pe__update_recheck_time(time_t recheck, pcmk_scheduler_t *scheduler,
+ const char *reason);
/*!
* \internal
@@ -605,53 +580,55 @@ void pe__register_messages(pcmk__output_t *out);
void pe__unpack_dataset_nvpairs(const xmlNode *xml_obj, const char *set_name,
const pe_rule_eval_data_t *rule_data,
GHashTable *hash, const char *always_first,
- gboolean overwrite, pe_working_set_t *data_set);
-
-bool pe__resource_is_disabled(const pe_resource_t *rsc);
-pe_action_t *pe__clear_resource_history(pe_resource_t *rsc,
- const pe_node_t *node,
- pe_working_set_t *data_set);
-
-GList *pe__rscs_with_tag(pe_working_set_t *data_set, const char *tag_name);
-GList *pe__unames_with_tag(pe_working_set_t *data_set, const char *tag_name);
-bool pe__rsc_has_tag(pe_working_set_t *data_set, const char *rsc, const char *tag);
-bool pe__uname_has_tag(pe_working_set_t *data_set, const char *node, const char *tag);
-
-bool pe__rsc_running_on_only(const pe_resource_t *rsc, const pe_node_t *node);
-bool pe__rsc_running_on_any(pe_resource_t *rsc, GList *node_list);
+ gboolean overwrite,
+ pcmk_scheduler_t *scheduler);
+
+bool pe__resource_is_disabled(const pcmk_resource_t *rsc);
+void pe__clear_resource_history(pcmk_resource_t *rsc, const pcmk_node_t *node);
+
+GList *pe__rscs_with_tag(pcmk_scheduler_t *scheduler, const char *tag_name);
+GList *pe__unames_with_tag(pcmk_scheduler_t *scheduler, const char *tag_name);
+bool pe__rsc_has_tag(pcmk_scheduler_t *scheduler, const char *rsc,
+ const char *tag);
+bool pe__uname_has_tag(pcmk_scheduler_t *scheduler, const char *node,
+ const char *tag);
+
+bool pe__rsc_running_on_only(const pcmk_resource_t *rsc,
+ const pcmk_node_t *node);
+bool pe__rsc_running_on_any(pcmk_resource_t *rsc, GList *node_list);
GList *pe__filter_rsc_list(GList *rscs, GList *filter);
-GList * pe__build_node_name_list(pe_working_set_t *data_set, const char *s);
-GList * pe__build_rsc_list(pe_working_set_t *data_set, const char *s);
+GList * pe__build_node_name_list(pcmk_scheduler_t *scheduler, const char *s);
+GList * pe__build_rsc_list(pcmk_scheduler_t *scheduler, const char *s);
-bool pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GList *only_node);
+bool pcmk__rsc_filtered_by_node(pcmk_resource_t *rsc, GList *only_node);
-gboolean pe__bundle_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
+gboolean pe__bundle_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
-gboolean pe__clone_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
+gboolean pe__clone_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
-gboolean pe__group_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
+gboolean pe__group_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
-gboolean pe__native_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
+gboolean pe__native_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent);
-xmlNode *pe__failed_probe_for_rsc(const pe_resource_t *rsc, const char *name);
+xmlNode *pe__failed_probe_for_rsc(const pcmk_resource_t *rsc, const char *name);
-const char *pe__clone_child_id(const pe_resource_t *rsc);
+const char *pe__clone_child_id(const pcmk_resource_t *rsc);
-int pe__sum_node_health_scores(const pe_node_t *node, int base_health);
-int pe__node_health(pe_node_t *node);
+int pe__sum_node_health_scores(const pcmk_node_t *node, int base_health);
+int pe__node_health(pcmk_node_t *node);
static inline enum pcmk__health_strategy
-pe__health_strategy(pe_working_set_t *data_set)
+pe__health_strategy(pcmk_scheduler_t *scheduler)
{
- return pcmk__parse_health_strategy(pe_pref(data_set->config_hash,
+ return pcmk__parse_health_strategy(pe_pref(scheduler->config_hash,
PCMK__OPT_NODE_HEALTH_STRATEGY));
}
static inline int
-pe__health_score(const char *option, pe_working_set_t *data_set)
+pe__health_score(const char *option, pcmk_scheduler_t *scheduler)
{
- return char2score(pe_pref(data_set->config_hash, option));
+ return char2score(pe_pref(scheduler->config_hash, option));
}
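To show how the health helpers above fit together, a hedged sketch follows; the pcmk__health_strategy_none enumerator is assumed from the pcmk__parse_health_strategy() naming convention and is not visible in this diff, and real scheduler code clamps scores rather than adding them directly:

#include <crm/pengine/internal.h>

// Sketch: fold a node's health into its weight unless health is ignored
static void
apply_node_health_sketch(pcmk_scheduler_t *scheduler, pcmk_node_t *node)
{
    if (pe__health_strategy(scheduler) == pcmk__health_strategy_none) {
        return;     // node-health-strategy=none: health attributes are ignored
    }
    // pe__node_health() combines the node's health attributes per the strategy
    node->weight += pe__node_health(node);      // real code would clamp at +/-INFINITY
}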
/*!
@@ -665,7 +642,7 @@ pe__health_score(const char *option, pe_working_set_t *data_set)
* if node has neither a name nor ID.
*/
static inline const char *
-pe__node_name(const pe_node_t *node)
+pe__node_name(const pcmk_node_t *node)
{
if (node == NULL) {
return "unspecified node";
@@ -691,7 +668,7 @@ pe__node_name(const pe_node_t *node)
* \return true if \p node1 and \p node2 refer to the same node
*/
static inline bool
-pe__same_node(const pe_node_t *node1, const pe_node_t *node2)
+pe__same_node(const pcmk_node_t *node1, const pcmk_node_t *node2)
{
return (node1 != NULL) && (node2 != NULL)
&& (node1->details == node2->details);
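The two inline helpers above are typically used for logging and node comparison; a small illustrative sketch (the surrounding function is hypothetical, crm_trace() is the existing logging macro):

#include <crm/crm.h>                // crm_trace()
#include <crm/pengine/internal.h>   // pe__node_name(), pe__same_node()

// Hypothetical helper: log when a resource's assignment differs from its current node
static void
log_if_moving(const pcmk_resource_t *rsc, const pcmk_node_t *current,
              const pcmk_node_t *assigned)
{
    if (!pe__same_node(current, assigned)) {
        crm_trace("%s moves from %s to %s",
                  rsc->id, pe__node_name(current), pe__node_name(assigned));
    }
}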
diff --git a/include/crm/pengine/pe_types.h b/include/crm/pengine/pe_types.h
index cc626c8..24355f8 100644
--- a/include/crm/pengine/pe_types.h
+++ b/include/crm/pengine/pe_types.h
@@ -16,6 +16,7 @@
# include <libxml/tree.h> // xmlNode
# include <glib.h> // gboolean, guint, GList, GHashTable
# include <crm/common/iso8601.h>
+# include <crm/common/scheduler.h>
# include <crm/pengine/common.h>
#ifdef __cplusplus
@@ -28,535 +29,6 @@ extern "C" {
* \ingroup pengine
*/
-typedef struct pe_node_s pe_node_t;
-typedef struct pe_action_s pe_action_t;
-typedef struct pe_resource_s pe_resource_t;
-typedef struct pe_working_set_s pe_working_set_t;
-
-enum pe_obj_types {
- pe_unknown = -1,
- pe_native = 0,
- pe_group = 1,
- pe_clone = 2,
- pe_container = 3,
-};
-
-typedef struct resource_object_functions_s {
- gboolean (*unpack) (pe_resource_t*, pe_working_set_t*);
- pe_resource_t *(*find_rsc) (pe_resource_t *parent, const char *search,
- const pe_node_t *node, int flags);
- /* parameter result must be free'd */
- char *(*parameter) (pe_resource_t*, pe_node_t*, gboolean, const char*,
- pe_working_set_t*);
- //! \deprecated will be removed in a future release
- void (*print) (pe_resource_t*, const char*, long, void*);
- gboolean (*active) (pe_resource_t*, gboolean);
- enum rsc_role_e (*state) (const pe_resource_t*, gboolean);
- pe_node_t *(*location) (const pe_resource_t*, GList**, int);
- void (*free) (pe_resource_t*);
- void (*count) (pe_resource_t*);
- gboolean (*is_filtered) (const pe_resource_t*, GList *, gboolean);
-
- /*!
- * \brief
- * \internal Find a node (and optionally count all) where resource is active
- *
- * \param[in] rsc Resource to check
- * \param[out] count_all If not NULL, set this to count of active nodes
- * \param[out] count_clean If not NULL, set this to count of clean nodes
- *
- * \return A node where the resource is active, preferring the source node
- * if the resource is involved in a partial migration or a clean,
- * online node if the resource's "requires" is "quorum" or
- * "nothing", or NULL if the resource is inactive.
- */
- pe_node_t *(*active_node)(const pe_resource_t *rsc, unsigned int *count_all,
- unsigned int *count_clean);
-} resource_object_functions_t;
-
-typedef struct resource_alloc_functions_s resource_alloc_functions_t;
-
-enum pe_quorum_policy {
- no_quorum_freeze,
- no_quorum_stop,
- no_quorum_ignore,
- no_quorum_suicide,
- no_quorum_demote
-};
-
-enum node_type {
- node_ping, //! \deprecated Do not use
- node_member,
- node_remote
-};
-
-//! \deprecated will be removed in a future release
-enum pe_restart {
- pe_restart_restart, //! \deprecated will be removed in a future release
- pe_restart_ignore //! \deprecated will be removed in a future release
-};
-
-//! Determine behavior of pe_find_resource_with_flags()
-enum pe_find {
- pe_find_renamed = 0x001, //!< match resource ID or LRM history ID
- pe_find_anon = 0x002, //!< match base name of anonymous clone instances
- pe_find_clone = 0x004, //!< match only clone instances
- pe_find_current = 0x008, //!< match resource active on specified node
- pe_find_inactive = 0x010, //!< match resource not running anywhere
- pe_find_any = 0x020, //!< match base name of any clone instance
-};
-
-// @TODO Make these an enum
-
-# define pe_flag_have_quorum 0x00000001ULL
-# define pe_flag_symmetric_cluster 0x00000002ULL
-# define pe_flag_maintenance_mode 0x00000008ULL
-
-# define pe_flag_stonith_enabled 0x00000010ULL
-# define pe_flag_have_stonith_resource 0x00000020ULL
-# define pe_flag_enable_unfencing 0x00000040ULL
-# define pe_flag_concurrent_fencing 0x00000080ULL
-
-# define pe_flag_stop_rsc_orphans 0x00000100ULL
-# define pe_flag_stop_action_orphans 0x00000200ULL
-# define pe_flag_stop_everything 0x00000400ULL
-
-# define pe_flag_start_failure_fatal 0x00001000ULL
-
-//! \deprecated
-# define pe_flag_remove_after_stop 0x00002000ULL
-
-# define pe_flag_startup_fencing 0x00004000ULL
-# define pe_flag_shutdown_lock 0x00008000ULL
-
-# define pe_flag_startup_probes 0x00010000ULL
-# define pe_flag_have_status 0x00020000ULL
-# define pe_flag_have_remote_nodes 0x00040000ULL
-
-# define pe_flag_quick_location 0x00100000ULL
-# define pe_flag_sanitized 0x00200000ULL
-
-//! \deprecated
-# define pe_flag_stdout 0x00400000ULL
-
-//! Don't count total, disabled and blocked resource instances
-# define pe_flag_no_counts 0x00800000ULL
-
-/*! Skip deprecated code that is kept solely for backward API compatibility.
- * (Internal code should always set this.)
- */
-# define pe_flag_no_compat 0x01000000ULL
-
-# define pe_flag_show_scores 0x02000000ULL
-# define pe_flag_show_utilization 0x04000000ULL
-
-/*!
- * When scheduling, only unpack the CIB (including constraints), calculate
- * as much cluster status as possible, and apply node health.
- */
-# define pe_flag_check_config 0x08000000ULL
-
-struct pe_working_set_s {
- xmlNode *input;
- crm_time_t *now;
-
- /* options extracted from the input */
- char *dc_uuid;
- pe_node_t *dc_node;
- const char *stonith_action;
- const char *placement_strategy;
-
- unsigned long long flags;
-
- int stonith_timeout;
- enum pe_quorum_policy no_quorum_policy;
-
- GHashTable *config_hash;
- GHashTable *tickets;
-
- // Actions for which there can be only one (e.g. fence nodeX)
- GHashTable *singletons;
-
- GList *nodes;
- GList *resources;
- GList *placement_constraints;
- GList *ordering_constraints;
- GList *colocation_constraints;
- GList *ticket_constraints;
-
- GList *actions;
- xmlNode *failed;
- xmlNode *op_defaults;
- xmlNode *rsc_defaults;
-
- /* stats */
- int num_synapse;
- int max_valid_nodes; //! Deprecated (will be removed in a future release)
- int order_id;
- int action_id;
-
- /* final output */
- xmlNode *graph;
-
- GHashTable *template_rsc_sets;
- const char *localhost;
- GHashTable *tags;
-
- int blocked_resources;
- int disabled_resources;
-
- GList *param_check; // History entries that need to be checked
- GList *stop_needed; // Containers that need stop actions
- time_t recheck_by; // Hint to controller to re-run scheduler by this time
- int ninstances; // Total number of resource instances
- guint shutdown_lock;// How long (seconds) to lock resources to shutdown node
- int priority_fencing_delay; // Priority fencing delay
-
- void *priv;
-};
-
-enum pe_check_parameters {
- /* Clear fail count if parameters changed for un-expired start or monitor
- * last_failure.
- */
- pe_check_last_failure,
-
- /* Clear fail count if parameters changed for start, monitor, promote, or
- * migrate_from actions for active resources.
- */
- pe_check_active,
-};
-
-struct pe_node_shared_s {
- const char *id;
- const char *uname;
- enum node_type type;
-
- /* @TODO convert these flags into a bitfield */
- gboolean online;
- gboolean standby;
- gboolean standby_onfail;
- gboolean pending;
- gboolean unclean;
- gboolean unseen;
- gboolean shutdown;
- gboolean expected_up;
- gboolean is_dc;
- gboolean maintenance;
- gboolean rsc_discovery_enabled;
- gboolean remote_requires_reset;
- gboolean remote_was_fenced;
- gboolean remote_maintenance; /* what the remote-rsc is thinking */
- gboolean unpacked;
-
- int num_resources;
- pe_resource_t *remote_rsc;
- GList *running_rsc; /* pe_resource_t* */
- GList *allocated_rsc; /* pe_resource_t* */
-
- GHashTable *attrs; /* char* => char* */
- GHashTable *utilization;
- GHashTable *digest_cache; //!< cache of calculated resource digests
- int priority; // calculated based on the priority of resources running on the node
- pe_working_set_t *data_set; //!< Cluster that this node is part of
-};
-
-struct pe_node_s {
- int weight;
- gboolean fixed; //!< \deprecated Will be removed in a future release
- int count;
- struct pe_node_shared_s *details;
- int rsc_discover_mode;
-};
-
-# define pe_rsc_orphan 0x00000001ULL
-# define pe_rsc_managed 0x00000002ULL
-# define pe_rsc_block 0x00000004ULL
-# define pe_rsc_orphan_container_filler 0x00000008ULL
-
-# define pe_rsc_notify 0x00000010ULL
-# define pe_rsc_unique 0x00000020ULL
-# define pe_rsc_fence_device 0x00000040ULL
-# define pe_rsc_promotable 0x00000080ULL
-
-# define pe_rsc_provisional 0x00000100ULL
-# define pe_rsc_allocating 0x00000200ULL
-# define pe_rsc_merging 0x00000400ULL
-# define pe_rsc_restarting 0x00000800ULL
-
-# define pe_rsc_stop 0x00001000ULL
-# define pe_rsc_reload 0x00002000ULL
-# define pe_rsc_allow_remote_remotes 0x00004000ULL
-# define pe_rsc_critical 0x00008000ULL
-
-# define pe_rsc_failed 0x00010000ULL
-# define pe_rsc_detect_loop 0x00020000ULL
-# define pe_rsc_runnable 0x00040000ULL
-# define pe_rsc_start_pending 0x00080000ULL
-
-//!< \deprecated Do not use
-# define pe_rsc_starting 0x00100000ULL
-
-//!< \deprecated Do not use
-# define pe_rsc_stopping 0x00200000ULL
-
-# define pe_rsc_stop_unexpected 0x00400000ULL
-# define pe_rsc_allow_migrate 0x00800000ULL
-
-# define pe_rsc_failure_ignored 0x01000000ULL
-# define pe_rsc_replica_container 0x02000000ULL
-# define pe_rsc_maintenance 0x04000000ULL
-# define pe_rsc_is_container 0x08000000ULL
-
-# define pe_rsc_needs_quorum 0x10000000ULL
-# define pe_rsc_needs_fencing 0x20000000ULL
-# define pe_rsc_needs_unfencing 0x40000000ULL
-
-/* *INDENT-OFF* */
-enum pe_action_flags {
- pe_action_pseudo = 0x00001,
- pe_action_runnable = 0x00002,
- pe_action_optional = 0x00004,
- pe_action_print_always = 0x00008,
-
- pe_action_have_node_attrs = 0x00010,
- pe_action_implied_by_stonith = 0x00040,
- pe_action_migrate_runnable = 0x00080,
-
- pe_action_dumped = 0x00100,
- pe_action_processed = 0x00200,
-#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
- pe_action_clear = 0x00400, //! \deprecated Unused
-#endif
- pe_action_dangle = 0x00800,
-
- /* This action requires one or more of its dependencies to be runnable.
- * We use this to clear the runnable flag before checking dependencies.
- */
- pe_action_requires_any = 0x01000,
-
- pe_action_reschedule = 0x02000,
- pe_action_tracking = 0x04000,
- pe_action_dedup = 0x08000, //! Internal state tracking when creating graph
-
- pe_action_dc = 0x10000, //! Action may run on DC instead of target
-};
-/* *INDENT-ON* */
-
-struct pe_resource_s {
- char *id;
- char *clone_name;
- xmlNode *xml;
- xmlNode *orig_xml;
- xmlNode *ops_xml;
-
- pe_working_set_t *cluster;
- pe_resource_t *parent;
-
- enum pe_obj_types variant;
- void *variant_opaque;
- resource_object_functions_t *fns;
- resource_alloc_functions_t *cmds;
-
- enum rsc_recovery_type recovery_type;
-
- enum pe_restart restart_type; //!< \deprecated will be removed in future release
-
- int priority;
- int stickiness;
- int sort_index;
- int failure_timeout;
- int migration_threshold;
- guint remote_reconnect_ms;
- char *pending_task;
-
- unsigned long long flags;
-
- // @TODO merge these into flags
- gboolean is_remote_node;
- gboolean exclusive_discover;
-
- /* Pay special attention to whether you want to use rsc_cons_lhs and
- * rsc_cons directly, which include only colocations explicitly involving
- * this resource, or call libpacemaker's pcmk__with_this_colocations() and
- * pcmk__this_with_colocations() functions, which may return relevant
- * colocations involving the resource's ancestors as well.
- */
-
- //!@{
- //! This field should be treated as internal to Pacemaker
- GList *rsc_cons_lhs; // List of pcmk__colocation_t*
- GList *rsc_cons; // List of pcmk__colocation_t*
- GList *rsc_location; // List of pe__location_t*
- GList *actions; // List of pe_action_t*
- GList *rsc_tickets; // List of rsc_ticket*
- //!@}
-
- pe_node_t *allocated_to;
- pe_node_t *partial_migration_target;
- pe_node_t *partial_migration_source;
- GList *running_on; /* pe_node_t* */
- GHashTable *known_on; /* pe_node_t* */
- GHashTable *allowed_nodes; /* pe_node_t* */
-
- enum rsc_role_e role;
- enum rsc_role_e next_role;
-
- GHashTable *meta;
- GHashTable *parameters; //! \deprecated Use pe_rsc_params() instead
- GHashTable *utilization;
-
- GList *children; /* pe_resource_t* */
- GList *dangling_migrations; /* pe_node_t* */
-
- pe_resource_t *container;
- GList *fillers;
-
- // @COMPAT These should be made const at next API compatibility break
- pe_node_t *pending_node; // Node on which pending_task is happening
- pe_node_t *lock_node; // Resource is shutdown-locked to this node
-
- time_t lock_time; // When shutdown lock started
-
- /* Resource parameters may have node-attribute-based rules, which means the
- * values can vary by node. This table is a cache of parameter name/value
- * tables for each node (as needed). Use pe_rsc_params() to get the table
- * for a given node.
- */
- GHashTable *parameter_cache; // Key = node name, value = parameters table
-};
-
-struct pe_action_s {
- int id;
- int priority;
-
- pe_resource_t *rsc;
- pe_node_t *node;
- xmlNode *op_entry;
-
- char *task;
- char *uuid;
- char *cancel_task;
- char *reason;
-
- enum pe_action_flags flags;
- enum rsc_start_requirement needs;
- enum action_fail_response on_fail;
- enum rsc_role_e fail_role;
-
- GHashTable *meta;
- GHashTable *extra;
-
- /*
- * These two varables are associated with the constraint logic
- * that involves first having one or more actions runnable before
- * then allowing this action to execute.
- *
- * These varables are used with features such as 'clone-min' which
- * requires at minimum X number of cloned instances to be running
- * before an order dependency can run. Another option that uses
- * this is 'require-all=false' in ordering constrants. This option
- * says "only require one instance of a resource to start before
- * allowing dependencies to start" -- basically, require-all=false is
- * the same as clone-min=1.
- */
-
- /* current number of known runnable actions in the before list. */
- int runnable_before;
- /* the number of "before" runnable actions required for this action
- * to be considered runnable */
- int required_runnable_before;
-
- GList *actions_before; /* pe_action_wrapper_t* */
- GList *actions_after; /* pe_action_wrapper_t* */
-
- /* Some of the above fields could be moved to the details,
- * except for API backward compatibility.
- */
- void *action_details; // varies by type of action
-};
-
-typedef struct pe_ticket_s {
- char *id;
- gboolean granted;
- time_t last_granted;
- gboolean standby;
- GHashTable *state;
-} pe_ticket_t;
-
-typedef struct pe_tag_s {
- char *id;
- GList *refs;
-} pe_tag_t;
-
-//! Internal tracking for transition graph creation
-enum pe_link_state {
- pe_link_not_dumped, //! Internal tracking for transition graph creation
- pe_link_dumped, //! Internal tracking for transition graph creation
- pe_link_dup, //! \deprecated No longer used by Pacemaker
-};
-
-enum pe_discover_e {
- pe_discover_always = 0,
- pe_discover_never,
- pe_discover_exclusive,
-};
-
-/* *INDENT-OFF* */
-enum pe_ordering {
- pe_order_none = 0x0, /* deleted */
- pe_order_optional = 0x1, /* pure ordering, nothing implied */
- pe_order_apply_first_non_migratable = 0x2, /* Only apply this constraint's ordering if first is not migratable. */
-
- pe_order_implies_first = 0x10, /* If 'then' is required, ensure 'first' is too */
- pe_order_implies_then = 0x20, /* If 'first' is required, ensure 'then' is too */
- pe_order_promoted_implies_first = 0x40, /* If 'then' is required and then's rsc is promoted, ensure 'first' becomes required too */
-
- /* first requires then to be both runnable and migrate runnable. */
- pe_order_implies_first_migratable = 0x80,
-
- pe_order_runnable_left = 0x100, /* 'then' requires 'first' to be runnable */
-
- pe_order_pseudo_left = 0x200, /* 'then' can only be pseudo if 'first' is runnable */
- pe_order_implies_then_on_node = 0x400, /* If 'first' is required on 'nodeX',
- * ensure instances of 'then' on 'nodeX' are too.
- * Only really useful if 'then' is a clone and 'first' is not
- */
- pe_order_probe = 0x800, /* If 'first->rsc' is
- * - running but about to stop, ignore the constraint
- * - otherwise, behave as runnable_left
- */
-
- pe_order_restart = 0x1000, /* 'then' is runnable if 'first' is optional or runnable */
- pe_order_stonith_stop = 0x2000, //<! \deprecated Will be removed in future release
- pe_order_serialize_only = 0x4000, /* serialize */
- pe_order_same_node = 0x8000, /* applies only if 'first' and 'then' are on same node */
-
- pe_order_implies_first_printed = 0x10000, /* Like ..implies_first but only ensures 'first' is printed, not mandatory */
- pe_order_implies_then_printed = 0x20000, /* Like ..implies_then but only ensures 'then' is printed, not mandatory */
-
- pe_order_asymmetrical = 0x100000, /* Indicates asymmetrical one way ordering constraint. */
- pe_order_load = 0x200000, /* Only relevant if... */
- pe_order_one_or_more = 0x400000, /* 'then' is runnable only if one or more of its dependencies are too */
- pe_order_anti_colocation = 0x800000,
-
- pe_order_preserve = 0x1000000, /* Hack for breaking user ordering constraints with container resources */
- pe_order_then_cancels_first = 0x2000000, // if 'then' becomes required, 'first' becomes optional
- pe_order_trace = 0x4000000, /* test marker */
-
-#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
- // \deprecated Use pe_order_promoted_implies_first instead
- pe_order_implies_first_master = pe_order_promoted_implies_first,
-#endif
-};
-/* *INDENT-ON* */
-
-typedef struct pe_action_wrapper_s {
- enum pe_ordering type;
- enum pe_link_state state;
- pe_action_t *action;
-} pe_action_wrapper_t;
-
#if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
#include <crm/pengine/pe_types_compat.h>
#endif
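With the definitions above removed, pe_types.h now mainly pulls in <crm/common/scheduler.h> plus the compatibility header, so existing includes keep compiling while new code is expected to use the pcmk_-prefixed names directly. A before/after sketch of the intended migration (the function names are illustrative):

/* Before (2.1.6 spellings) */
#include <crm/pengine/pe_types.h>
void old_style(pe_working_set_t *data_set, pe_resource_t *rsc, pe_node_t *node);

/* After (2.1.7 spellings; the old names remain as deprecated aliases) */
#include <crm/common/scheduler.h>
void new_style(pcmk_scheduler_t *scheduler, pcmk_resource_t *rsc, pcmk_node_t *node);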
diff --git a/include/crm/pengine/pe_types_compat.h b/include/crm/pengine/pe_types_compat.h
index 6f174c4..1becd12 100644
--- a/include/crm/pengine/pe_types_compat.h
+++ b/include/crm/pengine/pe_types_compat.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -10,7 +10,7 @@
#ifndef PCMK__CRM_PENGINE_PE_TYPES_COMPAT__H
# define PCMK__CRM_PENGINE_PE_TYPES_COMPAT__H
-#include <crm/pengine/pe_types.h>
+#include <crm/common/scheduler.h>
#ifdef __cplusplus
extern "C" {
@@ -25,6 +25,174 @@ extern "C" {
* release.
*/
+//! \deprecated Use pcmk_rsc_removed instead
+#define pe_rsc_orphan pcmk_rsc_removed
+
+//! \deprecated Use pcmk_rsc_managed instead
+#define pe_rsc_managed pcmk_rsc_managed
+
+//! \deprecated Use pcmk_rsc_blocked instead
+#define pe_rsc_block pcmk_rsc_blocked
+
+//! \deprecated Use pcmk_rsc_removed_filler instead
+#define pe_rsc_orphan_container_filler pcmk_rsc_removed_filler
+
+//! \deprecated Use pcmk_rsc_notify instead
+#define pe_rsc_notify pcmk_rsc_notify
+
+//! \deprecated Use pcmk_rsc_unique instead
+#define pe_rsc_unique pcmk_rsc_unique
+
+//! \deprecated Use pcmk_rsc_fence_device instead
+#define pe_rsc_fence_device pcmk_rsc_fence_device
+
+//! \deprecated Use pcmk_rsc_promotable instead
+#define pe_rsc_promotable pcmk_rsc_promotable
+
+//! \deprecated Use pcmk_rsc_unassigned instead
+#define pe_rsc_provisional pcmk_rsc_unassigned
+
+//! \deprecated Use pcmk_rsc_assigning instead
+#define pe_rsc_allocating pcmk_rsc_assigning
+
+//! \deprecated Use pcmk_rsc_updating_nodes instead
+#define pe_rsc_merging pcmk_rsc_updating_nodes
+
+//! \deprecated Use pcmk_rsc_restarting instead
+#define pe_rsc_restarting pcmk_rsc_restarting
+
+//! \deprecated Use pcmk_rsc_stop_if_failed instead
+#define pe_rsc_stop pcmk_rsc_stop_if_failed
+
+//! \deprecated Use pcmk_rsc_reload instead
+#define pe_rsc_reload pcmk_rsc_reload
+
+//! \deprecated Use pcmk_rsc_remote_nesting_allowed instead
+#define pe_rsc_allow_remote_remotes pcmk_rsc_remote_nesting_allowed
+
+//! \deprecated Use pcmk_rsc_critical instead
+#define pe_rsc_critical pcmk_rsc_critical
+
+//! \deprecated Use pcmk_rsc_failed instead
+#define pe_rsc_failed pcmk_rsc_failed
+
+//! \deprecated Use pcmk_rsc_detect_loop instead
+#define pe_rsc_detect_loop pcmk_rsc_detect_loop
+
+//! \deprecated Do not use
+#define pe_rsc_runnable pcmk_rsc_runnable
+
+//! \deprecated Use pcmk_rsc_start_pending instead
+#define pe_rsc_start_pending pcmk_rsc_start_pending
+
+//!< \deprecated Do not use
+#define pe_rsc_starting pcmk_rsc_starting
+
+//!< \deprecated Do not use
+#define pe_rsc_stopping pcmk_rsc_stopping
+
+//! \deprecated Use pcmk_rsc_stop_unexpected instead
+#define pe_rsc_stop_unexpected pcmk_rsc_stop_unexpected
+
+//! \deprecated Use pcmk_rsc_migratable instead
+#define pe_rsc_allow_migrate pcmk_rsc_migratable
+
+//! \deprecated Use pcmk_rsc_ignore_failure instead
+#define pe_rsc_failure_ignored pcmk_rsc_ignore_failure
+
+//! \deprecated Use pcmk_rsc_replica_container instead
+#define pe_rsc_replica_container pcmk_rsc_replica_container
+
+//! \deprecated Use pcmk_rsc_maintenance instead
+#define pe_rsc_maintenance pcmk_rsc_maintenance
+
+//! \deprecated Do not use
+#define pe_rsc_is_container pcmk_rsc_has_filler
+
+//! \deprecated Use pcmk_rsc_needs_quorum instead
+#define pe_rsc_needs_quorum pcmk_rsc_needs_quorum
+
+//! \deprecated Use pcmk_rsc_needs_fencing instead
+#define pe_rsc_needs_fencing pcmk_rsc_needs_fencing
+
+//! \deprecated Use pcmk_rsc_needs_unfencing instead
+#define pe_rsc_needs_unfencing pcmk_rsc_needs_unfencing
+
+//! \deprecated Use pcmk_sched_quorate instead
+#define pe_flag_have_quorum pcmk_sched_quorate
+
+//! \deprecated Use pcmk_sched_symmetric_cluster instead
+#define pe_flag_symmetric_cluster pcmk_sched_symmetric_cluster
+
+//! \deprecated Use pcmk_sched_in_maintenance instead
+#define pe_flag_maintenance_mode pcmk_sched_in_maintenance
+
+//! \deprecated Use pcmk_sched_fencing_enabled instead
+#define pe_flag_stonith_enabled pcmk_sched_fencing_enabled
+
+//! \deprecated Use pcmk_sched_have_fencing instead
+#define pe_flag_have_stonith_resource pcmk_sched_have_fencing
+
+//! \deprecated Use pcmk_sched_enable_unfencing instead
+#define pe_flag_enable_unfencing pcmk_sched_enable_unfencing
+
+//! \deprecated Use pcmk_sched_concurrent_fencing instead
+#define pe_flag_concurrent_fencing pcmk_sched_concurrent_fencing
+
+//! \deprecated Use pcmk_sched_stop_removed_resources instead
+#define pe_flag_stop_rsc_orphans pcmk_sched_stop_removed_resources
+
+//! \deprecated Use pcmk_sched_cancel_removed_actions instead
+#define pe_flag_stop_action_orphans pcmk_sched_cancel_removed_actions
+
+//! \deprecated Use pcmk_sched_stop_all instead
+#define pe_flag_stop_everything pcmk_sched_stop_all
+
+//! \deprecated Use pcmk_sched_start_failure_fatal instead
+#define pe_flag_start_failure_fatal pcmk_sched_start_failure_fatal
+
+//! \deprecated Do not use
+#define pe_flag_remove_after_stop pcmk_sched_remove_after_stop
+
+//! \deprecated Use pcmk_sched_startup_fencing instead
+#define pe_flag_startup_fencing pcmk_sched_startup_fencing
+
+//! \deprecated Use pcmk_sched_shutdown_lock instead
+#define pe_flag_shutdown_lock pcmk_sched_shutdown_lock
+
+//! \deprecated Use pcmk_sched_probe_resources instead
+#define pe_flag_startup_probes pcmk_sched_probe_resources
+
+//! \deprecated Use pcmk_sched_have_status instead
+#define pe_flag_have_status pcmk_sched_have_status
+
+//! \deprecated Use pcmk_sched_have_remote_nodes instead
+#define pe_flag_have_remote_nodes pcmk_sched_have_remote_nodes
+
+//! \deprecated Use pcmk_sched_location_only instead
+#define pe_flag_quick_location pcmk_sched_location_only
+
+//! \deprecated Use pcmk_sched_sanitized instead
+#define pe_flag_sanitized pcmk_sched_sanitized
+
+//! \deprecated Do not use
+#define pe_flag_stdout (1ULL << 22)
+
+//! \deprecated Use pcmk_sched_no_counts instead
+#define pe_flag_no_counts pcmk_sched_no_counts
+
+//! \deprecated Use pcmk_sched_no_compat instead
+#define pe_flag_no_compat pcmk_sched_no_compat
+
+//! \deprecated Use pcmk_sched_output_scores instead
+#define pe_flag_show_scores pcmk_sched_output_scores
+
+//! \deprecated Use pcmk_sched_show_utilization instead
+#define pe_flag_show_utilization pcmk_sched_show_utilization
+
+//! \deprecated Use pcmk_sched_validate_only instead
+#define pe_flag_check_config pcmk_sched_validate_only
+
//!@{
//! \deprecated Do not use (unused by Pacemaker)
enum pe_graph_flags {
@@ -35,27 +203,62 @@ enum pe_graph_flags {
};
//!@}
-//!< \deprecated Use pe_action_t instead
+//!@{
+//! \deprecated Do not use
+enum pe_check_parameters {
+ pe_check_last_failure,
+ pe_check_active,
+};
+//!@}
+
+//! \deprecated Use pcmk_action_t instead
typedef struct pe_action_s action_t;
-//!< \deprecated Use pe_action_wrapper_t instead
+//! \deprecated Use pcmk_action_t instead
+typedef struct pe_action_s pe_action_t;
+
+//! \deprecated Do not use
typedef struct pe_action_wrapper_s action_wrapper_t;
-//!< \deprecated Use pe_node_t instead
+//! \deprecated Do not use
+typedef struct pe_action_wrapper_s pe_action_wrapper_t;
+
+//! \deprecated Use pcmk_node_t instead
typedef struct pe_node_s node_t;
-//!< \deprecated Use enum pe_quorum_policy instead
+//! \deprecated Use pcmk_node_t instead
+typedef struct pe_node_s pe_node_t;
+
+//! \deprecated Use enum pe_quorum_policy instead
typedef enum pe_quorum_policy no_quorum_policy_t;
-//!< \deprecated use pe_resource_t instead
+//! \deprecated use pcmk_resource_t instead
typedef struct pe_resource_s resource_t;
-//!< \deprecated Use pe_tag_t instead
+//! \deprecated use pcmk_resource_t instead
+typedef struct pe_resource_s pe_resource_t;
+
+//! \deprecated Use pcmk_tag_t instead
typedef struct pe_tag_s tag_t;
-//!< \deprecated Use pe_ticket_t instead
+//! \deprecated Use pcmk_tag_t instead
+typedef struct pe_tag_s pe_tag_t;
+
+//! \deprecated Use pcmk_ticket_t instead
typedef struct pe_ticket_s ticket_t;
+//! \deprecated Use pcmk_ticket_t instead
+typedef struct pe_ticket_s pe_ticket_t;
+
+//! \deprecated Use pcmk_scheduler_t instead
+typedef struct pe_working_set_s pe_working_set_t;
+
+//! \deprecated This type should be treated as internal to Pacemaker
+typedef struct resource_alloc_functions_s resource_alloc_functions_t;
+
+//! \deprecated Use pcmk_rsc_methods_t instead
+typedef struct resource_object_functions_s resource_object_functions_t;
+
#ifdef __cplusplus
}
#endif
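Because each alias above expands to the corresponding new constant, existing flag tests keep working unchanged; only the spelling differs. A short sketch using the existing pcmk_is_set() flag-test helper (the wrapper function is illustrative):

#include <stdbool.h>
#include <crm/common/util.h>                // pcmk_is_set()
#include <crm/pengine/pe_types_compat.h>    // deprecated pe_rsc_* aliases

static void
check_managed(const pcmk_resource_t *rsc)
{
    bool managed_old = pcmk_is_set(rsc->flags, pe_rsc_managed);    // deprecated spelling
    bool managed_new = pcmk_is_set(rsc->flags, pcmk_rsc_managed);  // preferred spelling
    // Both expand to the same bit, so the two values always agree.
    (void) managed_old;
    (void) managed_new;
}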
diff --git a/include/crm/pengine/remote_internal.h b/include/crm/pengine/remote_internal.h
index 46d58fc..0e7c044 100644
--- a/include/crm/pengine/remote_internal.h
+++ b/include/crm/pengine/remote_internal.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2013-2019 the Pacemaker project contributors
+ * Copyright 2013-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -19,16 +19,17 @@ extern "C" {
#include <crm/pengine/status.h>
bool xml_contains_remote_node(xmlNode *xml);
-bool pe__is_remote_node(const pe_node_t *node);
-bool pe__is_guest_node(const pe_node_t *node);
-bool pe__is_guest_or_remote_node(const pe_node_t *node);
-bool pe__is_bundle_node(const pe_node_t *node);
-bool pe__resource_is_remote_conn(const pe_resource_t *rsc,
- const pe_working_set_t *data_set);
-pe_resource_t *pe__resource_contains_guest_node(const pe_working_set_t *data_set,
- const pe_resource_t *rsc);
-void pe_foreach_guest_node(const pe_working_set_t *data_set, const pe_node_t *host,
- void (*helper)(const pe_node_t*, void*), void *user_data);
+bool pe__is_remote_node(const pcmk_node_t *node);
+bool pe__is_guest_node(const pcmk_node_t *node);
+bool pe__is_guest_or_remote_node(const pcmk_node_t *node);
+bool pe__is_bundle_node(const pcmk_node_t *node);
+bool pe__resource_is_remote_conn(const pcmk_resource_t *rsc);
+pcmk_resource_t *pe__resource_contains_guest_node(const pcmk_scheduler_t *scheduler,
+ const pcmk_resource_t *rsc);
+void pe_foreach_guest_node(const pcmk_scheduler_t *scheduler,
+ const pcmk_node_t *host,
+ void (*helper)(const pcmk_node_t*, void*),
+ void *user_data);
xmlNode *pe_create_remote_xml(xmlNode *parent, const char *uname,
const char *container_id, const char *migrateable,
const char *is_managed, const char *start_timeout,
diff --git a/include/crm/pengine/status.h b/include/crm/pengine/status.h
index 145a166..9c85425 100644
--- a/include/crm/pengine/status.h
+++ b/include/crm/pengine/status.h
@@ -15,7 +15,7 @@
# include <crm/common/util.h> // pcmk_is_set()
# include <crm/common/iso8601.h>
# include <crm/pengine/common.h>
-# include <crm/pengine/pe_types.h> // pe_node_t, pe_resource_t, etc.
+# include <crm/pengine/pe_types.h> // pcmk_node_t, pcmk_resource_t, etc.
# include <crm/pengine/complex.h>
#ifdef __cplusplus
@@ -28,24 +28,25 @@ extern "C" {
* \ingroup pengine
*/
-const char *rsc_printable_id(const pe_resource_t *rsc);
-gboolean cluster_status(pe_working_set_t * data_set);
-pe_working_set_t *pe_new_working_set(void);
-void pe_free_working_set(pe_working_set_t *data_set);
-void set_working_set_defaults(pe_working_set_t * data_set);
-void cleanup_calculations(pe_working_set_t * data_set);
-void pe_reset_working_set(pe_working_set_t *data_set);
-pe_resource_t *pe_find_resource(GList *rsc_list, const char *id_rh);
-pe_resource_t *pe_find_resource_with_flags(GList *rsc_list, const char *id, enum pe_find flags);
-pe_node_t *pe_find_node(const GList *node_list, const char *node_name);
-pe_node_t *pe_find_node_id(const GList *node_list, const char *id);
-pe_node_t *pe_find_node_any(const GList *node_list, const char *id,
+const char *rsc_printable_id(const pcmk_resource_t *rsc);
+gboolean cluster_status(pcmk_scheduler_t *scheduler);
+pcmk_scheduler_t *pe_new_working_set(void);
+void pe_free_working_set(pcmk_scheduler_t *scheduler);
+void set_working_set_defaults(pcmk_scheduler_t *scheduler);
+void cleanup_calculations(pcmk_scheduler_t *scheduler);
+void pe_reset_working_set(pcmk_scheduler_t *scheduler);
+pcmk_resource_t *pe_find_resource(GList *rsc_list, const char *id_rh);
+pcmk_resource_t *pe_find_resource_with_flags(GList *rsc_list, const char *id,
+ enum pe_find flags);
+pcmk_node_t *pe_find_node(const GList *node_list, const char *node_name);
+pcmk_node_t *pe_find_node_id(const GList *node_list, const char *id);
+pcmk_node_t *pe_find_node_any(const GList *node_list, const char *id,
const char *node_name);
GList *find_operations(const char *rsc, const char *node, gboolean active_filter,
- pe_working_set_t * data_set);
+ pcmk_scheduler_t *scheduler);
void calculate_active_ops(const GList *sorted_op_list, int *start_index,
int *stop_index);
-int pe_bundle_replicas(const pe_resource_t *rsc);
+int pe_bundle_replicas(const pcmk_resource_t *rsc);
/*!
* \brief Check whether a resource is any clone type
@@ -55,9 +56,9 @@ int pe_bundle_replicas(const pe_resource_t *rsc);
* \return true if resource is clone, false otherwise
*/
static inline bool
-pe_rsc_is_clone(const pe_resource_t *rsc)
+pe_rsc_is_clone(const pcmk_resource_t *rsc)
{
- return rsc && (rsc->variant == pe_clone);
+ return (rsc != NULL) && (rsc->variant == pcmk_rsc_variant_clone);
}
/*!
@@ -68,9 +69,9 @@ pe_rsc_is_clone(const pe_resource_t *rsc)
* \return true if resource is unique clone, false otherwise
*/
static inline bool
-pe_rsc_is_unique_clone(const pe_resource_t *rsc)
+pe_rsc_is_unique_clone(const pcmk_resource_t *rsc)
{
- return pe_rsc_is_clone(rsc) && pcmk_is_set(rsc->flags, pe_rsc_unique);
+ return pe_rsc_is_clone(rsc) && pcmk_is_set(rsc->flags, pcmk_rsc_unique);
}
/*!
@@ -81,9 +82,9 @@ pe_rsc_is_unique_clone(const pe_resource_t *rsc)
* \return true if resource is anonymous clone, false otherwise
*/
static inline bool
-pe_rsc_is_anon_clone(const pe_resource_t *rsc)
+pe_rsc_is_anon_clone(const pcmk_resource_t *rsc)
{
- return pe_rsc_is_clone(rsc) && !pcmk_is_set(rsc->flags, pe_rsc_unique);
+ return pe_rsc_is_clone(rsc) && !pcmk_is_set(rsc->flags, pcmk_rsc_unique);
}
/*!
@@ -94,7 +95,7 @@ pe_rsc_is_anon_clone(const pe_resource_t *rsc)
* \return true if resource is part of a bundle, false otherwise
*/
static inline bool
-pe_rsc_is_bundled(const pe_resource_t *rsc)
+pe_rsc_is_bundled(const pcmk_resource_t *rsc)
{
if (rsc == NULL) {
return false;
@@ -102,7 +103,7 @@ pe_rsc_is_bundled(const pe_resource_t *rsc)
while (rsc->parent != NULL) {
rsc = rsc->parent;
}
- return rsc->variant == pe_container;
+ return rsc->variant == pcmk_rsc_variant_bundle;
}
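The inline predicates above now compare against the pcmk_rsc_variant_* enumerators instead of the old pe_* values; callers are unchanged. An illustrative combination (the function name is not from upstream):

#include <stdbool.h>
#include <crm/pengine/status.h>

// Illustrative: bundled resources are handled through their bundle even when cloned
static bool
needs_clone_handling(const pcmk_resource_t *rsc)
{
    return pe_rsc_is_clone(rsc) && !pe_rsc_is_bundled(rsc);
}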
#ifdef __cplusplus
diff --git a/include/crm/services_compat.h b/include/crm/services_compat.h
index 97310f4..456d351 100644
--- a/include/crm/services_compat.h
+++ b/include/crm/services_compat.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2010-2022 the Pacemaker project contributors
+ * Copyright 2010-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -10,7 +10,7 @@
#ifndef PCMK__CRM_SERVICES_COMPAT__H
# define PCMK__CRM_SERVICES_COMPAT__H
-
+#include <crm/common/actions.h>
#include <crm/common/results.h>
#include <crm/services.h>
#include <glib.h>
@@ -68,7 +68,8 @@ static inline enum ocf_exitcode
services_get_ocf_exitcode(const char *action, int lsb_exitcode)
{
/* For non-status actions, LSB and OCF share error code meaning <= 7 */
- if (action && strcmp(action, "status") && strcmp(action, "monitor")) {
+ if ((action != NULL) && (strcmp(action, PCMK_ACTION_STATUS) != 0)
+ && (strcmp(action, PCMK_ACTION_MONITOR) != 0)) {
if ((lsb_exitcode < 0) || (lsb_exitcode > PCMK_LSB_NOT_RUNNING)) {
return PCMK_OCF_UNKNOWN_ERROR;
}
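The change above only swaps the "status"/"monitor" literals for the new PCMK_ACTION_* constants; the mapping logic is unchanged. A usage sketch (the wrapper is illustrative):

#include <crm/services_compat.h>    // services_get_ocf_exitcode()

// Illustrative: translate an LSB exit code from a "start" action into OCF terms
static enum ocf_exitcode
map_lsb_start_result(int lsb_exitcode)
{
    // Non-status actions share LSB/OCF meanings for codes 0..7; anything else
    // becomes PCMK_OCF_UNKNOWN_ERROR, as the inline function above shows.
    return services_get_ocf_exitcode("start", lsb_exitcode);
}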
diff --git a/include/crm_internal.h b/include/crm_internal.h
index 5f6531f..71a0f7e 100644
--- a/include/crm_internal.h
+++ b/include/crm_internal.h
@@ -81,9 +81,14 @@
#define PCMK__XA_CONFIG_ERRORS "config-errors"
#define PCMK__XA_CONFIG_WARNINGS "config-warnings"
#define PCMK__XA_CONFIRM "confirm"
+#define PCMK__XA_CRMD "crmd"
+#define PCMK__XA_EXPECTED "expected"
#define PCMK__XA_GRAPH_ERRORS "graph-errors"
#define PCMK__XA_GRAPH_WARNINGS "graph-warnings"
+#define PCMK__XA_IN_CCM "in_ccm"
+#define PCMK__XA_JOIN "join"
#define PCMK__XA_MODE "mode"
+#define PCMK__XA_NODE_START_STATE "node_start_state"
#define PCMK__XA_TASK "task"
#define PCMK__XA_UPTIME "uptime"
#define PCMK__XA_CONN_HOST "connection_host"
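The new PCMK__XA_* constants name node-state XML attributes that were previously written as bare string literals. A hedged sketch of how they might be used with the existing crm_xml_add() setter; the helper and its arguments are illustrative only:

#include <crm_internal.h>       // PCMK__XA_IN_CCM, PCMK__XA_CRMD
#include <crm/common/xml.h>     // crm_xml_add()

// Illustrative: record membership and controller state on a node_state element
static void
mark_node_state(xmlNode *node_state, const char *in_ccm, const char *crmd_state)
{
    crm_xml_add(node_state, PCMK__XA_IN_CCM, in_ccm);
    crm_xml_add(node_state, PCMK__XA_CRMD, crmd_state);
}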
diff --git a/include/pacemaker-internal.h b/include/pacemaker-internal.h
index 8610d1e..9e6ff21 100644
--- a/include/pacemaker-internal.h
+++ b/include/pacemaker-internal.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2019-2022 the Pacemaker project contributors
+ * Copyright 2019-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -11,14 +11,13 @@
# define PACEMAKER_INTERNAL__H
# include <pcmki/pcmki_acl.h>
+# include <pcmki/pcmki_agents.h>
# include <pcmki/pcmki_cluster_queries.h>
# include <pcmki/pcmki_fence.h>
# include <pcmki/pcmki_output.h>
# include <pcmki/pcmki_resource.h>
# include <pcmki/pcmki_result_code.h>
# include <pcmki/pcmki_rule.h>
-# include <pcmki/pcmki_sched_allocate.h>
-# include <pcmki/pcmki_sched_utils.h>
# include <pcmki/pcmki_scheduler.h>
# include <pcmki/pcmki_simulate.h>
# include <pcmki/pcmki_status.h>
diff --git a/include/pacemaker.h b/include/pacemaker.h
index f5c375a..ffa99ff 100644
--- a/include/pacemaker.h
+++ b/include/pacemaker.h
@@ -12,8 +12,8 @@
# include <glib.h>
# include <libxml/tree.h>
+# include <crm/common/scheduler.h>
# include <crm/cib/cib_types.h>
-# include <crm/pengine/pe_types.h>
# include <crm/stonith-ng.h>
@@ -202,13 +202,13 @@ int pcmk_pacemakerd_status(xmlNodePtr *xml, const char *ipc_name,
* \param[in,out] rsc Resource to calculate digests for
* \param[in] node Node whose operation history should be used
* \param[in] overrides Hash table of configuration parameters to override
- * \param[in] data_set Cluster working set (with status)
+ * \param[in] scheduler Scheduler data (with status)
*
* \return Standard Pacemaker return code
*/
-int pcmk_resource_digests(xmlNodePtr *xml, pe_resource_t *rsc,
- const pe_node_t *node, GHashTable *overrides,
- pe_working_set_t *data_set);
+int pcmk_resource_digests(xmlNodePtr *xml, pcmk_resource_t *rsc,
+ const pcmk_node_t *node, GHashTable *overrides,
+ pcmk_scheduler_t *scheduler);
/*!
* \brief Simulate a cluster's response to events
@@ -219,7 +219,7 @@ int pcmk_resource_digests(xmlNodePtr *xml, pe_resource_t *rsc,
* simulation. Output can be modified with various flags.
*
* \param[in,out] xml The destination for the result, as an XML tree
- * \param[in,out] data_set Working set for the cluster
+ * \param[in,out] scheduler Scheduler data
* \param[in] injections A structure containing cluster events
* (node up/down, tickets, injected operations)
* \param[in] flags A bitfield of :pcmk_sim_flags to modify
@@ -238,7 +238,7 @@ int pcmk_resource_digests(xmlNodePtr *xml, pe_resource_t *rsc,
*
* \return Standard Pacemaker return code
*/
-int pcmk_simulate(xmlNodePtr *xml, pe_working_set_t *data_set,
+int pcmk_simulate(xmlNodePtr *xml, pcmk_scheduler_t *scheduler,
const pcmk_injections_t *injections, unsigned int flags,
unsigned int section_opts, const char *use_date,
const char *input_file, const char *graph_file,
@@ -337,6 +337,45 @@ int pcmk_show_result_code(xmlNodePtr *xml, int code, enum pcmk_result_type type,
int pcmk_list_result_codes(xmlNodePtr *xml, enum pcmk_result_type type,
uint32_t flags);
+/*!
+ * \brief List available providers for the given OCF agent
+ *
+ * \param[in,out] xml The destination for the result, as an XML tree
+ * \param[in] agent_spec Resource agent name
+ *
+ * \return Standard Pacemaker return code
+ */
+int pcmk_list_alternatives(xmlNodePtr *xml, const char *agent_spec);
+
+/*!
+ * \brief List all agents available for the named standard and/or provider
+ *
+ * \param[in,out] xml The destination for the result, as an XML tree
+ * \param[in] agent_spec STD[:PROV]
+ *
+ * \return Standard Pacemaker return code
+ */
+int pcmk_list_agents(xmlNodePtr *xml, char *agent_spec);
+
+/*!
+ * \brief List all available OCF providers for the given agent
+ *
+ * \param[in,out] xml The destination for the result, as an XML tree
+ * \param[in] agent_spec Resource agent name
+ *
+ * \return Standard Pacemaker return code
+ */
+int pcmk_list_providers(xmlNodePtr *xml, const char *agent_spec);
+
+/*!
+ * \brief List all available resource agent standards
+ *
+ * \param[in,out] xml The destination for the result, as an XML tree
+ *
+ * \return Standard Pacemaker return code
+ */
+int pcmk_list_standards(xmlNodePtr *xml);
+
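A minimal sketch of calling one of the new public listing entry points; error handling is trimmed, and free_xml() is the usual libcrmcommon helper assumed here for releasing the returned tree:

#include <stdio.h>
#include <libxml/tree.h>
#include <crm/common/xml.h>     // free_xml()
#include <pacemaker.h>

static int
print_available_standards(void)
{
    xmlNodePtr out = NULL;
    int rc = pcmk_list_standards(&out);    // standard Pacemaker return code

    if (out != NULL) {
        xmlElemDump(stdout, NULL, out);    // or walk the XML tree programmatically
        free_xml(out);
    }
    return rc;
}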
#ifdef BUILD_PUBLIC_LIBPACEMAKER
/*!
diff --git a/include/pcmki/Makefile.am b/include/pcmki/Makefile.am
index b379fdb..b9475af 100644
--- a/include/pcmki/Makefile.am
+++ b/include/pcmki/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2019-2022 the Pacemaker project contributors
+# Copyright 2019-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -9,18 +9,6 @@
MAINTAINERCLEANFILES = Makefile.in
-noinst_HEADERS = pcmki_acl.h \
- pcmki_cluster_queries.h \
- pcmki_fence.h \
- pcmki_output.h \
- pcmki_resource.h \
- pcmki_result_code.h \
- pcmki_rule.h \
- pcmki_sched_allocate.h \
- pcmki_sched_utils.h \
- pcmki_scheduler.h \
- pcmki_simulate.h \
- pcmki_status.h \
- pcmki_transition.h
+noinst_HEADERS = $(wildcard *.h)
.PHONY: $(ARCHIVE_VERSION)
diff --git a/include/pcmki/pcmki_agents.h b/include/pcmki/pcmki_agents.h
new file mode 100644
index 0000000..eefe3e5
--- /dev/null
+++ b/include/pcmki/pcmki_agents.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+#ifndef PCMK__PCMKI_PCMKI_AGENTS__H
+#define PCMK__PCMKI_PCMKI_AGENTS__H
+
+#include <crm/common/output_internal.h>
+
+int pcmk__list_alternatives(pcmk__output_t *out, const char *agent_spec);
+int pcmk__list_agents(pcmk__output_t *out, char *agent_spec);
+int pcmk__list_providers(pcmk__output_t *out, const char *agent_spec);
+int pcmk__list_standards(pcmk__output_t *out);
+
+#endif /* PCMK__PCMKI_PCMKI_AGENTS__H */
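These internal pcmk__list_*() functions write through a pcmk__output_t formatter and back the public pcmk_list_*() wrappers added in pacemaker.h. A hedged sketch, assuming an already-initialized output object:

#include <pcmki/pcmki_agents.h>

// Illustrative: list every agent for one standard:provider through an existing formatter
static int
show_ocf_heartbeat_agents(pcmk__output_t *out)
{
    char spec[] = "ocf:heartbeat";      // writable buffer, since the API takes char *
    return pcmk__list_agents(out, spec);
}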
diff --git a/include/pcmki/pcmki_cluster_queries.h b/include/pcmki/pcmki_cluster_queries.h
index 776aa27..3fa4c23 100644
--- a/include/pcmki/pcmki_cluster_queries.h
+++ b/include/pcmki/pcmki_cluster_queries.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-2022 the Pacemaker project contributors
+ * Copyright 2020-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -10,7 +10,8 @@
#ifndef PCMK__PCMKI_PCMKI_CLUSTER_QUERIES__H
# define PCMK__PCMKI_PCMKI_CLUSTER_QUERIES__H
-#include <glib.h> // gboolean, GMainLoop, etc.
+#include <stdbool.h>
+#include <stdint.h>
#include <crm/crm.h>
#include <crm/common/output_internal.h>
@@ -19,7 +20,7 @@
// CIB queries
int pcmk__list_nodes(pcmk__output_t *out, const char *node_types,
- gboolean bash_export);
+ bool bash_export);
// Controller queries
int pcmk__controller_status(pcmk__output_t *out, const char *node_name,
diff --git a/include/pcmki/pcmki_resource.h b/include/pcmki/pcmki_resource.h
index dc8ac69..442bb1f 100644
--- a/include/pcmki/pcmki_resource.h
+++ b/include/pcmki/pcmki_resource.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2021-2022 the Pacemaker project contributors
+ * Copyright 2021-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -11,10 +11,10 @@
#include <glib.h>
+#include <crm/common/scheduler.h>
#include <crm/common/output_internal.h>
-#include <crm/pengine/pe_types.h>
-int pcmk__resource_digests(pcmk__output_t *out, pe_resource_t *rsc,
- const pe_node_t *node, GHashTable *overrides);
+int pcmk__resource_digests(pcmk__output_t *out, pcmk_resource_t *rsc,
+ const pcmk_node_t *node, GHashTable *overrides);
#endif /* PCMK__PCMKI_PCMKI_RESOURCE__H */
diff --git a/include/pcmki/pcmki_sched_allocate.h b/include/pcmki/pcmki_sched_allocate.h
deleted file mode 100644
index 32044ea..0000000
--- a/include/pcmki/pcmki_sched_allocate.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright 2004-2023 the Pacemaker project contributors
- *
- * The version control history for this file may have further details.
- *
- * This source code is licensed under the GNU Lesser General Public License
- * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
- */
-
-#ifndef PCMK__PCMKI_PCMKI_SCHED_ALLOCATE__H
-# define PCMK__PCMKI_PCMKI_SCHED_ALLOCATE__H
-
-# include <glib.h>
-# include <crm/common/xml.h>
-# include <crm/pengine/status.h>
-# include <crm/pengine/complex.h>
-# include <crm/common/xml_internal.h>
-# include <crm/pengine/internal.h>
-# include <crm/common/xml.h>
-# include <pcmki/pcmki_scheduler.h>
-
-pe_node_t *pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer);
-void pcmk__bundle_create_actions(pe_resource_t *rsc);
-bool pcmk__bundle_create_probe(pe_resource_t *rsc, pe_node_t *node);
-void pcmk__bundle_internal_constraints(pe_resource_t *rsc);
-void pcmk__bundle_rsc_location(pe_resource_t *rsc, pe__location_t *constraint);
-enum pe_action_flags pcmk__bundle_action_flags(pe_action_t *action,
- const pe_node_t *node);
-void pcmk__bundle_expand(pe_resource_t *rsc);
-void pcmk__bundle_add_utilization(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc,
- GList *all_rscs, GHashTable *utilization);
-void pcmk__bundle_shutdown_lock(pe_resource_t *rsc);
-
-void clone_create_actions(pe_resource_t *rsc);
-void clone_internal_constraints(pe_resource_t *rsc);
-void clone_rsc_location(pe_resource_t *rsc, pe__location_t *constraint);
-enum pe_action_flags clone_action_flags(pe_action_t *action,
- const pe_node_t *node);
-void clone_expand(pe_resource_t *rsc);
-bool clone_create_probe(pe_resource_t *rsc, pe_node_t *node);
-void clone_append_meta(const pe_resource_t *rsc, xmlNode *xml);
-void pcmk__clone_add_utilization(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc,
- GList *all_rscs, GHashTable *utilization);
-void pcmk__clone_shutdown_lock(pe_resource_t *rsc);
-
-void pcmk__log_transition_summary(const char *filename);
-
-#endif
diff --git a/include/pcmki/pcmki_sched_utils.h b/include/pcmki/pcmki_sched_utils.h
deleted file mode 100644
index 3e6d52f..0000000
--- a/include/pcmki/pcmki_sched_utils.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright 2004-2023 the Pacemaker project contributors
- *
- * The version control history for this file may have further details.
- *
- * This source code is licensed under the GNU Lesser General Public License
- * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
- */
-
-#ifndef PCMK__PCMKI_PCMKI_SCHED_UTILS__H
-# define PCMK__PCMKI_PCMKI_SCHED_UTILS__H
-
-#include <stdbool.h> // bool
-#include <glib.h> // GList, GHashTable, gboolean, guint
-#include <crm/lrmd.h> // lrmd_event_data_t
-#include <crm/cib.h> // cib_t
-#include <crm/pengine/pe_types.h>
-#include <crm/common/xml_internal.h>
-#include <crm/pengine/internal.h>
-#include <pcmki/pcmki_scheduler.h>
-#include <pcmki/pcmki_transition.h>
-#include <pacemaker.h>
-
-/* Constraint helper functions */
-GList *pcmk__copy_node_list(const GList *list, bool reset);
-
-int copies_per_node(pe_resource_t * rsc);
-
-xmlNode *pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *event,
- const char *caller_version, int target_rc,
- const char *node, const char *origin);
-
-#endif
diff --git a/include/pcmki/pcmki_scheduler.h b/include/pcmki/pcmki_scheduler.h
index dde50a5..9adb9a9 100644
--- a/include/pcmki/pcmki_scheduler.h
+++ b/include/pcmki/pcmki_scheduler.h
@@ -8,36 +8,37 @@
*/
#ifndef PCMK__PCMKI_PCMKI_SCHEDULER__H
-# define PCMK__PCMKI_PCMKI_SCHEDULER__H
+#define PCMK__PCMKI_PCMKI_SCHEDULER__H
-# include <glib.h>
-# include <crm/crm.h>
-# include <crm/common/iso8601.h>
-# include <crm/pengine/rules.h>
-# include <crm/pengine/common.h>
-# include <crm/pengine/status.h>
+#include <glib.h> // GList
+#include <stdbool.h> // bool
+#include <libxml/tree.h> // xmlNode
-# include <crm/pengine/complex.h>
+#include <crm/lrmd_events.h> // lrmd_event_data_t
+#include <crm/pengine/status.h> // pcmk_resource_t, pcmk_scheduler_t
typedef struct {
const char *id;
const char *node_attribute;
- pe_resource_t *dependent; // The resource being colocated
- pe_resource_t *primary; // The resource the dependent is colocated with
+ pcmk_resource_t *dependent; // The resource being colocated
+ pcmk_resource_t *primary; // The resource the dependent is colocated with
int dependent_role; // Colocation applies only if dependent has this role
int primary_role; // Colocation applies only if primary has this role
int score;
- bool influence; // Whether dependent influences active primary placement
+ uint32_t flags; // Group of enum pcmk__coloc_flags
} pcmk__colocation_t;
-void pcmk__unpack_constraints(pe_working_set_t *data_set);
+void pcmk__unpack_constraints(pcmk_scheduler_t *scheduler);
void pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
- pe_working_set_t *data_set);
+ pcmk_scheduler_t *scheduler);
-GList *pcmk__with_this_colocations(const pe_resource_t *rsc);
-GList *pcmk__this_with_colocations(const pe_resource_t *rsc);
+GList *pcmk__copy_node_list(const GList *list, bool reset);
+
+xmlNode *pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *event,
+ const char *caller_version, int target_rc,
+ const char *node, const char *origin);
#endif
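The colocation struct above trades its single "influence" boolean for a 32-bit flag group, the bit-flag idiom used throughout these internal headers. A minimal, self-contained sketch of that idiom follows; the enum values are illustrative placeholders, not the real pcmk__coloc_flags members (which are defined outside this hunk).

    /* Sketch of the flag-group idiom behind the new "uint32_t flags" member.
     * The enum values below are invented for illustration only. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum coloc_flags {
        coloc_none      = 0,
        coloc_influence = (1 << 0),  // dependent influences primary placement
        coloc_explicit  = (1 << 1),  // colocation came from the configuration
    };

    static bool
    all_flags_set(uint32_t flags, uint32_t to_check)
    {
        return (flags & to_check) == to_check;
    }

    int
    main(void)
    {
        uint32_t flags = coloc_none;

        flags |= coloc_influence;                   // set one flag
        if (all_flags_set(flags, coloc_influence)) {
            printf("dependent influences primary placement\n");
        }
        flags &= ~coloc_influence;                  // clear it again
        return 0;
    }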
diff --git a/include/pcmki/pcmki_simulate.h b/include/pcmki/pcmki_simulate.h
index 0b09903..ab73411 100644
--- a/include/pcmki/pcmki_simulate.h
+++ b/include/pcmki/pcmki_simulate.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2021-2022 the Pacemaker project contributors
+ * Copyright 2021-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -11,7 +11,7 @@
# define PCMK__PCMKI_PCMKI_SIMULATE__H
#include <crm/common/output_internal.h>
-#include <crm/pengine/pe_types.h>
+#include <crm/common/scheduler.h>
#include <pcmki/pcmki_transition.h>
#include <crm/cib.h> // cib_t
#include <pacemaker.h>
@@ -24,28 +24,28 @@
* CIB file in a given directory, printing the profiling timings for
* each.
*
- * \note \p data_set->priv must have been set to a valid \p pcmk__output_t
+ * \note \p scheduler->priv must have been set to a valid \p pcmk__output_t
* object before this function is called.
*
- * \param[in] dir A directory full of CIB files to be profiled
- * \param[in] repeat Number of times to run on each input file
- * \param[in,out] data_set Working set for the cluster
- * \param[in] use_date The date to set the cluster's time to (may be NULL)
+ * \param[in] dir A directory full of CIB files to be profiled
+ * \param[in] repeat Number of times to run on each input file
+ * \param[in,out] scheduler Scheduler data
+ * \param[in] use_date The date to set the cluster's time to (may be NULL)
*/
-void pcmk__profile_dir(const char *dir, long long repeat, pe_working_set_t *data_set,
- const char *use_date);
+void pcmk__profile_dir(const char *dir, long long repeat,
+ pcmk_scheduler_t *scheduler, const char *use_date);
/*!
* \internal
* \brief Simulate executing a transition
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
* \param[in,out] cib CIB object for scheduler input
* \param[in] op_fail_list List of actions to simulate as failing
*
* \return Transition status after simulated execution
*/
-enum pcmk__graph_status pcmk__simulate_transition(pe_working_set_t *data_set,
+enum pcmk__graph_status pcmk__simulate_transition(pcmk_scheduler_t *scheduler,
cib_t *cib,
const GList *op_fail_list);
@@ -58,7 +58,7 @@ enum pcmk__graph_status pcmk__simulate_transition(pe_working_set_t *data_set,
* optionally writes out a variety of artifacts to show the results of the
* simulation. Output can be modified with various flags.
*
- * \param[in,out] data_set Working set for the cluster
+ * \param[in,out] scheduler Scheduler data
* \param[in,out] out The output functions structure
* \param[in] injections A structure containing cluster events
* (node up/down, tickets, injected operations)
@@ -80,7 +80,7 @@ enum pcmk__graph_status pcmk__simulate_transition(pe_working_set_t *data_set,
*
* \return Standard Pacemaker return code
*/
-int pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out,
+int pcmk__simulate(pcmk_scheduler_t *scheduler, pcmk__output_t *out,
const pcmk_injections_t *injections, unsigned int flags,
uint32_t section_opts, const char *use_date,
const char *input_file, const char *graph_file,
diff --git a/include/pcmki/pcmki_status.h b/include/pcmki/pcmki_status.h
index 6b48069..01139bb 100644
--- a/include/pcmki/pcmki_status.h
+++ b/include/pcmki/pcmki_status.h
@@ -13,7 +13,7 @@
#include <stdint.h>
#include <crm/cib/cib_types.h>
-#include <crm/pengine/pe_types.h>
+#include <crm/common/scheduler.h>
#include <crm/common/ipc_pacemakerd.h>
#include <crm/common/output_internal.h>
#include <pcmki/pcmki_fence.h>
@@ -26,8 +26,8 @@ extern "C" {
* \internal
* \brief Print one-line status suitable for use with monitoring software
*
- * \param[in,out] out Output object
- * \param[in] data_set Cluster working set
+ * \param[in,out] out Output object
+ * \param[in] scheduler Scheduler data
*
* \return Standard Pacemaker return code
*
@@ -39,7 +39,7 @@ extern "C" {
* callers should be added.
*/
int pcmk__output_simple_status(pcmk__output_t *out,
- const pe_working_set_t *data_set);
+ const pcmk_scheduler_t *scheduler);
int pcmk__output_cluster_status(pcmk__output_t *out, stonith_t *stonith,
cib_t *cib, xmlNode *current_cib,
diff --git a/include/pcmki/pcmki_transition.h b/include/pcmki/pcmki_transition.h
index 5dc3101..93237ed 100644
--- a/include/pcmki/pcmki_transition.h
+++ b/include/pcmki/pcmki_transition.h
@@ -14,6 +14,7 @@
# include <crm/crm.h>
# include <crm/msg_xml.h>
# include <crm/common/xml.h>
+# include <crm/lrmd_events.h> // lrmd_event_data_t
#ifdef __cplusplus
extern "C" {
@@ -164,6 +165,7 @@ void pcmk__free_graph(pcmk__graph_t *graph);
const char *pcmk__graph_status2text(enum pcmk__graph_status state);
void pcmk__log_graph(unsigned int log_level, pcmk__graph_t *graph);
void pcmk__log_graph_action(int log_level, pcmk__graph_action_t *action);
+void pcmk__log_transition_summary(const char *filename);
lrmd_event_data_t *pcmk__event_from_graph_action(const xmlNode *resource,
const pcmk__graph_action_t *action,
int status, int rc,
diff --git a/include/portability.h b/include/portability.h
index 932642d..368f887 100644
--- a/include/portability.h
+++ b/include/portability.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2001-2021 the Pacemaker project contributors
+ * Copyright 2001-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -27,45 +27,6 @@
# endif
# endif
-/* Prototypes for libreplace functions */
-
-# ifndef HAVE_DAEMON
- /* We supply a replacement function, but need a prototype */
-int daemon(int nochdir, int noclose);
-# endif
-
-# ifndef HAVE_SETENV
- /* We supply a replacement function, but need a prototype */
-int setenv(const char *name, const char *value, int why);
-# endif
-
-# ifndef HAVE_STRERROR
- /* We supply a replacement function, but need a prototype */
-char *strerror(int errnum);
-# endif
-
-# ifndef HAVE_STRCHRNUL
- /* We supply a replacement function, but need a prototype */
-char *strchrnul(const char *s, int c_in);
-# endif
-
-# ifndef HAVE_ALPHASORT
-# include <dirent.h>
-int alphasort(const void *dirent1, const void *dirent2);
-# endif
-
-# ifndef HAVE_STRNLEN
-size_t strnlen(const char *s, size_t maxlen);
-# else
-# define USE_GNU
-# endif
-
-# ifndef HAVE_STRNDUP
-char *strndup(const char *str, size_t len);
-# else
-# define USE_GNU
-# endif
-
# if HAVE_DBUS
# ifndef HAVE_DBUSBASICVALUE
# include <stdint.h>
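The block removed above only declared prototypes for replacement functions (daemon, setenv, strerror, strchrnul, alphasort, strnlen, strndup); the definitions lived in libreplace-style sources compiled in when configure could not find the native call. A sketch of that pattern, with HAVE_SETENV standing in for whatever the build system actually defines:

    /* Sketch of a configure-guarded fallback prototype, as the removed
     * portability.h block did.  HAVE_SETENV is a placeholder for the real
     * configure-generated macro; the replacement definition would live in
     * a separate compatibility source file. */
    #ifndef PORTABILITY_SKETCH_H
    #define PORTABILITY_SKETCH_H

    #ifndef HAVE_SETENV
    /* A replacement function is supplied elsewhere in the build; callers
     * only need a prototype so they can use the standard name. */
    int setenv(const char *name, const char *value, int overwrite);
    #endif

    #endif /* PORTABILITY_SKETCH_H */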
diff --git a/lib/Makefile.am b/lib/Makefile.am
index ed5bfa3..52cf974 100644
--- a/lib/Makefile.am
+++ b/lib/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2003-2021 the Pacemaker project contributors
+# Copyright 2003-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -8,7 +8,11 @@
#
MAINTAINERCLEANFILES = Makefile.in
-LIBS = cib lrmd service fencing cluster
+LIBS = cib \
+ lrmd \
+ service \
+ fencing \
+ cluster
pkgconfig_DATA = $(LIBS:%=pacemaker-%.pc) \
libpacemaker.pc \
@@ -18,4 +22,12 @@ pkgconfig_DATA = $(LIBS:%=pacemaker-%.pc) \
EXTRA_DIST = $(pkgconfig_DATA:%=%.in)
-SUBDIRS = gnu common pengine cib services fencing lrmd cluster pacemaker
+SUBDIRS = gnu \
+ common \
+ pengine \
+ cib \
+ services \
+ fencing \
+ lrmd \
+ cluster \
+ pacemaker
diff --git a/lib/cib/Makefile.am b/lib/cib/Makefile.am
index 721fca1..a74c4b1 100644
--- a/lib/cib/Makefile.am
+++ b/lib/cib/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2004-2018 the Pacemaker project contributors
+# Copyright 2004-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,18 +11,20 @@ include $(top_srcdir)/mk/common.mk
## libraries
lib_LTLIBRARIES = libcib.la
-## SOURCES
-libcib_la_SOURCES = cib_ops.c cib_utils.c cib_client.c cib_native.c cib_attrs.c
-libcib_la_SOURCES += cib_file.c cib_remote.c
+## Library sources (*must* use += format for bumplibs)
+libcib_la_SOURCES = cib_attrs.c
+libcib_la_SOURCES += cib_client.c
+libcib_la_SOURCES += cib_file.c
+libcib_la_SOURCES += cib_native.c
+libcib_la_SOURCES += cib_ops.c
+libcib_la_SOURCES += cib_remote.c
+libcib_la_SOURCES += cib_utils.c
-libcib_la_LDFLAGS = -version-info 31:0:4
+libcib_la_LDFLAGS = -version-info 32:0:5
libcib_la_CPPFLAGS = -I$(top_srcdir) $(AM_CPPFLAGS)
libcib_la_CFLAGS = $(CFLAGS_HARDENED_LIB)
libcib_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB)
-libcib_la_LIBADD = $(top_builddir)/lib/pengine/libpe_rules.la \
- $(top_builddir)/lib/common/libcrmcommon.la
-
-clean-generic:
- rm -f *.log *.debug *.xml *~
+libcib_la_LIBADD = $(top_builddir)/lib/pengine/libpe_rules.la \
+ $(top_builddir)/lib/common/libcrmcommon.la
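The libtool -version-info bump above (31:0:4 to 32:0:5) follows the standard current:revision:age rules for an interface-adding release: current is incremented, revision resets to 0, and age is incremented, which tells libtool that the new libcib remains binary-compatible with clients built against the previous version.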
diff --git a/lib/cib/cib_attrs.c b/lib/cib/cib_attrs.c
index 5f3a722..11629b8 100644
--- a/lib/cib/cib_attrs.c
+++ b/lib/cib/cib_attrs.c
@@ -152,16 +152,15 @@ find_attr(cib_t *cib, const char *section, const char *node_uuid,
static int
handle_multiples(pcmk__output_t *out, xmlNode *search, const char *attr_name)
{
- if (xml_has_children(search)) {
+ if ((search != NULL) && (search->children != NULL)) {
xmlNode *child = NULL;
- out->info(out, "Multiple attributes match name=%s", attr_name);
+ out->info(out, "Multiple attributes match name=%s", attr_name);
for (child = pcmk__xml_first_child(search); child != NULL;
child = pcmk__xml_next(child)) {
out->info(out, " Value: %s \t(id=%s)",
crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child));
}
-
return ENOTUNIQ;
} else {
@@ -184,9 +183,9 @@ cib__update_node_attr(pcmk__output_t *out, cib_t *cib, int call_options, const c
char *local_attr_id = NULL;
char *local_set_name = NULL;
- CRM_CHECK(section != NULL, return EINVAL);
- CRM_CHECK(attr_value != NULL, return EINVAL);
- CRM_CHECK(attr_name != NULL || attr_id != NULL, return EINVAL);
+ CRM_CHECK((out != NULL) && (cib != NULL) && (section != NULL)
+ && ((attr_id != NULL) || (attr_name != NULL))
+ && (attr_value != NULL), return EINVAL);
rc = find_attr(cib, section, node_uuid, set_type, set_name, attr_id,
attr_name, user_name, &xml_search);
@@ -360,7 +359,7 @@ cib__get_node_attrs(pcmk__output_t *out, cib_t *cib, const char *section,
crm_trace("Query failed for attribute %s (section=%s node=%s set=%s): %s",
pcmk__s(attr_name, "with unspecified name"),
section, pcmk__s(set_name, "<null>"),
- pcmk__s(node_uuid, "<null>"), pcmk_strerror(rc));
+ pcmk__s(node_uuid, "<null>"), pcmk_rc_str(rc));
}
return rc;
@@ -487,7 +486,7 @@ read_attr_delegate(cib_t *cib, const char *section, const char *node_uuid,
attr_id, attr_name, user_name, &result);
if (rc == pcmk_rc_ok) {
- if (!xml_has_children(result)) {
+ if (result->children == NULL) {
pcmk__str_update(attr_value, crm_element_value(result, XML_NVPAIR_ATTR_VALUE));
} else {
rc = ENOTUNIQ;
@@ -677,9 +676,7 @@ query_node_uname(cib_t * the_cib, const char *uuid, char **uname)
}
xml_obj = fragment;
- CRM_CHECK(pcmk__str_eq(crm_element_name(xml_obj), XML_CIB_TAG_NODES, pcmk__str_casei),
- return -ENOMSG);
- CRM_ASSERT(xml_obj != NULL);
+ CRM_CHECK(pcmk__xe_is(xml_obj, XML_CIB_TAG_NODES), return -ENOMSG);
crm_log_xml_trace(xml_obj, "Result section");
rc = -ENXIO;
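Several hunks above replace wrapper helpers with direct libxml2 field access: xml_has_children(x) becomes a check of x->children, and element-name comparisons move to pcmk__xe_is(). A small stand-alone sketch of the same two checks against plain libxml2, outside any Pacemaker wrapper:

    /* Sketch: testing an xmlNode for children and for a given element name
     * with plain libxml2, the fields the patched code now reads directly.
     * Build with the flags from `xml2-config --cflags --libs`. */
    #include <stdio.h>
    #include <string.h>
    #include <libxml/parser.h>
    #include <libxml/tree.h>

    int
    main(void)
    {
        const char *doc = "<nodes><node id=\"1\"/></nodes>";
        xmlDoc *xml = xmlReadMemory(doc, (int) strlen(doc), NULL, NULL, 0);
        xmlNode *root = xmlDocGetRootElement(xml);

        if ((root != NULL) && (root->children != NULL)) {
            printf("<%s> has at least one child\n", (const char *) root->name);
        }
        if ((root != NULL)
            && (xmlStrcmp(root->name, (const xmlChar *) "nodes") == 0)) {
            printf("root element is <nodes>\n");
        }
        xmlFreeDoc(xml);
        xmlCleanupParser();
        return 0;
    }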
diff --git a/lib/cib/cib_client.c b/lib/cib/cib_client.c
index 2d179e0..32e1f83 100644
--- a/lib/cib/cib_client.c
+++ b/lib/cib/cib_client.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -253,14 +253,15 @@ cib_client_noop(cib_t * cib, int call_options)
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_NOOP, NULL, NULL, NULL, NULL,
- call_options, NULL);
+ call_options, cib->user);
}
static int
cib_client_ping(cib_t * cib, xmlNode ** output_data, int call_options)
{
op_common(cib);
- return cib_internal_op(cib, CRM_OP_PING, NULL, NULL, NULL, output_data, call_options, NULL);
+ return cib_internal_op(cib, CRM_OP_PING, NULL, NULL, NULL, output_data,
+ call_options, cib->user);
}
static int
@@ -275,7 +276,7 @@ cib_client_query_from(cib_t * cib, const char *host, const char *section,
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_QUERY, host, section, NULL,
- output_data, call_options, NULL);
+ output_data, call_options, cib->user);
}
static int
@@ -283,7 +284,7 @@ is_primary(cib_t *cib)
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_IS_PRIMARY, NULL, NULL, NULL,
- NULL, cib_scope_local|cib_sync_call, NULL);
+ NULL, cib_scope_local|cib_sync_call, cib->user);
}
static int
@@ -291,7 +292,7 @@ set_secondary(cib_t *cib, int call_options)
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_SECONDARY, NULL, NULL, NULL,
- NULL, call_options, NULL);
+ NULL, call_options, cib->user);
}
static int
@@ -306,7 +307,7 @@ set_primary(cib_t *cib, int call_options)
op_common(cib);
crm_trace("Adding cib_scope_local to options");
return cib_internal_op(cib, PCMK__CIB_REQUEST_PRIMARY, NULL, NULL, NULL,
- NULL, call_options|cib_scope_local, NULL);
+ NULL, call_options|cib_scope_local, cib->user);
}
static int
@@ -314,7 +315,7 @@ cib_client_bump_epoch(cib_t * cib, int call_options)
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_BUMP, NULL, NULL, NULL, NULL,
- call_options, NULL);
+ call_options, cib->user);
}
static int
@@ -322,7 +323,7 @@ cib_client_upgrade(cib_t * cib, int call_options)
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_UPGRADE, NULL, NULL, NULL,
- NULL, call_options, NULL);
+ NULL, call_options, cib->user);
}
static int
@@ -336,7 +337,7 @@ cib_client_sync_from(cib_t * cib, const char *host, const char *section, int cal
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_SYNC_TO_ALL, host, section,
- NULL, NULL, call_options, NULL);
+ NULL, NULL, call_options, cib->user);
}
static int
@@ -344,7 +345,7 @@ cib_client_create(cib_t * cib, const char *section, xmlNode * data, int call_opt
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_CREATE, NULL, section, data,
- NULL, call_options, NULL);
+ NULL, call_options, cib->user);
}
static int
@@ -352,7 +353,7 @@ cib_client_modify(cib_t * cib, const char *section, xmlNode * data, int call_opt
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_MODIFY, NULL, section, data,
- NULL, call_options, NULL);
+ NULL, call_options, cib->user);
}
static int
@@ -360,7 +361,7 @@ cib_client_replace(cib_t * cib, const char *section, xmlNode * data, int call_op
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_REPLACE, NULL, section, data,
- NULL, call_options, NULL);
+ NULL, call_options, cib->user);
}
static int
@@ -368,7 +369,7 @@ cib_client_delete(cib_t * cib, const char *section, xmlNode * data, int call_opt
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_DELETE, NULL, section, data,
- NULL, call_options, NULL);
+ NULL, call_options, cib->user);
}
static int
@@ -376,7 +377,7 @@ cib_client_delete_absolute(cib_t * cib, const char *section, xmlNode * data, int
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_ABS_DELETE, NULL, section,
- data, NULL, call_options, NULL);
+ data, NULL, call_options, cib->user);
}
static int
@@ -384,7 +385,76 @@ cib_client_erase(cib_t * cib, xmlNode ** output_data, int call_options)
{
op_common(cib);
return cib_internal_op(cib, PCMK__CIB_REQUEST_ERASE, NULL, NULL, NULL,
- output_data, call_options, NULL);
+ output_data, call_options, cib->user);
+}
+
+static int
+cib_client_init_transaction(cib_t *cib)
+{
+ int rc = pcmk_rc_ok;
+
+ op_common(cib);
+
+ if (cib->transaction != NULL) {
+ // A client can have at most one transaction at a time
+ rc = pcmk_rc_already;
+ }
+
+ if (rc == pcmk_rc_ok) {
+ cib->transaction = create_xml_node(NULL, T_CIB_TRANSACTION);
+ if (cib->transaction == NULL) {
+ rc = ENOMEM;
+ }
+ }
+
+ if (rc != pcmk_rc_ok) {
+ const char *client_id = NULL;
+
+ cib->cmds->client_id(cib, NULL, &client_id);
+ crm_err("Failed to initialize CIB transaction for client %s: %s",
+ client_id, pcmk_rc_str(rc));
+ }
+ return pcmk_rc2legacy(rc);
+}
+
+static int
+cib_client_end_transaction(cib_t *cib, bool commit, int call_options)
+{
+ const char *client_id = NULL;
+ int rc = pcmk_ok;
+
+ op_common(cib);
+ cib->cmds->client_id(cib, NULL, &client_id);
+ client_id = pcmk__s(client_id, "(unidentified)");
+
+ if (commit) {
+ if (cib->transaction == NULL) {
+ rc = pcmk_rc_no_transaction;
+
+ crm_err("Failed to commit transaction for CIB client %s: %s",
+ client_id, pcmk_rc_str(rc));
+ return pcmk_rc2legacy(rc);
+ }
+ rc = cib_internal_op(cib, PCMK__CIB_REQUEST_COMMIT_TRANSACT, NULL, NULL,
+ cib->transaction, NULL, call_options, cib->user);
+
+ } else {
+ // Discard always succeeds
+ if (cib->transaction != NULL) {
+ crm_trace("Discarded transaction for CIB client %s", client_id);
+ } else {
+ crm_trace("No transaction found for CIB client %s", client_id);
+ }
+ }
+ free_xml(cib->transaction);
+ cib->transaction = NULL;
+ return rc;
+}
+
+static void
+cib_client_set_user(cib_t *cib, const char *user)
+{
+ pcmk__str_update(&(cib->user), user);
}
static void
@@ -622,13 +692,15 @@ cib_new_variant(void)
return NULL;
}
+ // Deprecated method
new_cib->cmds->set_op_callback = cib_client_set_op_callback;
+
new_cib->cmds->add_notify_callback = cib_client_add_notify_callback;
new_cib->cmds->del_notify_callback = cib_client_del_notify_callback;
new_cib->cmds->register_callback = cib_client_register_callback;
new_cib->cmds->register_callback_full = cib_client_register_callback_full;
- new_cib->cmds->noop = cib_client_noop;
+ new_cib->cmds->noop = cib_client_noop; // Deprecated method
new_cib->cmds->ping = cib_client_ping;
new_cib->cmds->query = cib_client_query;
new_cib->cmds->sync = cib_client_sync;
@@ -656,8 +728,14 @@ cib_new_variant(void)
new_cib->cmds->remove = cib_client_delete;
new_cib->cmds->erase = cib_client_erase;
+ // Deprecated method
new_cib->cmds->delete_absolute = cib_client_delete_absolute;
+ new_cib->cmds->init_transaction = cib_client_init_transaction;
+ new_cib->cmds->end_transaction = cib_client_end_transaction;
+
+ new_cib->cmds->set_user = cib_client_set_user;
+
return new_cib;
}
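The additions above give libcib clients an explicit transaction API (init_transaction/end_transaction) and route each request's ACL user through cib->user via set_user. A minimal sketch of how a caller might drive the new methods, using the file variant and the cib_transaction call option introduced by this patch; the section, payload, and file path are illustrative only, and error handling is trimmed:

    /* Sketch of the new client-side CIB transaction flow.  The method names
     * and the cib_transaction option come from this patch; everything else
     * here is placeholder data. */
    #include <stdbool.h>
    #include <crm/cib.h>
    #include <crm/common/xml.h>

    static int
    update_in_transaction(const char *cib_file)
    {
        cib_t *cib = cib_file_new(cib_file);
        xmlNode *update = create_xml_node(NULL, "crm_config");
        int rc;

        cib->cmds->signon(cib, "transaction-example", cib_command);

        rc = cib->cmds->init_transaction(cib);
        if (rc == pcmk_ok) {
            // Queued in cib->transaction rather than applied immediately
            cib->cmds->modify(cib, "crm_config", update,
                              cib_transaction|cib_sync_call);

            // true commits everything queued; false would discard it
            rc = cib->cmds->end_transaction(cib, true, cib_sync_call);
        }

        free_xml(update);
        cib->cmds->signoff(cib);
        cib->cmds->free(cib);
        return rc;
    }

    int
    main(void)
    {
        // Placeholder path; point this at a real CIB file to try it
        return update_in_transaction("/tmp/example-cib.xml");
    }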
diff --git a/lib/cib/cib_file.c b/lib/cib/cib_file.c
index 7d05965..a279823 100644
--- a/lib/cib/cib_file.c
+++ b/lib/cib/cib_file.c
@@ -37,35 +37,100 @@
#define CIB_LIVE_NAME CIB_SERIES ".xml"
+// key: client ID (const char *) -> value: client (cib_t *)
+static GHashTable *client_table = NULL;
+
enum cib_file_flags {
cib_file_flag_dirty = (1 << 0),
cib_file_flag_live = (1 << 1),
};
typedef struct cib_file_opaque_s {
- uint32_t flags; // Group of enum cib_file_flags
+ char *id;
char *filename;
+ uint32_t flags; // Group of enum cib_file_flags
+ xmlNode *cib_xml;
} cib_file_opaque_t;
-struct cib_func_entry {
- const char *op;
- gboolean read_only;
- cib_op_t fn;
-};
+static int cib_file_process_commit_transaction(const char *op, int options,
+ const char *section,
+ xmlNode *req, xmlNode *input,
+ xmlNode *existing_cib,
+ xmlNode **result_cib,
+ xmlNode **answer);
-static struct cib_func_entry cib_file_ops[] = {
- { PCMK__CIB_REQUEST_QUERY, TRUE, cib_process_query },
- { PCMK__CIB_REQUEST_MODIFY, FALSE, cib_process_modify },
- { PCMK__CIB_REQUEST_APPLY_PATCH, FALSE, cib_process_diff },
- { PCMK__CIB_REQUEST_BUMP, FALSE, cib_process_bump },
- { PCMK__CIB_REQUEST_REPLACE, FALSE, cib_process_replace },
- { PCMK__CIB_REQUEST_CREATE, FALSE, cib_process_create },
- { PCMK__CIB_REQUEST_DELETE, FALSE, cib_process_delete },
- { PCMK__CIB_REQUEST_ERASE, FALSE, cib_process_erase },
- { PCMK__CIB_REQUEST_UPGRADE, FALSE, cib_process_upgrade },
-};
+/*!
+ * \internal
+ * \brief Add a CIB file client to client table
+ *
+ * \param[in] cib CIB client
+ */
+static void
+register_client(const cib_t *cib)
+{
+ cib_file_opaque_t *private = cib->variant_opaque;
+
+ if (client_table == NULL) {
+ client_table = pcmk__strkey_table(NULL, NULL);
+ }
+ g_hash_table_insert(client_table, private->id, (gpointer) cib);
+}
+
+/*!
+ * \internal
+ * \brief Remove a CIB file client from client table
+ *
+ * \param[in] cib CIB client
+ */
+static void
+unregister_client(const cib_t *cib)
+{
+ cib_file_opaque_t *private = cib->variant_opaque;
-static xmlNode *in_mem_cib = NULL;
+ if (client_table == NULL) {
+ return;
+ }
+
+ g_hash_table_remove(client_table, private->id);
+
+ /* @COMPAT: Add to crm_exit() when libcib and libcrmcommon are merged,
+ * instead of destroying the client table when there are no more clients.
+ */
+ if (g_hash_table_size(client_table) == 0) {
+ g_hash_table_destroy(client_table);
+ client_table = NULL;
+ }
+}
+
+/*!
+ * \internal
+ * \brief Look up a CIB file client by its ID
+ *
+ * \param[in] client_id CIB client ID
+ *
+ * \return CIB client with matching ID if found, or \p NULL otherwise
+ */
+static cib_t *
+get_client(const char *client_id)
+{
+ if (client_table == NULL) {
+ return NULL;
+ }
+ return g_hash_table_lookup(client_table, (gpointer) client_id);
+}
+
+static const cib__op_fn_t cib_op_functions[] = {
+ [cib__op_apply_patch] = cib_process_diff,
+ [cib__op_bump] = cib_process_bump,
+ [cib__op_commit_transact] = cib_file_process_commit_transaction,
+ [cib__op_create] = cib_process_create,
+ [cib__op_delete] = cib_process_delete,
+ [cib__op_erase] = cib_process_erase,
+ [cib__op_modify] = cib_process_modify,
+ [cib__op_query] = cib_process_query,
+ [cib__op_replace] = cib_process_replace,
+ [cib__op_upgrade] = cib_process_upgrade,
+};
/* cib_file_backup() and cib_file_write_with_digest() need to chown the
* written files only in limited circumstances, so these variables allow
@@ -95,6 +160,27 @@ static gboolean cib_do_chown = FALSE;
/*!
* \internal
+ * \brief Get the function that performs a given CIB file operation
+ *
+ * \param[in] operation Operation whose function to look up
+ *
+ * \return Function that performs \p operation for a CIB file client
+ */
+static cib__op_fn_t
+file_get_op_function(const cib__operation_t *operation)
+{
+ enum cib__op_type type = operation->type;
+
+ CRM_ASSERT(type >= 0);
+
+ if (type >= PCMK__NELEM(cib_op_functions)) {
+ return NULL;
+ }
+ return cib_op_functions[type];
+}
+
+/*!
+ * \internal
* \brief Check whether a file is the live CIB
*
* \param[in] filename Name of file to check
@@ -125,114 +211,148 @@ cib_file_is_live(const char *filename)
}
static int
-cib_file_perform_op_delegate(cib_t *cib, const char *op, const char *host,
- const char *section, xmlNode *data,
- xmlNode **output_data, int call_options,
- const char *user_name)
+cib_file_process_request(cib_t *cib, xmlNode *request, xmlNode **output)
{
int rc = pcmk_ok;
- char *effective_user = NULL;
- gboolean query = FALSE;
- gboolean changed = FALSE;
- xmlNode *request = NULL;
- xmlNode *output = NULL;
- xmlNode *cib_diff = NULL;
+ const cib__operation_t *operation = NULL;
+ cib__op_fn_t op_function = NULL;
+
+ int call_id = 0;
+ int call_options = cib_none;
+ const char *op = crm_element_value(request, F_CIB_OPERATION);
+ const char *section = crm_element_value(request, F_CIB_SECTION);
+ xmlNode *data = get_message_xml(request, F_CIB_CALLDATA);
+
+ bool changed = false;
+ bool read_only = false;
xmlNode *result_cib = NULL;
- cib_op_t *fn = NULL;
- int lpc = 0;
- static int max_msg_types = PCMK__NELEM(cib_file_ops);
+ xmlNode *cib_diff = NULL;
+
cib_file_opaque_t *private = cib->variant_opaque;
- crm_info("Handling %s operation for %s as %s",
- (op? op : "invalid"), (section? section : "entire CIB"),
- (user_name? user_name : "default user"));
+ // We error checked these in callers
+ cib__get_operation(op, &operation);
+ op_function = file_get_op_function(operation);
- cib__set_call_options(call_options, "file operation",
- cib_no_mtime|cib_inhibit_bcast|cib_scope_local);
+ crm_element_value_int(request, F_CIB_CALLID, &call_id);
+ crm_element_value_int(request, F_CIB_CALLOPTS, &call_options);
- if (cib->state == cib_disconnected) {
- return -ENOTCONN;
- }
+ read_only = !pcmk_is_set(operation->flags, cib__op_attr_modifies);
- if (output_data != NULL) {
- *output_data = NULL;
+ // Mirror the logic in prepare_input() in pacemaker-based
+ if ((section != NULL) && pcmk__xe_is(data, XML_TAG_CIB)) {
+
+ data = pcmk_find_cib_element(data, section);
}
- if (op == NULL) {
- return -EINVAL;
+ rc = cib_perform_op(op, call_options, op_function, read_only, section,
+ request, data, true, &changed, &private->cib_xml,
+ &result_cib, &cib_diff, output);
+
+ if (pcmk_is_set(call_options, cib_transaction)) {
+ /* The rest of the logic applies only to the transaction as a whole, not
+ * to individual requests.
+ */
+ goto done;
}
- for (lpc = 0; lpc < max_msg_types; lpc++) {
- if (pcmk__str_eq(op, cib_file_ops[lpc].op, pcmk__str_casei)) {
- fn = &(cib_file_ops[lpc].fn);
- query = cib_file_ops[lpc].read_only;
- break;
+ if (rc == -pcmk_err_schema_validation) {
+ validate_xml_verbose(result_cib);
+
+ } else if ((rc == pcmk_ok) && !read_only) {
+ pcmk__log_xml_patchset(LOG_DEBUG, cib_diff);
+
+ if (result_cib != private->cib_xml) {
+ free_xml(private->cib_xml);
+ private->cib_xml = result_cib;
}
+ cib_set_file_flags(private, cib_file_flag_dirty);
}
- if (fn == NULL) {
- return -EPROTONOSUPPORT;
+ // Global operation callback (deprecated)
+ if (cib->op_callback != NULL) {
+ cib->op_callback(NULL, call_id, rc, *output);
}
- cib->call_id++;
- request = cib_create_op(cib->call_id, op, host, section, data, call_options,
- user_name);
- if(user_name) {
- crm_xml_add(request, XML_ACL_TAG_USER, user_name);
+done:
+ if ((result_cib != private->cib_xml) && (result_cib != *output)) {
+ free_xml(result_cib);
}
+ free_xml(cib_diff);
+ return rc;
+}
- /* Mirror the logic in cib_prepare_common() */
- if (section != NULL && data != NULL && pcmk__str_eq(crm_element_name(data), XML_TAG_CIB, pcmk__str_none)) {
- data = pcmk_find_cib_element(data, section);
- }
+static int
+cib_file_perform_op_delegate(cib_t *cib, const char *op, const char *host,
+ const char *section, xmlNode *data,
+ xmlNode **output_data, int call_options,
+ const char *user_name)
+{
+ int rc = pcmk_ok;
+ xmlNode *request = NULL;
+ xmlNode *output = NULL;
+ cib_file_opaque_t *private = cib->variant_opaque;
- rc = cib_perform_op(op, call_options, fn, query,
- section, request, data, TRUE, &changed, in_mem_cib, &result_cib, &cib_diff,
- &output);
+ const cib__operation_t *operation = NULL;
- free_xml(request);
- if (rc == -pcmk_err_schema_validation) {
- validate_xml_verbose(result_cib);
+ crm_info("Handling %s operation for %s as %s",
+ pcmk__s(op, "invalid"), pcmk__s(section, "entire CIB"),
+ pcmk__s(user_name, "default user"));
+
+ if (output_data != NULL) {
+ *output_data = NULL;
}
- if (rc != pcmk_ok) {
- free_xml(result_cib);
+ if (cib->state == cib_disconnected) {
+ return -ENOTCONN;
+ }
- } else if (query == FALSE) {
- pcmk__output_t *out = NULL;
+ rc = cib__get_operation(op, &operation);
+ rc = pcmk_rc2legacy(rc);
+ if (rc != pcmk_ok) {
+ // @COMPAT: At compatibility break, use rc directly
+ return -EPROTONOSUPPORT;
+ }
- rc = pcmk_rc2legacy(pcmk__log_output_new(&out));
- CRM_CHECK(rc == pcmk_ok, goto done);
+ if (file_get_op_function(operation) == NULL) {
+ // @COMPAT: At compatibility break, use EOPNOTSUPP
+ crm_err("Operation %s is not supported by CIB file clients", op);
+ return -EPROTONOSUPPORT;
+ }
- pcmk__output_set_log_level(out, LOG_DEBUG);
- rc = out->message(out, "xml-patchset", cib_diff);
- out->finish(out, pcmk_rc2exitc(rc), true, NULL);
- pcmk__output_free(out);
- rc = pcmk_ok;
+ cib__set_call_options(call_options, "file operation", cib_no_mtime);
- free_xml(in_mem_cib);
- in_mem_cib = result_cib;
- cib_set_file_flags(private, cib_file_flag_dirty);
+ rc = cib__create_op(cib, op, host, section, data, call_options, user_name,
+ NULL, &request);
+ if (rc != pcmk_ok) {
+ return rc;
}
+ crm_xml_add(request, XML_ACL_TAG_USER, user_name);
+ crm_xml_add(request, F_CIB_CLIENTID, private->id);
- if (cib->op_callback != NULL) {
- cib->op_callback(NULL, cib->call_id, rc, output);
+ if (pcmk_is_set(call_options, cib_transaction)) {
+ rc = cib__extend_transaction(cib, request);
+ goto done;
}
+ rc = cib_file_process_request(cib, request, &output);
+
if ((output_data != NULL) && (output != NULL)) {
- *output_data = (output == in_mem_cib)? copy_xml(output) : output;
+ if (output->doc == private->cib_xml->doc) {
+ *output_data = copy_xml(output);
+ } else {
+ *output_data = output;
+ }
}
done:
- free_xml(cib_diff);
+ if ((output != NULL)
+ && (output->doc != private->cib_xml->doc)
+ && ((output_data == NULL) || (output != *output_data))) {
- if ((output_data == NULL) && (output != in_mem_cib)) {
- /* Don't free output if we're still using it. (output_data != NULL)
- * means we may have assigned *output_data = output above.
- */
free_xml(output);
}
- free(effective_user);
+ free_xml(request);
return rc;
}
@@ -240,7 +360,8 @@ done:
* \internal
* \brief Read CIB from disk and validate it against XML schema
*
- * \param[in] filename Name of file to read CIB from
+ * \param[in] filename Name of file to read CIB from
+ * \param[out] output Where to store the read CIB XML
*
* \return pcmk_ok on success,
* -ENXIO if file does not exist (or stat() otherwise fails), or
@@ -251,7 +372,7 @@ done:
* because some callers might not need to write.
*/
static int
-load_file_cib(const char *filename)
+load_file_cib(const char *filename, xmlNode **output)
{
struct stat buf;
xmlNode *root = NULL;
@@ -282,7 +403,7 @@ load_file_cib(const char *filename)
}
/* Remember the parsed XML for later use */
- in_mem_cib = root;
+ *output = root;
return pcmk_ok;
}
@@ -295,7 +416,7 @@ cib_file_signon(cib_t *cib, const char *name, enum cib_conn_type type)
if (private->filename == NULL) {
rc = -EINVAL;
} else {
- rc = load_file_cib(private->filename);
+ rc = load_file_cib(private->filename, &private->cib_xml);
}
if (rc == pcmk_ok) {
@@ -303,10 +424,11 @@ cib_file_signon(cib_t *cib, const char *name, enum cib_conn_type type)
private->filename, name);
cib->state = cib_connected_command;
cib->type = cib_command;
+ register_client(cib);
} else {
- crm_info("Connection to local file '%s' for %s failed: %s\n",
- private->filename, name, pcmk_strerror(rc));
+ crm_info("Connection to local file '%s' for %s (client %s) failed: %s",
+ private->filename, name, private->id, pcmk_strerror(rc));
}
return rc;
}
@@ -315,12 +437,13 @@ cib_file_signon(cib_t *cib, const char *name, enum cib_conn_type type)
* \internal
* \brief Write out the in-memory CIB to a live CIB file
*
- * param[in,out] path Full path to file to write
+ * param[in] cib_root Root of XML tree to write
+ * param[in,out] path Full path to file to write
*
* \return 0 on success, -1 on failure
*/
static int
-cib_file_write_live(char *path)
+cib_file_write_live(xmlNode *cib_root, char *path)
{
uid_t uid = geteuid();
struct passwd *daemon_pwent;
@@ -370,7 +493,7 @@ cib_file_write_live(char *path)
}
/* write the file */
- if (cib_file_write_with_digest(in_mem_cib, cib_dirname,
+ if (cib_file_write_with_digest(cib_root, cib_dirname,
cib_filename) != pcmk_ok) {
rc = -1;
}
@@ -410,13 +533,15 @@ cib_file_signoff(cib_t *cib)
crm_debug("Disconnecting from the CIB manager");
cib->state = cib_disconnected;
cib->type = cib_no_connection;
+ unregister_client(cib);
+ cib->cmds->end_transaction(cib, false, cib_none);
/* If the in-memory CIB has been changed, write it to disk */
if (pcmk_is_set(private->flags, cib_file_flag_dirty)) {
/* If this is the live CIB, write it out with a digest */
if (pcmk_is_set(private->flags, cib_file_flag_live)) {
- if (cib_file_write_live(private->filename) < 0) {
+ if (cib_file_write_live(private->cib_xml, private->filename) < 0) {
rc = pcmk_err_generic;
}
@@ -424,7 +549,8 @@ cib_file_signoff(cib_t *cib)
} else {
gboolean do_bzip = pcmk__ends_with_ext(private->filename, ".bz2");
- if (write_xml_file(in_mem_cib, private->filename, do_bzip) <= 0) {
+ if (write_xml_file(private->cib_xml, private->filename,
+ do_bzip) <= 0) {
rc = pcmk_err_generic;
}
}
@@ -438,8 +564,8 @@ cib_file_signoff(cib_t *cib)
}
/* Free the in-memory CIB */
- free_xml(in_mem_cib);
- in_mem_cib = NULL;
+ free_xml(private->cib_xml);
+ private->cib_xml = NULL;
return rc;
}
@@ -455,9 +581,11 @@ cib_file_free(cib_t *cib)
if (rc == pcmk_ok) {
cib_file_opaque_t *private = cib->variant_opaque;
+ free(private->id);
free(private->filename);
- free(cib->cmds);
free(private);
+ free(cib->cmds);
+ free(cib->user);
free(cib);
} else {
@@ -494,24 +622,24 @@ cib_file_set_connection_dnotify(cib_t *cib,
* \param[out] async_id If not \p NULL, where to store asynchronous client ID
* \param[out] sync_id If not \p NULL, where to store synchronous client ID
*
- * \return Legacy Pacemaker return code (specifically, \p -EPROTONOSUPPORT)
+ * \return Legacy Pacemaker return code
*
* \note This is the \p cib_file variant implementation of
* \p cib_api_operations_t:client_id().
- * \note A \p cib_file object doesn't connect to the CIB and is never assigned a
- * client ID.
*/
static int
cib_file_client_id(const cib_t *cib, const char **async_id,
const char **sync_id)
{
+ cib_file_opaque_t *private = cib->variant_opaque;
+
if (async_id != NULL) {
- *async_id = NULL;
+ *async_id = private->id;
}
if (sync_id != NULL) {
- *sync_id = NULL;
+ *sync_id = private->id;
}
- return -EPROTONOSUPPORT;
+ return pcmk_ok;
}
cib_t *
@@ -530,6 +658,7 @@ cib_file_new(const char *cib_location)
free(cib);
return NULL;
}
+ private->id = crm_generate_uuid();
cib->variant = cib_file;
cib->variant_opaque = private;
@@ -550,7 +679,7 @@ cib_file_new(const char *cib_location)
cib->cmds->signon = cib_file_signon;
cib->cmds->signoff = cib_file_signoff;
cib->cmds->free = cib_file_free;
- cib->cmds->inputfd = cib_file_inputfd;
+ cib->cmds->inputfd = cib_file_inputfd; // Deprecated method
cib->cmds->register_notification = cib_file_register_notification;
cib->cmds->set_connection_dnotify = cib_file_set_connection_dnotify;
@@ -917,3 +1046,133 @@ cib_file_write_with_digest(xmlNode *cib_root, const char *cib_dirname,
free(tmp_cib);
return exit_rc;
}
+
+/*!
+ * \internal
+ * \brief Process requests in a CIB transaction
+ *
+ * Stop when a request fails or when all requests have been processed.
+ *
+ * \param[in,out] cib CIB client
+ * \param[in,out] transaction CIB transaction
+ *
+ * \return Standard Pacemaker return code
+ */
+static int
+cib_file_process_transaction_requests(cib_t *cib, xmlNode *transaction)
+{
+ cib_file_opaque_t *private = cib->variant_opaque;
+
+ for (xmlNode *request = first_named_child(transaction, T_CIB_COMMAND);
+ request != NULL; request = crm_next_same_xml(request)) {
+
+ xmlNode *output = NULL;
+ const char *op = crm_element_value(request, F_CIB_OPERATION);
+
+ int rc = cib_file_process_request(cib, request, &output);
+
+ rc = pcmk_legacy2rc(rc);
+ if (rc != pcmk_rc_ok) {
+ crm_err("Aborting transaction for CIB file client (%s) on file "
+ "'%s' due to failed %s request: %s",
+ private->id, private->filename, op, pcmk_rc_str(rc));
+ crm_log_xml_info(request, "Failed request");
+ return rc;
+ }
+
+ crm_trace("Applied %s request to transaction working CIB for CIB file "
+ "client (%s) on file '%s'",
+ op, private->id, private->filename);
+ crm_log_xml_trace(request, "Successful request");
+ }
+
+ return pcmk_rc_ok;
+}
+
+/*!
+ * \internal
+ * \brief Commit a given CIB file client's transaction to a working CIB copy
+ *
+ * \param[in,out] cib CIB file client
+ * \param[in] transaction CIB transaction
+ * \param[in,out] result_cib Where to store result CIB
+ *
+ * \return Standard Pacemaker return code
+ *
+ * \note The caller is responsible for replacing the \p cib argument's
+ * \p private->cib_xml with \p result_cib on success, and for freeing
+ * \p result_cib using \p free_xml() on failure.
+ */
+static int
+cib_file_commit_transaction(cib_t *cib, xmlNode *transaction,
+ xmlNode **result_cib)
+{
+ int rc = pcmk_rc_ok;
+ cib_file_opaque_t *private = cib->variant_opaque;
+ xmlNode *saved_cib = private->cib_xml;
+
+ CRM_CHECK(pcmk__xe_is(transaction, T_CIB_TRANSACTION),
+ return pcmk_rc_no_transaction);
+
+ /* *result_cib should be a copy of private->cib_xml (created by
+ * cib_perform_op()). If not, make a copy now. Change tracking isn't
+ * strictly required here because:
+ * * Each request in the transaction will have changes tracked and ACLs
+ * checked if appropriate.
+ * * cib_perform_op() will infer changes for the commit request at the end.
+ */
+ CRM_CHECK((*result_cib != NULL) && (*result_cib != private->cib_xml),
+ *result_cib = copy_xml(private->cib_xml));
+
+ crm_trace("Committing transaction for CIB file client (%s) on file '%s' to "
+ "working CIB",
+ private->id, private->filename);
+
+ // Apply all changes to a working copy of the CIB
+ private->cib_xml = *result_cib;
+
+ rc = cib_file_process_transaction_requests(cib, transaction);
+
+ crm_trace("Transaction commit %s for CIB file client (%s) on file '%s'",
+ ((rc == pcmk_rc_ok)? "succeeded" : "failed"),
+ private->id, private->filename);
+
+ /* Some request types (for example, erase) may have freed private->cib_xml
+ * (the working copy) and pointed it at a new XML object. In that case, it
+ * follows that *result_cib (the working copy) was freed.
+ *
+ * Point *result_cib at the updated working copy stored in private->cib_xml.
+ */
+ *result_cib = private->cib_xml;
+
+ // Point private->cib_xml back to the unchanged original copy
+ private->cib_xml = saved_cib;
+
+ return rc;
+}
+
+static int
+cib_file_process_commit_transaction(const char *op, int options,
+ const char *section, xmlNode *req,
+ xmlNode *input, xmlNode *existing_cib,
+ xmlNode **result_cib, xmlNode **answer)
+{
+ int rc = pcmk_rc_ok;
+ const char *client_id = crm_element_value(req, F_CIB_CLIENTID);
+ cib_t *cib = NULL;
+
+ CRM_CHECK(client_id != NULL, return -EINVAL);
+
+ cib = get_client(client_id);
+ CRM_CHECK(cib != NULL, return -EINVAL);
+
+ rc = cib_file_commit_transaction(cib, input, result_cib);
+ if (rc != pcmk_rc_ok) {
+ cib_file_opaque_t *private = cib->variant_opaque;
+
+ crm_err("Could not commit transaction for CIB file client (%s) on "
+ "file '%s': %s",
+ private->id, private->filename, pcmk_rc_str(rc));
+ }
+ return pcmk_rc2legacy(rc);
+}
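cib_file.c above replaces the old {name, read_only, handler} array with cib_op_functions[], a table of handlers indexed directly by the operation enum, and adds a client hash table so the commit-transaction handler can find its cib_t by ID. A compact sketch of the enum-indexed dispatch table with designated initializers; the operation names and handlers are invented for illustration:

    /* Sketch of an enum-indexed dispatch table, the same shape as the new
     * cib_op_functions[] array.  Ops and handlers here are illustrative. */
    #include <stdio.h>

    enum op_type { OP_QUERY, OP_MODIFY, OP_ERASE, OP_MAX };

    typedef int (*op_fn_t)(const char *section);

    static int
    do_query(const char *section)
    {
        printf("query %s\n", section);
        return 0;
    }

    static int
    do_modify(const char *section)
    {
        printf("modify %s\n", section);
        return 0;
    }

    static const op_fn_t op_functions[OP_MAX] = {
        [OP_QUERY]  = do_query,
        [OP_MODIFY] = do_modify,
        // OP_ERASE left NULL: unsupported by this client variant
    };

    static op_fn_t
    get_op_function(enum op_type type)
    {
        if (((int) type < 0) || (type >= OP_MAX)) {
            return NULL;                     // out of range
        }
        return op_functions[type];           // may be NULL if unsupported
    }

    int
    main(void)
    {
        op_fn_t fn = get_op_function(OP_MODIFY);

        return (fn != NULL)? fn("resources") : 1;
    }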
diff --git a/lib/cib/cib_native.c b/lib/cib/cib_native.c
index 4a87f56..c5e8b9e 100644
--- a/lib/cib/cib_native.c
+++ b/lib/cib/cib_native.c
@@ -69,20 +69,19 @@ cib_native_perform_op_delegate(cib_t *cib, const char *op, const char *host,
pcmk__set_ipc_flags(ipc_flags, "client", crm_ipc_client_response);
}
- cib->call_id++;
- if (cib->call_id < 1) {
- cib->call_id = 1;
+ rc = cib__create_op(cib, op, host, section, data, call_options, user_name,
+ NULL, &op_msg);
+ if (rc != pcmk_ok) {
+ return rc;
}
- op_msg = cib_create_op(cib->call_id, op, host, section, data, call_options,
- user_name);
- if (op_msg == NULL) {
- return -EPROTO;
+ if (pcmk_is_set(call_options, cib_transaction)) {
+ rc = cib__extend_transaction(cib, op_msg);
+ goto done;
}
crm_trace("Sending %s message to the CIB manager (timeout=%ds)", op, cib->call_timeout);
rc = crm_ipc_send(native->ipc, op_msg, ipc_flags, cib->call_timeout * 1000, &op_reply);
- free_xml(op_msg);
if (rc < 0) {
crm_err("Couldn't perform %s operation (timeout=%ds): %s (%d)", op,
@@ -168,6 +167,7 @@ cib_native_perform_op_delegate(cib_t *cib, const char *op, const char *host,
cib->state = cib_disconnected;
}
+ free_xml(op_msg);
free_xml(op_reply);
return rc;
}
@@ -255,6 +255,7 @@ cib_native_signoff(cib_t *cib)
crm_ipc_destroy(ipc);
}
+ cib->cmds->end_transaction(cib, false, cib_none);
cib->state = cib_disconnected;
cib->type = cib_no_connection;
@@ -268,6 +269,7 @@ cib_native_signon_raw(cib_t *cib, const char *name, enum cib_conn_type type,
int rc = pcmk_ok;
const char *channel = NULL;
cib_native_opaque_t *native = cib->variant_opaque;
+ xmlNode *hello = NULL;
struct ipc_client_callbacks cib_callbacks = {
.dispatch = cib_native_dispatch_internal,
@@ -296,12 +298,16 @@ cib_native_signon_raw(cib_t *cib, const char *name, enum cib_conn_type type,
if (async_fd != NULL) {
native->ipc = crm_ipc_new(channel, 0);
-
- if (native->ipc && crm_ipc_connect(native->ipc)) {
- *async_fd = crm_ipc_get_fd(native->ipc);
-
- } else if (native->ipc) {
- rc = -ENOTCONN;
+ if (native->ipc != NULL) {
+ rc = pcmk__connect_generic_ipc(native->ipc);
+ if (rc == pcmk_rc_ok) {
+ rc = pcmk__ipc_fd(native->ipc, async_fd);
+ if (rc != pcmk_rc_ok) {
+ crm_info("Couldn't get file descriptor for %s IPC",
+ channel);
+ }
+ }
+ rc = pcmk_rc2legacy(rc);
}
} else {
@@ -317,23 +323,23 @@ cib_native_signon_raw(cib_t *cib, const char *name, enum cib_conn_type type,
}
if (rc == pcmk_ok) {
- xmlNode *reply = NULL;
- xmlNode *hello = create_xml_node(NULL, "cib_command");
+ rc = cib__create_op(cib, CRM_OP_REGISTER, NULL, NULL, NULL,
+ cib_sync_call, NULL, name, &hello);
+ }
- crm_xml_add(hello, F_TYPE, T_CIB);
- crm_xml_add(hello, F_CIB_OPERATION, CRM_OP_REGISTER);
- crm_xml_add(hello, F_CIB_CLIENTNAME, name);
- crm_xml_add_int(hello, F_CIB_CALLOPTS, cib_sync_call);
+ if (rc == pcmk_ok) {
+ xmlNode *reply = NULL;
- if (crm_ipc_send(native->ipc, hello, crm_ipc_client_response, -1, &reply) > 0) {
+ if (crm_ipc_send(native->ipc, hello, crm_ipc_client_response, -1,
+ &reply) > 0) {
const char *msg_type = crm_element_value(reply, F_CIB_OPERATION);
- rc = pcmk_ok;
crm_log_xml_trace(reply, "reg-reply");
if (!pcmk__str_eq(msg_type, CRM_OP_REGISTER, pcmk__str_casei)) {
- crm_info("Reply to CIB registration message has "
- "unknown type '%s'", msg_type);
+ crm_info("Reply to CIB registration message has unknown type "
+ "'%s'",
+ msg_type);
rc = -EPROTO;
} else {
@@ -347,7 +353,6 @@ cib_native_signon_raw(cib_t *cib, const char *name, enum cib_conn_type type,
} else {
rc = -ECOMM;
}
-
free_xml(hello);
}
@@ -383,6 +388,7 @@ cib_native_free(cib_t *cib)
free(native->token);
free(cib->variant_opaque);
free(cib->cmds);
+ free(cib->user);
free(cib);
}
diff --git a/lib/cib/cib_ops.c b/lib/cib/cib_ops.c
index d3293c4..c324304 100644
--- a/lib/cib/cib_ops.c
+++ b/lib/cib/cib_ops.c
@@ -19,6 +19,9 @@
#include <sys/param.h>
#include <sys/types.h>
+#include <glib.h>
+#include <libxml/tree.h>
+
#include <crm/crm.h>
#include <crm/cib/internal.h>
#include <crm/msg_xml.h>
@@ -26,6 +29,139 @@
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
+// @TODO: Free this via crm_exit() when libcib gets merged with libcrmcommon
+static GHashTable *operation_table = NULL;
+
+static const cib__operation_t cib_ops[] = {
+ {
+ PCMK__CIB_REQUEST_ABS_DELETE, cib__op_abs_delete,
+ cib__op_attr_modifies|cib__op_attr_privileged
+ },
+ {
+ PCMK__CIB_REQUEST_APPLY_PATCH, cib__op_apply_patch,
+ cib__op_attr_modifies
+ |cib__op_attr_privileged
+ |cib__op_attr_transaction
+ },
+ {
+ PCMK__CIB_REQUEST_BUMP, cib__op_bump,
+ cib__op_attr_modifies
+ |cib__op_attr_privileged
+ |cib__op_attr_transaction
+ },
+ {
+ PCMK__CIB_REQUEST_COMMIT_TRANSACT, cib__op_commit_transact,
+ cib__op_attr_modifies
+ |cib__op_attr_privileged
+ |cib__op_attr_replaces
+ |cib__op_attr_writes_through
+ },
+ {
+ PCMK__CIB_REQUEST_CREATE, cib__op_create,
+ cib__op_attr_modifies
+ |cib__op_attr_privileged
+ |cib__op_attr_transaction
+ },
+ {
+ PCMK__CIB_REQUEST_DELETE, cib__op_delete,
+ cib__op_attr_modifies
+ |cib__op_attr_privileged
+ |cib__op_attr_transaction
+ },
+ {
+ PCMK__CIB_REQUEST_ERASE, cib__op_erase,
+ cib__op_attr_modifies
+ |cib__op_attr_privileged
+ |cib__op_attr_replaces
+ |cib__op_attr_transaction
+ },
+ {
+ PCMK__CIB_REQUEST_IS_PRIMARY, cib__op_is_primary,
+ cib__op_attr_privileged
+ },
+ {
+ PCMK__CIB_REQUEST_MODIFY, cib__op_modify,
+ cib__op_attr_modifies
+ |cib__op_attr_privileged
+ |cib__op_attr_transaction
+ },
+ {
+ PCMK__CIB_REQUEST_NOOP, cib__op_noop, cib__op_attr_none
+ },
+ {
+ CRM_OP_PING, cib__op_ping, cib__op_attr_none
+ },
+ {
+ // @COMPAT: Drop cib__op_attr_modifies when we drop legacy mode support
+ PCMK__CIB_REQUEST_PRIMARY, cib__op_primary,
+ cib__op_attr_modifies|cib__op_attr_privileged|cib__op_attr_local
+ },
+ {
+ PCMK__CIB_REQUEST_QUERY, cib__op_query, cib__op_attr_none
+ },
+ {
+ PCMK__CIB_REQUEST_REPLACE, cib__op_replace,
+ cib__op_attr_modifies
+ |cib__op_attr_privileged
+ |cib__op_attr_replaces
+ |cib__op_attr_writes_through
+ |cib__op_attr_transaction
+ },
+ {
+ PCMK__CIB_REQUEST_SECONDARY, cib__op_secondary,
+ cib__op_attr_privileged|cib__op_attr_local
+ },
+ {
+ PCMK__CIB_REQUEST_SHUTDOWN, cib__op_shutdown, cib__op_attr_privileged
+ },
+ {
+ PCMK__CIB_REQUEST_SYNC_TO_ALL, cib__op_sync_all, cib__op_attr_privileged
+ },
+ {
+ PCMK__CIB_REQUEST_SYNC_TO_ONE, cib__op_sync_one, cib__op_attr_privileged
+ },
+ {
+ PCMK__CIB_REQUEST_UPGRADE, cib__op_upgrade,
+ cib__op_attr_modifies
+ |cib__op_attr_privileged
+ |cib__op_attr_writes_through
+ |cib__op_attr_transaction
+ },
+};
+
+/*!
+ * \internal
+ * \brief Get the \c cib__operation_t object for a given CIB operation name
+ *
+ * \param[in] op CIB operation name
+ * \param[out] operation Where to store CIB operation object
+ *
+ * \return Standard Pacemaker return code
+ */
+int
+cib__get_operation(const char *op, const cib__operation_t **operation)
+{
+ CRM_ASSERT((op != NULL) && (operation != NULL));
+
+ if (operation_table == NULL) {
+ operation_table = pcmk__strkey_table(NULL, NULL);
+
+ for (int lpc = 0; lpc < PCMK__NELEM(cib_ops); lpc++) {
+ const cib__operation_t *oper = &(cib_ops[lpc]);
+
+ g_hash_table_insert(operation_table, (gpointer) oper->name,
+ (gpointer) oper);
+ }
+ }
+
+ *operation = g_hash_table_lookup(operation_table, op);
+ if (*operation == NULL) {
+ crm_err("Operation %s is invalid", op);
+ return EINVAL;
+ }
+ return pcmk_rc_ok;
+}
+
int
cib_process_query(const char *op, int options, const char *section, xmlNode * req, xmlNode * input,
xmlNode * existing_cib, xmlNode ** result_cib, xmlNode ** answer)
@@ -54,8 +190,8 @@ cib_process_query(const char *op, int options, const char *section, xmlNode * re
result = -ENXIO;
} else if (options & cib_no_children) {
- const char *tag = TYPE(obj_root);
- xmlNode *shallow = create_xml_node(*answer, tag);
+ xmlNode *shallow = create_xml_node(*answer,
+ (const char *) obj_root->name);
copy_in_properties(shallow, obj_root);
*answer = shallow;
@@ -107,12 +243,14 @@ cib_process_erase(const char *op, int options, const char *section, xmlNode * re
int result = pcmk_ok;
crm_trace("Processing \"%s\" event", op);
- *answer = NULL;
- free_xml(*result_cib);
- *result_cib = createEmptyCib(0);
+ if (*result_cib != existing_cib) {
+ free_xml(*result_cib);
+ }
+ *result_cib = createEmptyCib(0);
copy_in_properties(*result_cib, existing_cib);
update_counter(*result_cib, XML_ATTR_GENERATION_ADMIN, false);
+ *answer = NULL;
return result;
}
@@ -172,7 +310,6 @@ cib_process_replace(const char *op, int options, const char *section, xmlNode *
xmlNode * input, xmlNode * existing_cib, xmlNode ** result_cib,
xmlNode ** answer)
{
- const char *tag = NULL;
int result = pcmk_ok;
crm_trace("Processing %s for %s section",
@@ -189,16 +326,14 @@ cib_process_replace(const char *op, int options, const char *section, xmlNode *
return -EINVAL;
}
- tag = crm_element_name(input);
-
if (pcmk__str_eq(XML_CIB_TAG_SECTION_ALL, section, pcmk__str_casei)) {
section = NULL;
- } else if (pcmk__str_eq(tag, section, pcmk__str_casei)) {
+ } else if (pcmk__xe_is(input, section)) {
section = NULL;
}
- if (pcmk__str_eq(tag, XML_TAG_CIB, pcmk__str_casei)) {
+ if (pcmk__xe_is(input, XML_TAG_CIB)) {
int updates = 0;
int epoch = 0;
int admin_epoch = 0;
@@ -262,7 +397,9 @@ cib_process_replace(const char *op, int options, const char *section, xmlNode *
replace_admin_epoch, replace_epoch, replace_updates, peer);
}
- free_xml(*result_cib);
+ if (*result_cib != existing_cib) {
+ free_xml(*result_cib);
+ }
*result_cib = copy_xml(input);
} else {
@@ -299,7 +436,7 @@ cib_process_delete(const char *op, int options, const char *section, xmlNode * r
}
obj_root = pcmk_find_cib_element(*result_cib, section);
- if(pcmk__str_eq(crm_element_name(input), section, pcmk__str_casei)) {
+ if (pcmk__xe_is(input, section)) {
xmlNode *child = NULL;
for (child = pcmk__xml_first_child(input); child;
child = pcmk__xml_next(child)) {
@@ -360,7 +497,8 @@ cib_process_modify(const char *op, int options, const char *section, xmlNode * r
}
}
- if(options & cib_mixed_update) {
+ // @COMPAT cib_mixed_update is deprecated as of 2.1.7
+ if (pcmk_is_set(options, cib_mixed_update)) {
int max = 0, lpc;
xmlXPathObjectPtr xpathObj = xpath_search(*result_cib, "//@__delete__");
@@ -396,7 +534,7 @@ update_cib_object(xmlNode * parent, xmlNode * update)
CRM_CHECK(update != NULL, return -EINVAL);
CRM_CHECK(parent != NULL, return -EINVAL);
- object_name = crm_element_name(update);
+ object_name = (const char *) update->name;
CRM_CHECK(object_name != NULL, return -EINVAL);
object_id = ID(update);
@@ -425,33 +563,25 @@ update_cib_object(xmlNode * parent, xmlNode * update)
// @COMPAT: XML_CIB_ATTR_REPLACE is unused internally. Remove at break.
replace = crm_element_value(update, XML_CIB_ATTR_REPLACE);
if (replace != NULL) {
- xmlNode *remove = NULL;
- int last = 0, lpc = 0, len = 0;
+ int last = 0;
+ int len = strlen(replace);
- len = strlen(replace);
- while (lpc <= len) {
+ for (int lpc = 0; lpc <= len; ++lpc) {
if (replace[lpc] == ',' || replace[lpc] == 0) {
- char *replace_item = NULL;
-
- if (last == lpc) {
- /* nothing to do */
- last = lpc + 1;
- goto incr;
- }
-
- replace_item = strndup(replace + last, lpc - last);
- remove = find_xml_node(target, replace_item, FALSE);
- if (remove != NULL) {
- crm_trace("Replacing node <%s> in <%s>",
- replace_item, crm_element_name(target));
- free_xml(remove);
- remove = NULL;
+ if (last != lpc) {
+ char *replace_item = strndup(replace + last, lpc - last);
+ xmlNode *remove = find_xml_node(target, replace_item,
+ FALSE);
+
+ if (remove != NULL) {
+ crm_trace("Replacing node <%s> in <%s>",
+ replace_item, target->name);
+ free_xml(remove);
+ }
+ free(replace_item);
}
- free(replace_item);
last = lpc + 1;
}
- incr:
- lpc++;
}
xml_remove_prop(update, XML_CIB_ATTR_REPLACE);
xml_remove_prop(target, XML_CIB_ATTR_REPLACE);
@@ -475,7 +605,7 @@ update_cib_object(xmlNode * parent, xmlNode * update)
a_child = pcmk__xml_next(a_child)) {
int tmp_result = 0;
- crm_trace("Updating child <%s%s%s%s>", crm_element_name(a_child),
+ crm_trace("Updating child <%s%s%s%s>", a_child->name,
((ID(a_child) == NULL)? "" : " " XML_ATTR_ID "='"),
pcmk__s(ID(a_child), ""), ((ID(a_child) == NULL)? "" : "'"));
@@ -484,7 +614,7 @@ update_cib_object(xmlNode * parent, xmlNode * update)
/* only the first error is likely to be interesting */
if (tmp_result != pcmk_ok) {
crm_err("Error updating child <%s%s%s%s>",
- crm_element_name(a_child),
+ a_child->name,
((ID(a_child) == NULL)? "" : " " XML_ATTR_ID "='"),
pcmk__s(ID(a_child), ""),
((ID(a_child) == NULL)? "" : "'"));
@@ -514,7 +644,7 @@ add_cib_object(xmlNode * parent, xmlNode * new_obj)
return -EINVAL;
}
- object_name = crm_element_name(new_obj);
+ object_name = (const char *) new_obj->name;
if (object_name == NULL) {
return -EINVAL;
}
@@ -555,7 +685,8 @@ update_results(xmlNode *failed, xmlNode *target, const char *operation,
add_node_copy(xml_node, target);
crm_xml_add(xml_node, XML_FAILCIB_ATTR_ID, ID(target));
- crm_xml_add(xml_node, XML_FAILCIB_ATTR_OBJTYPE, TYPE(target));
+ crm_xml_add(xml_node, XML_FAILCIB_ATTR_OBJTYPE,
+ (const char *) target->name);
crm_xml_add(xml_node, XML_FAILCIB_ATTR_OP, operation);
crm_xml_add(xml_node, XML_FAILCIB_ATTR_REASON, error_msg);
@@ -582,7 +713,7 @@ cib_process_create(const char *op, int options, const char *section, xmlNode * r
} else if (pcmk__str_eq(XML_TAG_CIB, section, pcmk__str_casei)) {
section = NULL;
- } else if (pcmk__str_eq(crm_element_name(input), XML_TAG_CIB, pcmk__str_casei)) {
+ } else if (pcmk__xe_is(input, XML_TAG_CIB)) {
section = NULL;
}
@@ -601,7 +732,7 @@ cib_process_create(const char *op, int options, const char *section, xmlNode * r
failed = create_xml_node(NULL, XML_TAG_FAILED);
update_section = pcmk_find_cib_element(*result_cib, section);
- if (pcmk__str_eq(crm_element_name(input), section, pcmk__str_casei)) {
+ if (pcmk__xe_is(input, section)) {
xmlNode *a_child = NULL;
for (a_child = pcmk__xml_first_child(input); a_child != NULL;
@@ -617,7 +748,7 @@ cib_process_create(const char *op, int options, const char *section, xmlNode * r
update_results(failed, input, op, result);
}
- if ((result == pcmk_ok) && xml_has_children(failed)) {
+ if ((result == pcmk_ok) && (failed->children != NULL)) {
result = -EINVAL;
}
@@ -646,8 +777,11 @@ cib_process_diff(const char *op, int options, const char *section, xmlNode * req
op, originator,
(pcmk_is_set(options, cib_force_diff)? " (global update)" : ""));
- free_xml(*result_cib);
+ if (*result_cib != existing_cib) {
+ free_xml(*result_cib);
+ }
*result_cib = copy_xml(existing_cib);
+
return xml_apply_patchset(*result_cib, input, TRUE);
}
@@ -670,7 +804,7 @@ cib__config_changed_v1(xmlNode *last, xmlNode *next, xmlNode **diff)
goto done;
}
- crm_element_value_int(*diff, "format", &format);
+ crm_element_value_int(*diff, PCMK_XA_FORMAT, &format);
CRM_LOG_ASSERT(format == 1);
xpathObj = xpath_search(*diff, "//" XML_CIB_TAG_CONFIGURATION);
@@ -803,8 +937,8 @@ cib_process_xpath(const char *op, int options, const char *section,
} else if (pcmk__str_eq(op, PCMK__CIB_REQUEST_QUERY, pcmk__str_none)) {
if (options & cib_no_children) {
- const char *tag = TYPE(match);
- xmlNode *shallow = create_xml_node(*answer, tag);
+ xmlNode *shallow = create_xml_node(*answer,
+ (const char *) match->name);
copy_in_properties(shallow, match);
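cib_ops.c above introduces a static cib_ops[] descriptor array and cib__get_operation(), which builds a string-keyed hash of those descriptors the first time it is called. A stand-alone sketch of that lazily built lookup, using plain GLib (g_hash_table_new with g_str_hash where the real code uses the internal pcmk__strkey_table() helper); the descriptors and names below are invented:

    /* Sketch of a lazily initialized name -> descriptor lookup, the pattern
     * cib__get_operation() follows. */
    #include <glib.h>
    #include <stdio.h>

    typedef struct {
        const char *name;
        unsigned int flags;
    } operation_t;

    static const operation_t ops[] = {
        { "query",  0x0 },
        { "modify", 0x1 },
    };

    static GHashTable *op_table = NULL;

    static const operation_t *
    get_operation(const char *name)
    {
        if (op_table == NULL) {                      // built once, on demand
            op_table = g_hash_table_new(g_str_hash, g_str_equal);
            for (gsize i = 0; i < G_N_ELEMENTS(ops); i++) {
                g_hash_table_insert(op_table, (gpointer) ops[i].name,
                                    (gpointer) &ops[i]);
            }
        }
        return g_hash_table_lookup(op_table, name);
    }

    int
    main(void)
    {
        const operation_t *op = get_operation("modify");

        printf("%s -> flags 0x%x\n", (op != NULL)? op->name : "unknown",
               (op != NULL)? op->flags : 0u);
        return 0;
    }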
diff --git a/lib/cib/cib_remote.c b/lib/cib/cib_remote.c
index 28095b3..77479d7 100644
--- a/lib/cib/cib_remote.c
+++ b/lib/cib/cib_remote.c
@@ -55,7 +55,8 @@ typedef struct cib_remote_opaque_s {
static int
cib_remote_perform_op(cib_t *cib, const char *op, const char *host,
const char *section, xmlNode *data,
- xmlNode **output_data, int call_options, const char *name)
+ xmlNode **output_data, int call_options,
+ const char *user_name)
{
int rc;
int remaining_time = 0;
@@ -79,15 +80,16 @@ cib_remote_perform_op(cib_t *cib, const char *op, const char *host,
return -EINVAL;
}
- cib->call_id++;
- if (cib->call_id < 1) {
- cib->call_id = 1;
+ rc = cib__create_op(cib, op, host, section, data, call_options, user_name,
+ NULL, &op_msg);
+ if (rc != pcmk_ok) {
+ return rc;
}
- op_msg = cib_create_op(cib->call_id, op, host, section, data, call_options,
- NULL);
- if (op_msg == NULL) {
- return -EPROTO;
+ if (pcmk_is_set(call_options, cib_transaction)) {
+ rc = cib__extend_transaction(cib, op_msg);
+ free_xml(op_msg);
+ return rc;
}
crm_trace("Sending %s message to the CIB manager", op);
@@ -378,7 +380,7 @@ cib_tls_signon(cib_t *cib, pcmk__remote_t *connection, gboolean event_channel)
}
/* login to server */
- login = create_xml_node(NULL, "cib_command");
+ login = create_xml_node(NULL, T_CIB_COMMAND);
crm_xml_add(login, "op", "authenticate");
crm_xml_add(login, "user", private->user);
crm_xml_add(login, "password", private->passwd);
@@ -434,6 +436,7 @@ cib_remote_signon(cib_t *cib, const char *name, enum cib_conn_type type)
{
int rc = pcmk_ok;
cib_remote_opaque_t *private = cib->variant_opaque;
+ xmlNode *hello = NULL;
if (private->passwd == NULL) {
if (private->out == NULL) {
@@ -459,10 +462,13 @@ cib_remote_signon(cib_t *cib, const char *name, enum cib_conn_type type)
}
if (rc == pcmk_ok) {
- xmlNode *hello = cib_create_op(0, CRM_OP_REGISTER, NULL, NULL, NULL, 0,
- NULL);
- crm_xml_add(hello, F_CIB_CLIENTNAME, name);
- pcmk__remote_send_xml(&private->command, hello);
+ rc = cib__create_op(cib, CRM_OP_REGISTER, NULL, NULL, NULL, cib_none,
+ NULL, name, &hello);
+ }
+
+ if (rc == pcmk_ok) {
+ rc = pcmk__remote_send_xml(&private->command, hello);
+ rc = pcmk_rc2legacy(rc);
free_xml(hello);
}
@@ -490,6 +496,7 @@ cib_remote_signoff(cib_t *cib)
cib_tls_close(cib);
#endif
+ cib->cmds->end_transaction(cib, false, cib_none);
cib->state = cib_disconnected;
cib->type = cib_no_connection;
@@ -511,6 +518,7 @@ cib_remote_free(cib_t *cib)
free(private->user);
free(private->passwd);
free(cib->cmds);
+ free(cib->user);
free(private);
free(cib);
}
@@ -530,7 +538,7 @@ cib_remote_inputfd(cib_t * cib)
static int
cib_remote_register_notification(cib_t * cib, const char *callback, int enabled)
{
- xmlNode *notify_msg = create_xml_node(NULL, "cib_command");
+ xmlNode *notify_msg = create_xml_node(NULL, T_CIB_COMMAND);
cib_remote_opaque_t *private = cib->variant_opaque;
crm_xml_add(notify_msg, F_CIB_OPERATION, T_CIB_NOTIFY);
@@ -614,7 +622,7 @@ cib_remote_new(const char *server, const char *user, const char *passwd, int por
cib->cmds->signon = cib_remote_signon;
cib->cmds->signoff = cib_remote_signoff;
cib->cmds->free = cib_remote_free;
- cib->cmds->inputfd = cib_remote_inputfd;
+ cib->cmds->inputfd = cib_remote_inputfd; // Deprecated method
cib->cmds->register_notification = cib_remote_register_notification;
cib->cmds->set_connection_dnotify = cib_remote_set_connection_dnotify;
diff --git a/lib/cib/cib_utils.c b/lib/cib/cib_utils.c
index c75d844..0082eef 100644
--- a/lib/cib/cib_utils.c
+++ b/lib/cib/cib_utils.c
@@ -20,6 +20,7 @@
#include <crm/crm.h>
#include <crm/cib/internal.h>
#include <crm/msg_xml.h>
+#include <crm/common/cib_internal.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
#include <crm/pengine/rules.h>
@@ -78,6 +79,154 @@ cib_diff_version_details(xmlNode * diff, int *admin_epoch, int *epoch, int *upda
}
/*!
+ * \internal
+ * \brief Get the XML patchset from a CIB diff notification
+ *
+ * \param[in] msg CIB diff notification
+ * \param[out] patchset Where to store XML patchset
+ *
+ * \return Standard Pacemaker return code
+ */
+int
+cib__get_notify_patchset(const xmlNode *msg, const xmlNode **patchset)
+{
+ int rc = pcmk_err_generic;
+
+ CRM_ASSERT(patchset != NULL);
+ *patchset = NULL;
+
+ if (msg == NULL) {
+ crm_err("CIB diff notification received with no XML");
+ return ENOMSG;
+ }
+
+ if ((crm_element_value_int(msg, F_CIB_RC, &rc) != 0) || (rc != pcmk_ok)) {
+ crm_warn("Ignore failed CIB update: %s " CRM_XS " rc=%d",
+ pcmk_strerror(rc), rc);
+ crm_log_xml_debug(msg, "failed");
+ return pcmk_legacy2rc(rc);
+ }
+
+ *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
+
+ if (*patchset == NULL) {
+ crm_err("CIB diff notification received with no patchset");
+ return ENOMSG;
+ }
+ return pcmk_rc_ok;
+}
+
+#define XPATH_DIFF_V1 "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED
+
+/*!
+ * \internal
+ * \brief Check whether a given CIB element was modified in a CIB patchset (v1)
+ *
+ * \param[in] patchset CIB XML patchset
+ * \param[in] element XML tag of CIB element to check (\c NULL is equivalent
+ * to \c XML_TAG_CIB)
+ *
+ * \return \c true if \p element was modified, or \c false otherwise
+ */
+static bool
+element_in_patchset_v1(const xmlNode *patchset, const char *element)
+{
+ char *xpath = crm_strdup_printf(XPATH_DIFF_V1 "//%s",
+ pcmk__s(element, XML_TAG_CIB));
+ xmlXPathObject *xpath_obj = xpath_search(patchset, xpath);
+
+ free(xpath);
+
+ if (xpath_obj == NULL) {
+ return false;
+ }
+ freeXpathObject(xpath_obj);
+ return true;
+}
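As a rough illustration (not part of the patch): the v1 check is just an XPath existence test. Assuming the usual expansions of F_CIB_UPDATE_RESULT ("cib_update_result") and XML_TAG_DIFF_ADDED ("diff-added"), a query for the "alerts" element searches a path of this shape:

    /* Hypothetical sketch only */
    char *xpath = crm_strdup_printf("//%s//%s//%s",
                                    "cib_update_result", "diff-added", "alerts");
    /* "//cib_update_result//diff-added//alerts": the element only needs to
     * appear somewhere under the "added" half of a v1 diff
     */
    free(xpath);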
+
+/*!
+ * \internal
+ * \brief Check whether a given CIB element was modified in a CIB patchset (v2)
+ *
+ * \param[in] patchset CIB XML patchset
+ * \param[in] element XML tag of CIB element to check (\c NULL is equivalent
+ * to \c XML_TAG_CIB). Supported values include any CIB
+ * element supported by \c pcmk__cib_abs_xpath_for().
+ *
+ * \return \c true if \p element was modified, or \c false otherwise
+ */
+static bool
+element_in_patchset_v2(const xmlNode *patchset, const char *element)
+{
+ const char *element_xpath = pcmk__cib_abs_xpath_for(element);
+ const char *parent_xpath = pcmk_cib_parent_name_for(element);
+ char *element_regex = NULL;
+ bool rc = false;
+
+ CRM_CHECK(element_xpath != NULL, return false); // Unsupported element
+
+ // Matches if and only if element_xpath is part of a changed path
+ element_regex = crm_strdup_printf("^%s(/|$)", element_xpath);
+
+ for (const xmlNode *change = first_named_child(patchset, XML_DIFF_CHANGE);
+ change != NULL; change = crm_next_same_xml(change)) {
+
+ const char *op = crm_element_value(change, F_CIB_OPERATION);
+ const char *diff_xpath = crm_element_value(change, XML_DIFF_PATH);
+
+ if (pcmk__str_eq(diff_xpath, element_regex, pcmk__str_regex)) {
+ // Change to an existing element
+ rc = true;
+ break;
+ }
+
+ if (pcmk__str_eq(op, "create", pcmk__str_none)
+ && pcmk__str_eq(diff_xpath, parent_xpath, pcmk__str_none)
+ && pcmk__xe_is(pcmk__xml_first_child(change), element)) {
+
+ // Newly added element
+ rc = true;
+ break;
+ }
+ }
+
+ free(element_regex);
+ return rc;
+}
+
+/*!
+ * \internal
+ * \brief Check whether a given CIB element was modified in a CIB patchset
+ *
+ * \param[in] patchset CIB XML patchset
+ * \param[in] element XML tag of CIB element to check (\c NULL is equivalent
+ * to \c XML_TAG_CIB). Supported values include any CIB
+ * element supported by \c pcmk__cib_abs_xpath_for().
+ *
+ * \return \c true if \p element was modified, or \c false otherwise
+ */
+bool
+cib__element_in_patchset(const xmlNode *patchset, const char *element)
+{
+ int format = 1;
+
+ CRM_ASSERT(patchset != NULL);
+
+ crm_element_value_int(patchset, PCMK_XA_FORMAT, &format);
+ switch (format) {
+ case 1:
+ return element_in_patchset_v1(patchset, element);
+
+ case 2:
+ return element_in_patchset_v2(patchset, element);
+
+ default:
+ crm_warn("Unknown patch format: %d", format);
+ return false;
+ }
+}
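A minimal usage sketch (illustration only, not part of the patch): a CIB diff notification callback can combine the two new helpers to react only when a particular section changes. The callback signature follows the existing add_notify_callback() convention, and XML_CIB_TAG_ALERTS is just an example element.

    static void
    diff_callback(const char *event, xmlNode *msg)
    {
        const xmlNode *patchset = NULL;

        if (cib__get_notify_patchset(msg, &patchset) != pcmk_rc_ok) {
            return; // Failed or malformed update; the helper already logged it
        }
        if (cib__element_in_patchset(patchset, XML_CIB_TAG_ALERTS)) {
            /* The <alerts> section was modified or newly created */
        }
    }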
+
+/*!
* \brief Create XML for a new (empty) CIB
*
* \param[in] cib_epoch What to use as "epoch" CIB property
@@ -141,30 +290,79 @@ cib_acl_enabled(xmlNode *xml, const char *user)
return rc;
}
+/*!
+ * \internal
+ * \brief Determine whether to perform operations on a scratch copy of the CIB
+ *
+ * \param[in] op CIB operation
+ * \param[in] section CIB section
+ * \param[in] call_options CIB call options
+ *
+ * \return \p true if we should make a copy of the CIB, or \p false otherwise
+ */
+static bool
+should_copy_cib(const char *op, const char *section, int call_options)
+{
+ if (pcmk_is_set(call_options, cib_dryrun)) {
+ // cib_dryrun implies a scratch copy by definition; no side effects
+ return true;
+ }
+
+ if (pcmk__str_eq(op, PCMK__CIB_REQUEST_COMMIT_TRANSACT, pcmk__str_none)) {
+ /* Commit-transaction must make a copy for atomicity. We must revert to
+ * the original CIB if the entire transaction cannot be applied
+ * successfully.
+ */
+ return true;
+ }
+
+ if (pcmk_is_set(call_options, cib_transaction)) {
+ /* If cib_transaction is set, then we're in the process of committing a
+ * transaction. The commit-transaction request already made a scratch
+ * copy, and we're accumulating changes in that copy.
+ */
+ return false;
+ }
+
+ if (pcmk__str_eq(section, XML_CIB_TAG_STATUS, pcmk__str_none)) {
+ /* Copying large CIBs accounts for a huge percentage of our CIB usage,
+ * and this avoids some of it.
+ *
+ * @TODO: Is this safe? See discussion at
+ * https://github.com/ClusterLabs/pacemaker/pull/3094#discussion_r1211400690.
+ */
+ return false;
+ }
+
+ // Default behavior is to operate on a scratch copy
+ return true;
+}
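In other words, the copy decision no longer hinges on callers passing cib_zero_copy; it is made here, inside the library. A hedged sketch of the resulting behavior (should_copy_cib() is file-local, so this is illustrative only):

    should_copy_cib("any-op", NULL, cib_dryrun);                          // true
    should_copy_cib(PCMK__CIB_REQUEST_COMMIT_TRANSACT, NULL, cib_none);   // true
    should_copy_cib(PCMK__CIB_REQUEST_MODIFY, NULL, cib_transaction);     // false
    should_copy_cib(PCMK__CIB_REQUEST_MODIFY, XML_CIB_TAG_STATUS, cib_none); // false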
+
int
-cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_query,
- const char *section, xmlNode * req, xmlNode * input,
- gboolean manage_counters, gboolean * config_changed,
- xmlNode * current_cib, xmlNode ** result_cib, xmlNode ** diff, xmlNode ** output)
+cib_perform_op(const char *op, int call_options, cib__op_fn_t fn, bool is_query,
+ const char *section, xmlNode *req, xmlNode *input,
+ bool manage_counters, bool *config_changed,
+ xmlNode **current_cib, xmlNode **result_cib, xmlNode **diff,
+ xmlNode **output)
{
int rc = pcmk_ok;
- gboolean check_schema = TRUE;
+ bool check_schema = true;
+ bool make_copy = true;
xmlNode *top = NULL;
xmlNode *scratch = NULL;
+ xmlNode *patchset_cib = NULL;
xmlNode *local_diff = NULL;
const char *new_version = NULL;
const char *user = crm_element_value(req, F_CIB_USER);
- bool with_digest = FALSE;
-
- pcmk__output_t *out = NULL;
- int out_rc = pcmk_rc_no_output;
+ bool with_digest = false;
crm_trace("Begin %s%s%s op",
(pcmk_is_set(call_options, cib_dryrun)? "dry run of " : ""),
(is_query? "read-only " : ""), op);
CRM_CHECK(output != NULL, return -ENOMSG);
+ CRM_CHECK(current_cib != NULL, return -ENOMSG);
CRM_CHECK(result_cib != NULL, return -ENOMSG);
CRM_CHECK(config_changed != NULL, return -ENOMSG);
@@ -173,25 +371,26 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer
}
*result_cib = NULL;
- *config_changed = FALSE;
+ *config_changed = false;
if (fn == NULL) {
return -EINVAL;
}
if (is_query) {
- xmlNode *cib_ro = current_cib;
+ xmlNode *cib_ro = *current_cib;
xmlNode *cib_filtered = NULL;
- if(cib_acl_enabled(cib_ro, user)) {
- if(xml_acl_filtered_copy(user, current_cib, current_cib, &cib_filtered)) {
- if (cib_filtered == NULL) {
- crm_debug("Pre-filtered the entire cib");
- return -EACCES;
- }
- cib_ro = cib_filtered;
- crm_log_xml_trace(cib_ro, "filtered");
+ if (cib_acl_enabled(cib_ro, user)
+ && xml_acl_filtered_copy(user, *current_cib, *current_cib,
+ &cib_filtered)) {
+
+ if (cib_filtered == NULL) {
+ crm_debug("Pre-filtered the entire cib");
+ return -EACCES;
}
+ cib_ro = cib_filtered;
+ crm_log_xml_trace(cib_ro, "filtered");
}
rc = (*fn) (op, call_options, section, req, input, cib_ro, result_cib, output);
@@ -202,14 +401,14 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer
} else if(cib_filtered == *output) {
cib_filtered = NULL; /* Let them have this copy */
- } else if(*output == current_cib) {
+ } else if (*output == *current_cib) {
/* They already know not to free it */
} else if(cib_filtered && (*output)->doc == cib_filtered->doc) {
/* We're about to free the document of which *output is a part */
*output = copy_xml(*output);
- } else if((*output)->doc == current_cib->doc) {
+ } else if ((*output)->doc == (*current_cib)->doc) {
/* Give them a copy they can free */
*output = copy_xml(*output);
}
@@ -218,31 +417,41 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer
return rc;
}
+ make_copy = should_copy_cib(op, section, call_options);
- if (pcmk_is_set(call_options, cib_zero_copy)) {
+ if (!make_copy) {
/* Conditional on v2 patch style */
- scratch = current_cib;
+ scratch = *current_cib;
- /* Create a shallow copy of current_cib for the version details */
- current_cib = create_xml_node(NULL, (const char *)scratch->name);
- copy_in_properties(current_cib, scratch);
- top = current_cib;
+ // Make a copy of the top-level element to store version details
+ top = create_xml_node(NULL, (const char *) scratch->name);
+ copy_in_properties(top, scratch);
+ patchset_cib = top;
xml_track_changes(scratch, user, NULL, cib_acl_enabled(scratch, user));
rc = (*fn) (op, call_options, section, req, input, scratch, &scratch, output);
+ /* If scratch points to a new object now (for example, after an erase
+ * operation), then *current_cib should point to the same object.
+ */
+ *current_cib = scratch;
+
} else {
- scratch = copy_xml(current_cib);
+ scratch = copy_xml(*current_cib);
+ patchset_cib = *current_cib;
+
xml_track_changes(scratch, user, NULL, cib_acl_enabled(scratch, user));
- rc = (*fn) (op, call_options, section, req, input, current_cib, &scratch, output);
+ rc = (*fn) (op, call_options, section, req, input, *current_cib,
+ &scratch, output);
- if(scratch && xml_tracking_changes(scratch) == FALSE) {
+ if ((scratch != NULL) && !xml_tracking_changes(scratch)) {
crm_trace("Inferring changes after %s op", op);
- xml_track_changes(scratch, user, current_cib, cib_acl_enabled(current_cib, user));
- xml_calculate_changes(current_cib, scratch);
+ xml_track_changes(scratch, user, *current_cib,
+ cib_acl_enabled(*current_cib, user));
+ xml_calculate_changes(*current_cib, scratch);
}
- CRM_CHECK(current_cib != scratch, return -EINVAL);
+ CRM_CHECK(*current_cib != scratch, return -EINVAL);
}
xml_acl_disable(scratch); /* Allow the system to make any additional changes */
@@ -271,12 +480,12 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer
}
}
- if (current_cib) {
+ if (patchset_cib != NULL) {
int old = 0;
int new = 0;
crm_element_value_int(scratch, XML_ATTR_GENERATION_ADMIN, &new);
- crm_element_value_int(current_cib, XML_ATTR_GENERATION_ADMIN, &old);
+ crm_element_value_int(patchset_cib, XML_ATTR_GENERATION_ADMIN, &old);
if (old > new) {
crm_err("%s went backwards: %d -> %d (Opts: %#x)",
@@ -287,7 +496,7 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer
} else if (old == new) {
crm_element_value_int(scratch, XML_ATTR_GENERATION, &new);
- crm_element_value_int(current_cib, XML_ATTR_GENERATION, &old);
+ crm_element_value_int(patchset_cib, XML_ATTR_GENERATION, &old);
if (old > new) {
crm_err("%s went backwards: %d -> %d (Opts: %#x)",
XML_ATTR_GENERATION, old, new, call_options);
@@ -302,13 +511,14 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer
pcmk__strip_xml_text(scratch);
fix_plus_plus_recursive(scratch);
- if (pcmk_is_set(call_options, cib_zero_copy)) {
- /* At this point, current_cib is just the 'cib' tag and its properties,
+ if (!make_copy) {
+ /* At this point, patchset_cib is just the "cib" tag and its properties.
*
* The v1 format would barf on this, but we know the v2 patch
* format only needs it for the top-level version fields
*/
- local_diff = xml_create_patchset(2, current_cib, scratch, (bool*)config_changed, manage_counters);
+ local_diff = xml_create_patchset(2, patchset_cib, scratch,
+ config_changed, manage_counters);
} else {
static time_t expires = 0;
@@ -316,63 +526,38 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer
if (expires < tm_now) {
expires = tm_now + 60; /* Validate clients are correctly applying v2-style diffs at most once a minute */
- with_digest = TRUE;
+ with_digest = true;
}
- local_diff = xml_create_patchset(0, current_cib, scratch, (bool*)config_changed, manage_counters);
+ local_diff = xml_create_patchset(0, patchset_cib, scratch,
+ config_changed, manage_counters);
}
- // Create a log output object only if we're going to use it
- pcmk__if_tracing(
- {
- rc = pcmk_rc2legacy(pcmk__log_output_new(&out));
- CRM_CHECK(rc == pcmk_ok, goto done);
-
- pcmk__output_set_log_level(out, LOG_TRACE);
- out_rc = pcmk__xml_show_changes(out, scratch);
- },
- {}
- );
+ pcmk__log_xml_changes(LOG_TRACE, scratch);
xml_accept_changes(scratch);
if(local_diff) {
- int temp_rc = pcmk_rc_no_output;
-
- patchset_process_digest(local_diff, current_cib, scratch, with_digest);
-
- if (out == NULL) {
- rc = pcmk_rc2legacy(pcmk__log_output_new(&out));
- CRM_CHECK(rc == pcmk_ok, goto done);
- }
- pcmk__output_set_log_level(out, LOG_INFO);
- temp_rc = out->message(out, "xml-patchset", local_diff);
- out_rc = pcmk__output_select_rc(rc, temp_rc);
-
+ patchset_process_digest(local_diff, patchset_cib, scratch, with_digest);
+ pcmk__log_xml_patchset(LOG_INFO, local_diff);
crm_log_xml_trace(local_diff, "raw patch");
}
- if (out != NULL) {
- out->finish(out, pcmk_rc2exitc(out_rc), true, NULL);
- pcmk__output_free(out);
- out = NULL;
- }
-
- if (!pcmk_is_set(call_options, cib_zero_copy) && (local_diff != NULL)) {
+ if (make_copy && (local_diff != NULL)) {
// Original to compare against doesn't exist
pcmk__if_tracing(
{
// Validate the calculated patch set
int test_rc = pcmk_ok;
int format = 1;
- xmlNode *cib_copy = copy_xml(current_cib);
+ xmlNode *cib_copy = copy_xml(patchset_cib);
- crm_element_value_int(local_diff, "format", &format);
+ crm_element_value_int(local_diff, PCMK_XA_FORMAT, &format);
test_rc = xml_apply_patchset(cib_copy, local_diff,
manage_counters);
if (test_rc != pcmk_ok) {
save_xml_to_file(cib_copy, "PatchApply:calculated", NULL);
- save_xml_to_file(current_cib, "PatchApply:input", NULL);
+ save_xml_to_file(patchset_cib, "PatchApply:input", NULL);
save_xml_to_file(scratch, "PatchApply:actual", NULL);
save_xml_to_file(local_diff, "PatchApply:diff", NULL);
crm_err("v%d patchset error, patch failed to apply: %s "
@@ -391,7 +576,7 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer
* a) we don't really care what's in the status section
* b) we don't validate any of its contents at the moment anyway
*/
- check_schema = FALSE;
+ check_schema = false;
}
/* === scratch must not be modified after this point ===
@@ -420,19 +605,35 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer
/* Does the CIB support the "update-*" attributes... */
if (current_schema >= minimum_schema) {
+ /* Ensure values of origin, client, and user in scratch match
+ * the values in req
+ */
const char *origin = crm_element_value(req, F_ORIG);
+ const char *client = crm_element_value(req, F_CIB_CLIENTNAME);
+
+ if (origin != NULL) {
+ crm_xml_add(scratch, XML_ATTR_UPDATE_ORIG, origin);
+ } else {
+ xml_remove_prop(scratch, XML_ATTR_UPDATE_ORIG);
+ }
- CRM_LOG_ASSERT(origin != NULL);
- crm_xml_replace(scratch, XML_ATTR_UPDATE_ORIG, origin);
- crm_xml_replace(scratch, XML_ATTR_UPDATE_CLIENT,
- crm_element_value(req, F_CIB_CLIENTNAME));
- crm_xml_replace(scratch, XML_ATTR_UPDATE_USER, crm_element_value(req, F_CIB_USER));
+ if (client != NULL) {
+ crm_xml_add(scratch, XML_ATTR_UPDATE_CLIENT, client);
+ } else {
+ xml_remove_prop(scratch, XML_ATTR_UPDATE_CLIENT);
+ }
+
+ if (user != NULL) {
+ crm_xml_add(scratch, XML_ATTR_UPDATE_USER, user);
+ } else {
+ xml_remove_prop(scratch, XML_ATTR_UPDATE_USER);
+ }
}
}
}
crm_trace("Perform validation: %s", pcmk__btoa(check_schema));
- if ((rc == pcmk_ok) && check_schema && !validate_xml(scratch, NULL, TRUE)) {
+ if ((rc == pcmk_ok) && check_schema && !validate_xml(scratch, NULL, true)) {
const char *current_schema = crm_element_value(scratch,
XML_ATTR_VALIDATION);
@@ -444,13 +645,17 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer
done:
*result_cib = scratch;
- if(rc != pcmk_ok && cib_acl_enabled(current_cib, user)) {
- if(xml_acl_filtered_copy(user, current_cib, scratch, result_cib)) {
- if (*result_cib == NULL) {
- crm_debug("Pre-filtered the entire cib result");
- }
- free_xml(scratch);
+
+ /* @TODO: This may not work correctly with !make_copy, since we don't
+ * keep the original CIB.
+ */
+ if ((rc != pcmk_ok) && cib_acl_enabled(patchset_cib, user)
+ && xml_acl_filtered_copy(user, patchset_cib, scratch, result_cib)) {
+
+ if (*result_cib == NULL) {
+ crm_debug("Pre-filtered the entire cib result");
}
+ free_xml(scratch);
}
if(diff) {
@@ -464,36 +669,117 @@ cib_perform_op(const char *op, int call_options, cib_op_t * fn, gboolean is_quer
return rc;
}
-xmlNode *
-cib_create_op(int call_id, const char *op, const char *host,
- const char *section, xmlNode *data, int call_options,
- const char *user_name)
+int
+cib__create_op(cib_t *cib, const char *op, const char *host,
+ const char *section, xmlNode *data, int call_options,
+ const char *user_name, const char *client_name,
+ xmlNode **op_msg)
{
- xmlNode *op_msg = create_xml_node(NULL, "cib_command");
+ CRM_CHECK((cib != NULL) && (op_msg != NULL), return -EPROTO);
- CRM_CHECK(op_msg != NULL, return NULL);
-
- crm_xml_add(op_msg, F_XML_TAGNAME, "cib_command");
+ *op_msg = create_xml_node(NULL, T_CIB_COMMAND);
+ if (*op_msg == NULL) {
+ return -EPROTO;
+ }
- crm_xml_add(op_msg, F_TYPE, T_CIB);
- crm_xml_add(op_msg, F_CIB_OPERATION, op);
- crm_xml_add(op_msg, F_CIB_HOST, host);
- crm_xml_add(op_msg, F_CIB_SECTION, section);
- crm_xml_add_int(op_msg, F_CIB_CALLID, call_id);
- if (user_name) {
- crm_xml_add(op_msg, F_CIB_USER, user_name);
+ cib->call_id++;
+ if (cib->call_id < 1) {
+ cib->call_id = 1;
}
+
+ crm_xml_add(*op_msg, F_XML_TAGNAME, T_CIB_COMMAND);
+ crm_xml_add(*op_msg, F_TYPE, T_CIB);
+ crm_xml_add(*op_msg, F_CIB_OPERATION, op);
+ crm_xml_add(*op_msg, F_CIB_HOST, host);
+ crm_xml_add(*op_msg, F_CIB_SECTION, section);
+ crm_xml_add(*op_msg, F_CIB_USER, user_name);
+ crm_xml_add(*op_msg, F_CIB_CLIENTNAME, client_name);
+ crm_xml_add_int(*op_msg, F_CIB_CALLID, cib->call_id);
+
crm_trace("Sending call options: %.8lx, %d", (long)call_options, call_options);
- crm_xml_add_int(op_msg, F_CIB_CALLOPTS, call_options);
+ crm_xml_add_int(*op_msg, F_CIB_CALLOPTS, call_options);
if (data != NULL) {
- add_message_xml(op_msg, F_CIB_CALLDATA, data);
+ add_message_xml(*op_msg, F_CIB_CALLDATA, data);
}
- if (call_options & cib_inhibit_bcast) {
- CRM_CHECK((call_options & cib_scope_local), return NULL);
+ if (pcmk_is_set(call_options, cib_inhibit_bcast)) {
+ CRM_CHECK(pcmk_is_set(call_options, cib_scope_local),
+ free_xml(*op_msg); return -EPROTO);
}
- return op_msg;
+ return pcmk_ok;
+}
+
+/*!
+ * \internal
+ * \brief Check whether a CIB request is supported in a transaction
+ *
+ * \param[in] request CIB request
+ *
+ * \return Standard Pacemaker return code
+ */
+static int
+validate_transaction_request(const xmlNode *request)
+{
+ const char *op = crm_element_value(request, F_CIB_OPERATION);
+ const char *host = crm_element_value(request, F_CIB_HOST);
+ const cib__operation_t *operation = NULL;
+ int rc = cib__get_operation(op, &operation);
+
+ if (rc != pcmk_rc_ok) {
+ // cib__get_operation() logs error
+ return rc;
+ }
+
+ if (!pcmk_is_set(operation->flags, cib__op_attr_transaction)) {
+ crm_err("Operation %s is not supported in CIB transactions", op);
+ return EOPNOTSUPP;
+ }
+
+ if (host != NULL) {
+ crm_err("Operation targeting a specific node (%s) is not supported in "
+ "a CIB transaction",
+ host);
+ return EOPNOTSUPP;
+ }
+ return pcmk_rc_ok;
+}
+
+/*!
+ * \internal
+ * \brief Append a CIB request to a CIB transaction
+ *
+ * \param[in,out] cib CIB client whose transaction to extend
+ * \param[in,out] request Request to add to transaction
+ *
+ * \return Legacy Pacemaker return code
+ */
+int
+cib__extend_transaction(cib_t *cib, xmlNode *request)
+{
+ int rc = pcmk_rc_ok;
+
+ CRM_ASSERT((cib != NULL) && (request != NULL));
+
+ rc = validate_transaction_request(request);
+
+ if ((rc == pcmk_rc_ok) && (cib->transaction == NULL)) {
+ rc = pcmk_rc_no_transaction;
+ }
+
+ if (rc == pcmk_rc_ok) {
+ add_node_copy(cib->transaction, request);
+
+ } else {
+ const char *op = crm_element_value(request, F_CIB_OPERATION);
+ const char *client_id = NULL;
+
+ cib->cmds->client_id(cib, NULL, &client_id);
+ crm_err("Failed to add '%s' operation to transaction for client %s: %s",
+ op, pcmk__s(client_id, "(unidentified)"), pcmk_rc_str(rc));
+ crm_log_xml_info(request, "failed");
+ }
+ return pcmk_rc2legacy(rc);
}
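Taken together with the cib_remote_perform_op() change earlier in this patch, a variant's perform-op handler now follows roughly this pattern (sketch only, using the handler's usual parameters; error handling trimmed):

    xmlNode *op_msg = NULL;
    int rc = cib__create_op(cib, op, host, section, data, call_options,
                            user_name, NULL, &op_msg);

    if (rc != pcmk_ok) {
        return rc;
    }
    if (pcmk_is_set(call_options, cib_transaction)) {
        // Queue the request on the client's pending transaction instead
        rc = cib__extend_transaction(cib, op_msg);
        free_xml(op_msg);
        return rc;
    }
    /* ... otherwise send op_msg to the CIB manager as before ... */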
void
@@ -701,16 +987,7 @@ cib_apply_patch_event(xmlNode *event, xmlNode *input, xmlNode **output,
}
if (level > LOG_CRIT) {
- pcmk__output_t *out = NULL;
-
- rc = pcmk_rc2legacy(pcmk__log_output_new(&out));
- CRM_CHECK(rc == pcmk_ok, return rc);
-
- pcmk__output_set_log_level(out, level);
- rc = out->message(out, "xml-patchset", diff);
- out->finish(out, pcmk_rc2exitc(rc), true, NULL);
- pcmk__output_free(out);
- rc = pcmk_ok;
+ pcmk__log_xml_patchset(level, diff);
}
if (input != NULL) {
diff --git a/lib/cluster/Makefile.am b/lib/cluster/Makefile.am
index 9225f29..2ddbffb 100644
--- a/lib/cluster/Makefile.am
+++ b/lib/cluster/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2004-2018 the Pacemaker project contributors
+# Copyright 2004-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -13,17 +13,20 @@ noinst_HEADERS = crmcluster_private.h
## libraries
lib_LTLIBRARIES = libcrmcluster.la
-libcrmcluster_la_LDFLAGS = -version-info 30:0:1
+libcrmcluster_la_LDFLAGS = -version-info 31:0:2
libcrmcluster_la_CFLAGS = $(CFLAGS_HARDENED_LIB)
libcrmcluster_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB)
-libcrmcluster_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la $(top_builddir)/lib/fencing/libstonithd.la $(CLUSTERLIBS)
+libcrmcluster_la_LIBADD = $(top_builddir)/lib/fencing/libstonithd.la
+libcrmcluster_la_LIBADD += $(top_builddir)/lib/common/libcrmcommon.la
+libcrmcluster_la_LIBADD += $(CLUSTERLIBS)
-libcrmcluster_la_SOURCES = election.c cluster.c membership.c
+## Library sources (*must* use += format for bumplibs)
+libcrmcluster_la_SOURCES = cluster.c
+libcrmcluster_la_SOURCES += election.c
+libcrmcluster_la_SOURCES += membership.c
if BUILD_CS_SUPPORT
-libcrmcluster_la_SOURCES += cpg.c corosync.c
+libcrmcluster_la_SOURCES += corosync.c
+libcrmcluster_la_SOURCES += cpg.c
endif
-
-clean-generic:
- rm -f *.log *.debug *.xml *~
diff --git a/lib/cluster/cluster.c b/lib/cluster/cluster.c
index 011e053..f2cd428 100644
--- a/lib/cluster/cluster.c
+++ b/lib/cluster/cluster.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -160,7 +160,7 @@ pcmk_cluster_free(crm_cluster_t *cluster)
*/
gboolean
send_cluster_message(const crm_node_t *node, enum crm_ais_msg_types service,
- xmlNode *data, gboolean ordered)
+ const xmlNode *data, gboolean ordered)
{
switch (get_cluster_type()) {
case pcmk_cluster_corosync:
@@ -280,7 +280,7 @@ crm_peer_uname(const char *uuid)
return NULL;
}
- node = pcmk__search_cluster_node_cache((uint32_t) id, NULL);
+ node = pcmk__search_cluster_node_cache((uint32_t) id, NULL, NULL);
if (node != NULL) {
crm_info("Setting uuid for node %s[%u] to %s",
node->uname, node->id, uuid);
@@ -294,19 +294,6 @@ crm_peer_uname(const char *uuid)
}
/*!
- * \brief Add a node's UUID as an XML attribute
- *
- * \param[in,out] xml XML element to add UUID to
- * \param[in] attr XML attribute name to set
- * \param[in,out] node Node whose UUID should be used as attribute value
- */
-void
-set_uuid(xmlNode *xml, const char *attr, crm_node_t *node)
-{
- crm_xml_add(xml, attr, crm_peer_uuid(node));
-}
-
-/*!
* \brief Get a log-friendly string equivalent of a cluster type
*
* \param[in] type Cluster type
@@ -403,3 +390,17 @@ is_corosync_cluster(void)
{
return get_cluster_type() == pcmk_cluster_corosync;
}
+
+// Deprecated functions kept only for backward API compatibility
+// LCOV_EXCL_START
+
+#include <crm/cluster/compat.h>
+
+void
+set_uuid(xmlNode *xml, const char *attr, crm_node_t *node)
+{
+ crm_xml_add(xml, attr, crm_peer_uuid(node));
+}
+
+// LCOV_EXCL_STOP
+// End deprecated API
diff --git a/lib/cluster/cpg.c b/lib/cluster/cpg.c
index 2af4a50..d1decc6 100644
--- a/lib/cluster/cpg.c
+++ b/lib/cluster/cpg.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -506,14 +506,14 @@ pcmk_message_common_cs(cpg_handle_t handle, uint32_t nodeid, uint32_t pid, void
uncompressed = calloc(1, new_size);
rc = BZ2_bzBuffToBuffDecompress(uncompressed, &new_size, msg->data, msg->compressed_size, 1, 0);
- if (rc != BZ_OK) {
- crm_err("Decompression failed: %s " CRM_XS " bzerror=%d",
- bz2_strerror(rc), rc);
+ rc = pcmk__bzlib2rc(rc);
+
+ if (rc != pcmk_rc_ok) {
+ crm_err("Decompression failed: %s " CRM_XS " rc=%d", pcmk_rc_str(rc), rc);
free(uncompressed);
goto badmsg;
}
- CRM_ASSERT(rc == BZ_OK);
CRM_ASSERT(new_size == msg->size);
data = uncompressed;
@@ -628,7 +628,7 @@ node_left(const char *cpg_group_name, int event_counter,
size_t member_list_entries)
{
crm_node_t *peer = pcmk__search_cluster_node_cache(cpg_peer->nodeid,
- NULL);
+ NULL, NULL);
const struct cpg_address **rival = NULL;
/* Most CPG-related Pacemaker code assumes that only one process on a node
@@ -888,11 +888,11 @@ cluster_connect_cpg(crm_cluster_t *cluster)
*
* \return TRUE on success, otherwise FALSE
*/
-gboolean
-pcmk__cpg_send_xml(xmlNode *msg, const crm_node_t *node,
+bool
+pcmk__cpg_send_xml(const xmlNode *msg, const crm_node_t *node,
enum crm_ais_msg_types dest)
{
- gboolean rc = TRUE;
+ bool rc = true;
char *data = NULL;
data = dump_xml_unformatted(msg);
diff --git a/lib/cluster/crmcluster_private.h b/lib/cluster/crmcluster_private.h
index 6933b73..370bca5 100644
--- a/lib/cluster/crmcluster_private.h
+++ b/lib/cluster/crmcluster_private.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-2022 the Pacemaker project contributors
+ * Copyright 2020-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -41,7 +41,7 @@ G_GNUC_INTERNAL
void pcmk__corosync_disconnect(crm_cluster_t *cluster);
G_GNUC_INTERNAL
-gboolean pcmk__cpg_send_xml(xmlNode *msg, const crm_node_t *node,
- enum crm_ais_msg_types dest);
+bool pcmk__cpg_send_xml(const xmlNode *msg, const crm_node_t *node,
+ enum crm_ais_msg_types dest);
#endif // PCMK__CRMCLUSTER_PRIVATE__H
diff --git a/lib/cluster/membership.c b/lib/cluster/membership.c
index 0c54f19..f856cca 100644
--- a/lib/cluster/membership.c
+++ b/lib/cluster/membership.c
@@ -157,7 +157,7 @@ crm_remote_peer_cache_remove(const char *node_name)
*
* \param[in] node_state XML of node state
*
- * \return CRM_NODE_LOST if XML_NODE_IN_CLUSTER is false in node_state,
+ * \return CRM_NODE_LOST if PCMK__XA_IN_CCM is false in node_state,
* CRM_NODE_MEMBER otherwise
* \note Unlike most boolean XML attributes, this one defaults to true, for
* backward compatibility with older controllers that don't set it.
@@ -167,7 +167,8 @@ remote_state_from_cib(const xmlNode *node_state)
{
bool status = false;
- if (pcmk__xe_get_bool_attr(node_state, XML_NODE_IN_CLUSTER, &status) == pcmk_rc_ok && !status) {
+ if ((pcmk__xe_get_bool_attr(node_state, PCMK__XA_IN_CCM,
+ &status) == pcmk_rc_ok) && !status) {
return CRM_NODE_LOST;
} else {
return CRM_NODE_MEMBER;
@@ -515,7 +516,7 @@ pcmk__search_node_caches(unsigned int id, const char *uname, uint32_t flags)
}
if ((node == NULL) && pcmk_is_set(flags, CRM_GET_PEER_CLUSTER)) {
- node = pcmk__search_cluster_node_cache(id, uname);
+ node = pcmk__search_cluster_node_cache(id, uname, NULL);
}
return node;
}
@@ -525,12 +526,15 @@ pcmk__search_node_caches(unsigned int id, const char *uname, uint32_t flags)
*
* \param[in] id If not 0, cluster node ID to search for
* \param[in] uname If not NULL, node name to search for
+ * \param[in] uuid If not NULL while id is 0, node UUID instead of cluster
+ * node ID to search for
* \param[in] flags Bitmask of enum crm_get_peer_flags
*
* \return (Possibly newly created) node cache entry
*/
crm_node_t *
-crm_get_peer_full(unsigned int id, const char *uname, int flags)
+pcmk__get_peer_full(unsigned int id, const char *uname, const char *uuid,
+ int flags)
{
crm_node_t *node = NULL;
@@ -543,22 +547,40 @@ crm_get_peer_full(unsigned int id, const char *uname, int flags)
}
if ((node == NULL) && pcmk_is_set(flags, CRM_GET_PEER_CLUSTER)) {
- node = crm_get_peer(id, uname);
+ node = pcmk__get_peer(id, uname, uuid);
}
return node;
}
/*!
+ * \brief Get a node cache entry (cluster or Pacemaker Remote)
+ *
+ * \param[in] id If not 0, cluster node ID to search for
+ * \param[in] uname If not NULL, node name to search for
+ * \param[in] flags Bitmask of enum crm_get_peer_flags
+ *
+ * \return (Possibly newly created) node cache entry
+ */
+crm_node_t *
+crm_get_peer_full(unsigned int id, const char *uname, int flags)
+{
+ return pcmk__get_peer_full(id, uname, NULL, flags);
+}
+
+/*!
* \internal
* \brief Search cluster node cache
*
* \param[in] id If not 0, cluster node ID to search for
* \param[in] uname If not NULL, node name to search for
+ * \param[in] uuid If not NULL while id is 0, node UUID instead of cluster
+ * node ID to search for
*
* \return Cluster node cache entry if found, otherwise NULL
*/
crm_node_t *
-pcmk__search_cluster_node_cache(unsigned int id, const char *uname)
+pcmk__search_cluster_node_cache(unsigned int id, const char *uname,
+ const char *uuid)
{
GHashTableIter iter;
crm_node_t *node = NULL;
@@ -589,6 +611,16 @@ pcmk__search_cluster_node_cache(unsigned int id, const char *uname)
break;
}
}
+
+ } else if (uuid != NULL) {
+ g_hash_table_iter_init(&iter, crm_peer_cache);
+ while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
+ if (pcmk__str_eq(node->uuid, uuid, pcmk__str_casei)) {
+ crm_trace("UUID match: %s = %p", node->uuid, node);
+ by_id = node;
+ break;
+ }
+ }
}
node = by_id; /* Good default */
@@ -693,12 +725,14 @@ remove_conflicting_peer(crm_node_t *node)
*
* \param[in] id If not 0, cluster node ID to search for
* \param[in] uname If not NULL, node name to search for
+ * \param[in] uuid If not NULL while id is 0, node UUID instead of cluster
+ * node ID to search for
*
* \return (Possibly newly created) cluster node cache entry
*/
/* coverity[-alloc] Memory is referenced in one or both hashtables */
crm_node_t *
-crm_get_peer(unsigned int id, const char *uname)
+pcmk__get_peer(unsigned int id, const char *uname, const char *uuid)
{
crm_node_t *node = NULL;
char *uname_lookup = NULL;
@@ -707,7 +741,7 @@ crm_get_peer(unsigned int id, const char *uname)
crm_peer_init();
- node = pcmk__search_cluster_node_cache(id, uname);
+ node = pcmk__search_cluster_node_cache(id, uname, uuid);
/* if uname wasn't provided, and find_peer did not turn up a uname based on id,
* we need to do a lookup of the node name using the id in the cluster membership. */
@@ -721,7 +755,7 @@ crm_get_peer(unsigned int id, const char *uname)
/* try to turn up the node one more time now that we know the uname. */
if (node == NULL) {
- node = pcmk__search_cluster_node_cache(id, uname);
+ node = pcmk__search_cluster_node_cache(id, uname, uuid);
}
}
@@ -750,7 +784,9 @@ crm_get_peer(unsigned int id, const char *uname)
}
if(node->uuid == NULL) {
- const char *uuid = crm_peer_uuid(node);
+ if (uuid == NULL) {
+ uuid = crm_peer_uuid(node);
+ }
if (uuid) {
crm_info("Node %u has uuid %s", id, uuid);
@@ -766,6 +802,21 @@ crm_get_peer(unsigned int id, const char *uname)
}
/*!
+ * \brief Get a cluster node cache entry
+ *
+ * \param[in] id If not 0, cluster node ID to search for
+ * \param[in] uname If not NULL, node name to search for
+ *
+ * \return (Possibly newly created) cluster node cache entry
+ */
+/* coverity[-alloc] Memory is referenced in one or both hashtables */
+crm_node_t *
+crm_get_peer(unsigned int id, const char *uname)
+{
+ return pcmk__get_peer(id, uname, NULL);
+}
+
+/*!
* \internal
* \brief Update a node's uname
*
@@ -917,6 +968,13 @@ crm_update_peer_proc(const char *source, crm_node_t * node, uint32_t flag, const
proc2text(flag), status);
}
+ if (pcmk_is_set(node->processes, crm_get_cluster_proc())) {
+ node->when_online = time(NULL);
+
+ } else {
+ node->when_online = 0;
+ }
+
/* Call the client callback first, then update the peer state,
* in case the node will be reaped
*/
@@ -1025,6 +1083,13 @@ update_peer_state_iter(const char *source, crm_node_t *node, const char *state,
if (state && !pcmk__str_eq(node->state, state, pcmk__str_casei)) {
char *last = node->state;
+ if (is_member) {
+ node->when_member = time(NULL);
+
+ } else {
+ node->when_member = 0;
+ }
+
node->state = strdup(state);
crm_notice("Node %s state is now %s " CRM_XS
" nodeid=%u previous=%s source=%s", node->uname, state,
diff --git a/lib/common/Makefile.am b/lib/common/Makefile.am
index ef729d4..f9c43b9 100644
--- a/lib/common/Makefile.am
+++ b/lib/common/Makefile.am
@@ -8,7 +8,8 @@
#
include $(top_srcdir)/mk/common.mk
-AM_CPPFLAGS += -I$(top_builddir)/lib/gnu -I$(top_srcdir)/lib/gnu
+AM_CPPFLAGS += -I$(top_builddir)/lib/gnu \
+ -I$(top_srcdir)/lib/gnu
## libraries
lib_LTLIBRARIES = libcrmcommon.la
@@ -29,14 +30,16 @@ CFLAGS = $(CFLAGS_COPY:-Wcast-qual=) -fPIC
# changes the order so the subdirectories are processed afterwards.
SUBDIRS = . tests
-noinst_HEADERS = crmcommon_private.h mock_private.h
+noinst_HEADERS = crmcommon_private.h \
+ mock_private.h
-libcrmcommon_la_LDFLAGS = -version-info 45:0:11
+libcrmcommon_la_LDFLAGS = -version-info 46:0:12
libcrmcommon_la_CFLAGS = $(CFLAGS_HARDENED_LIB)
libcrmcommon_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB)
-libcrmcommon_la_LIBADD = @LIBADD_DL@ $(top_builddir)/lib/gnu/libgnu.la
+libcrmcommon_la_LIBADD = @LIBADD_DL@ \
+ $(top_builddir)/lib/gnu/libgnu.la
# If configured with --with-profiling or --with-coverage, BUILD_PROFILING will
# be set and -fno-builtin will be added to the CFLAGS. However, libcrmcommon
@@ -47,9 +50,10 @@ if BUILD_PROFILING
libcrmcommon_la_LIBADD += -lm
endif
-# Use += rather than backlashed continuation lines for parsing by bumplibs
+## Library sources (*must* use += format for bumplibs)
libcrmcommon_la_SOURCES =
libcrmcommon_la_SOURCES += acl.c
+libcrmcommon_la_SOURCES += actions.c
libcrmcommon_la_SOURCES += agents.c
libcrmcommon_la_SOURCES += alerts.c
libcrmcommon_la_SOURCES += attrs.c
@@ -75,7 +79,6 @@ libcrmcommon_la_SOURCES += mainloop.c
libcrmcommon_la_SOURCES += messages.c
libcrmcommon_la_SOURCES += nodes.c
libcrmcommon_la_SOURCES += nvpair.c
-libcrmcommon_la_SOURCES += operations.c
libcrmcommon_la_SOURCES += options.c
libcrmcommon_la_SOURCES += output.c
libcrmcommon_la_SOURCES += output_html.c
@@ -89,12 +92,14 @@ libcrmcommon_la_SOURCES += pid.c
libcrmcommon_la_SOURCES += procfs.c
libcrmcommon_la_SOURCES += remote.c
libcrmcommon_la_SOURCES += results.c
+libcrmcommon_la_SOURCES += scheduler.c
libcrmcommon_la_SOURCES += schemas.c
libcrmcommon_la_SOURCES += scores.c
libcrmcommon_la_SOURCES += strings.c
libcrmcommon_la_SOURCES += utils.c
libcrmcommon_la_SOURCES += watchdog.c
libcrmcommon_la_SOURCES += xml.c
+libcrmcommon_la_SOURCES += xml_attr.c
libcrmcommon_la_SOURCES += xml_display.c
libcrmcommon_la_SOURCES += xpath.c
@@ -107,18 +112,22 @@ include $(top_srcdir)/mk/tap.mk
libcrmcommon_test_la_SOURCES = $(libcrmcommon_la_SOURCES)
libcrmcommon_test_la_SOURCES += mock.c
-libcrmcommon_test_la_LDFLAGS = $(libcrmcommon_la_LDFLAGS) -rpath $(libdir) $(LDFLAGS_WRAP)
+libcrmcommon_test_la_LDFLAGS = $(libcrmcommon_la_LDFLAGS) \
+ -rpath $(libdir) \
+ $(LDFLAGS_WRAP)
# If GCC emits a builtin function in place of something we've mocked up, that will
# get used instead of the mocked version which leads to unexpected test results. So
# disable all builtins. Older versions of GCC (at least, on RHEL7) will still emit
# replacement code for strdup (and possibly other functions) unless -fno-inline is
# also added.
-libcrmcommon_test_la_CFLAGS = $(libcrmcommon_la_CFLAGS) -DPCMK__UNIT_TESTING -fno-builtin -fno-inline
+libcrmcommon_test_la_CFLAGS = $(libcrmcommon_la_CFLAGS) \
+ -DPCMK__UNIT_TESTING \
+ -fno-builtin \
+ -fno-inline
# If -fno-builtin is used, -lm also needs to be added. See the comment at
# BUILD_PROFILING above.
-libcrmcommon_test_la_LIBADD = $(libcrmcommon_la_LIBADD) -lcmocka -lm
+libcrmcommon_test_la_LIBADD = $(libcrmcommon_la_LIBADD) \
+ -lcmocka \
+ -lm
nodist_libcrmcommon_test_la_SOURCES = $(nodist_libcrmcommon_la_SOURCES)
-
-clean-generic:
- rm -f *.log *.debug *.xml *~
diff --git a/lib/common/acl.c b/lib/common/acl.c
index 33a4e00..1ebd765 100644
--- a/lib/common/acl.c
+++ b/lib/common/acl.c
@@ -26,7 +26,7 @@
typedef struct xml_acl_s {
enum xml_private_flags mode;
- char *xpath;
+ gchar *xpath;
} xml_acl_t;
static void
@@ -35,7 +35,7 @@ free_acl(void *data)
if (data) {
xml_acl_t *acl = data;
- free(acl->xpath);
+ g_free(acl->xpath);
free(acl);
}
}
@@ -68,7 +68,7 @@ create_acl(const xmlNode *xml, GList *acls, enum xml_private_flags mode)
if ((tag == NULL) && (ref == NULL) && (xpath == NULL)) {
// Schema should prevent this, but to be safe ...
crm_trace("Ignoring ACL <%s> element without selection criteria",
- crm_element_name(xml));
+ xml->name);
return NULL;
}
@@ -77,10 +77,9 @@ create_acl(const xmlNode *xml, GList *acls, enum xml_private_flags mode)
acl->mode = mode;
if (xpath) {
- acl->xpath = strdup(xpath);
- CRM_ASSERT(acl->xpath != NULL);
+ acl->xpath = g_strdup(xpath);
crm_trace("Unpacked ACL <%s> element using xpath: %s",
- crm_element_name(xml), acl->xpath);
+ xml->name, acl->xpath);
} else {
GString *buf = g_string_sized_new(128);
@@ -101,12 +100,11 @@ create_acl(const xmlNode *xml, GList *acls, enum xml_private_flags mode)
pcmk__g_strcat(buf, "//", pcmk__s(tag, "*"), NULL);
}
- acl->xpath = strdup((const char *) buf->str);
- CRM_ASSERT(acl->xpath != NULL);
+ acl->xpath = buf->str;
- g_string_free(buf, TRUE);
+ g_string_free(buf, FALSE);
crm_trace("Unpacked ACL <%s> element as xpath: %s",
- crm_element_name(xml), acl->xpath);
+ xml->name, acl->xpath);
}
return g_list_append(acls, acl);
@@ -131,10 +129,10 @@ parse_acl_entry(const xmlNode *acl_top, const xmlNode *acl_entry, GList *acls)
for (child = pcmk__xe_first_child(acl_entry); child;
child = pcmk__xe_next(child)) {
- const char *tag = crm_element_name(child);
+ const char *tag = (const char *) child->name;
const char *kind = crm_element_value(child, XML_ACL_ATTR_KIND);
- if (strcmp(XML_ACL_TAG_PERMISSION, tag) == 0){
+ if (pcmk__xe_is(child, XML_ACL_TAG_PERMISSION)) {
CRM_ASSERT(kind != NULL);
crm_trace("Unpacking ACL <%s> element of kind '%s'", tag, kind);
tag = kind;
@@ -157,7 +155,7 @@ parse_acl_entry(const xmlNode *acl_top, const xmlNode *acl_entry, GList *acls)
if (role_id && strcmp(ref_role, role_id) == 0) {
crm_trace("Unpacking referenced role '%s' in ACL <%s> element",
- role_id, crm_element_name(acl_entry));
+ role_id, acl_entry->name);
acls = parse_acl_entry(acl_top, role, acls);
break;
}
@@ -304,10 +302,9 @@ pcmk__unpack_acl(xmlNode *source, xmlNode *target, const char *user)
for (child = pcmk__xe_first_child(acls); child;
child = pcmk__xe_next(child)) {
- const char *tag = crm_element_name(child);
- if (!strcmp(tag, XML_ACL_TAG_USER)
- || !strcmp(tag, XML_ACL_TAG_USERv1)) {
+ if (pcmk__xe_is(child, XML_ACL_TAG_USER)
+ || pcmk__xe_is(child, XML_ACL_TAG_USERv1)) {
const char *id = crm_element_value(child, XML_ATTR_NAME);
if (id == NULL) {
@@ -318,7 +315,7 @@ pcmk__unpack_acl(xmlNode *source, xmlNode *target, const char *user)
crm_debug("Unpacking ACLs for user '%s'", id);
docpriv->acls = parse_acl_entry(acls, child, docpriv->acls);
}
- } else if (!strcmp(tag, XML_ACL_TAG_GROUP)) {
+ } else if (pcmk__xe_is(child, XML_ACL_TAG_GROUP)) {
const char *id = crm_element_value(child, XML_ATTR_NAME);
if (id == NULL) {
@@ -392,7 +389,7 @@ purge_xml_attributes(xmlNode *xml)
if (test_acl_mode(nodepriv->flags, pcmk__xf_acl_read)) {
crm_trace("%s[@" XML_ATTR_ID "=%s] is readable",
- crm_element_name(xml), ID(xml));
+ xml->name, ID(xml));
return true;
}
@@ -571,22 +568,22 @@ pcmk__apply_creation_acl(xmlNode *xml, bool check_top)
if (implicitly_allowed(xml)) {
crm_trace("Creation of <%s> scaffolding with id=\"%s\""
" is implicitly allowed",
- crm_element_name(xml), display_id(xml));
+ xml->name, display_id(xml));
} else if (pcmk__check_acl(xml, NULL, pcmk__xf_acl_write)) {
crm_trace("ACLs allow creation of <%s> with id=\"%s\"",
- crm_element_name(xml), display_id(xml));
+ xml->name, display_id(xml));
} else if (check_top) {
crm_trace("ACLs disallow creation of <%s> with id=\"%s\"",
- crm_element_name(xml), display_id(xml));
+ xml->name, display_id(xml));
pcmk_free_xml_subtree(xml);
return;
} else {
crm_notice("ACLs would disallow creation of %s<%s> with id=\"%s\"",
((xml == xmlDocGetRootElement(xml->doc))? "root element " : ""),
- crm_element_name(xml), display_id(xml));
+ xml->name, display_id(xml));
}
}
diff --git a/lib/common/operations.c b/lib/common/actions.c
index 3db96cd..e710615 100644
--- a/lib/common/operations.c
+++ b/lib/common/actions.c
@@ -107,15 +107,15 @@ parse_op_key(const char *key, char **rsc_id, char **op_type, guint *interval_ms)
* contain underbars. Here, list action names and name prefixes that can.
*/
const char *actions_with_underbars[] = {
- CRMD_ACTION_MIGRATED,
- CRMD_ACTION_MIGRATE,
+ PCMK_ACTION_MIGRATE_FROM,
+ PCMK_ACTION_MIGRATE_TO,
NULL
};
const char *action_prefixes_with_underbars[] = {
- "pre_" CRMD_ACTION_NOTIFY,
- "post_" CRMD_ACTION_NOTIFY,
- "confirmed-pre_" CRMD_ACTION_NOTIFY,
- "confirmed-post_" CRMD_ACTION_NOTIFY,
+ "pre_" PCMK_ACTION_NOTIFY,
+ "post_" PCMK_ACTION_NOTIFY,
+ "confirmed-pre_" PCMK_ACTION_NOTIFY,
+ "confirmed-post_" PCMK_ACTION_NOTIFY,
NULL,
};
@@ -470,11 +470,11 @@ crm_op_needs_metadata(const char *rsc_class, const char *op)
}
// Metadata is needed only for these actions
- return pcmk__str_any_of(op, CRMD_ACTION_START, CRMD_ACTION_STATUS,
- CRMD_ACTION_PROMOTE, CRMD_ACTION_DEMOTE,
- CRMD_ACTION_RELOAD, CRMD_ACTION_RELOAD_AGENT,
- CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED,
- CRMD_ACTION_NOTIFY, NULL);
+ return pcmk__str_any_of(op, PCMK_ACTION_START, PCMK_ACTION_MONITOR,
+ PCMK_ACTION_PROMOTE, PCMK_ACTION_DEMOTE,
+ PCMK_ACTION_RELOAD, PCMK_ACTION_RELOAD_AGENT,
+ PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM,
+ PCMK_ACTION_NOTIFY, NULL);
}
/*!
@@ -488,7 +488,8 @@ crm_op_needs_metadata(const char *rsc_class, const char *op)
bool
pcmk__is_fencing_action(const char *action)
{
- return pcmk__str_any_of(action, "off", "reboot", "poweroff", NULL);
+ return pcmk__str_any_of(action, PCMK_ACTION_OFF, PCMK_ACTION_REBOOT,
+ "poweroff", NULL);
}
bool
@@ -498,7 +499,8 @@ pcmk_is_probe(const char *task, guint interval)
return false;
}
- return (interval == 0) && pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_none);
+ return (interval == 0)
+ && pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_none);
}
bool
diff --git a/lib/common/alerts.c b/lib/common/alerts.c
index abdadef..98b1e3f 100644
--- a/lib/common/alerts.c
+++ b/lib/common/alerts.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2015-2022 the Pacemaker project contributors
+ * Copyright 2015-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -12,8 +12,8 @@
#include <crm/lrmd.h>
#include <crm/msg_xml.h>
#include <crm/common/alerts_internal.h>
+#include <crm/common/cib_internal.h>
#include <crm/common/xml_internal.h>
-#include <crm/cib/internal.h> /* for F_CIB_UPDATE_RESULT */
/*
* to allow script compatibility we can have more than one
@@ -168,86 +168,3 @@ pcmk__add_alert_key_int(GHashTable *table, enum pcmk__alert_keys_e name,
g_hash_table_insert(table, strdup(*key), pcmk__itoa(value));
}
}
-
-#define XPATH_PATCHSET1_DIFF "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED
-
-#define XPATH_PATCHSET1_CRMCONFIG XPATH_PATCHSET1_DIFF "//" XML_CIB_TAG_CRMCONFIG
-#define XPATH_PATCHSET1_ALERTS XPATH_PATCHSET1_DIFF "//" XML_CIB_TAG_ALERTS
-
-#define XPATH_PATCHSET1_EITHER \
- XPATH_PATCHSET1_CRMCONFIG " | " XPATH_PATCHSET1_ALERTS
-
-#define XPATH_CONFIG "/" XML_TAG_CIB "/" XML_CIB_TAG_CONFIGURATION
-
-#define XPATH_CRMCONFIG XPATH_CONFIG "/" XML_CIB_TAG_CRMCONFIG "/"
-#define XPATH_ALERTS XPATH_CONFIG "/" XML_CIB_TAG_ALERTS
-
-/*!
- * \internal
- * \brief Check whether a CIB update affects alerts
- *
- * \param[in] msg XML containing CIB update
- * \param[in] config Whether to check for crmconfig change as well
- *
- * \return TRUE if update affects alerts, FALSE otherwise
- */
-bool
-pcmk__alert_in_patchset(xmlNode *msg, bool config)
-{
- int rc = -1;
- int format= 1;
- xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
- xmlNode *change = NULL;
- xmlXPathObject *xpathObj = NULL;
-
- CRM_CHECK(msg != NULL, return FALSE);
-
- crm_element_value_int(msg, F_CIB_RC, &rc);
- if (rc < pcmk_ok) {
- crm_trace("Ignore failed CIB update: %s (%d)", pcmk_strerror(rc), rc);
- return FALSE;
- }
-
- crm_element_value_int(patchset, "format", &format);
- if (format == 1) {
- const char *diff = (config? XPATH_PATCHSET1_EITHER : XPATH_PATCHSET1_ALERTS);
-
- if ((xpathObj = xpath_search(msg, diff)) != NULL) {
- freeXpathObject(xpathObj);
- return TRUE;
- }
- } else if (format == 2) {
- for (change = pcmk__xml_first_child(patchset); change != NULL;
- change = pcmk__xml_next(change)) {
- const char *xpath = crm_element_value(change, XML_DIFF_PATH);
-
- if (xpath == NULL) {
- continue;
- }
-
- if ((!config || !strstr(xpath, XPATH_CRMCONFIG))
- && !strstr(xpath, XPATH_ALERTS)) {
-
- /* this is not a change to an existing section ... */
-
- xmlNode *section = NULL;
- const char *name = NULL;
-
- if ((strcmp(xpath, XPATH_CONFIG) != 0) ||
- ((section = pcmk__xml_first_child(change)) == NULL) ||
- ((name = crm_element_name(section)) == NULL) ||
- (strcmp(name, XML_CIB_TAG_ALERTS) != 0)) {
-
- /* ... nor is it a newly added alerts section */
- continue;
- }
- }
-
- return TRUE;
- }
-
- } else {
- crm_warn("Unknown patch format: %d", format);
- }
- return FALSE;
-}
diff --git a/lib/common/cib.c b/lib/common/cib.c
index b84c5e8..fee7881 100644
--- a/lib/common/cib.c
+++ b/lib/common/cib.c
@@ -1,6 +1,6 @@
/*
* Original copyright 2004 International Business Machines
- * Later changes copyright 2008-2021 the Pacemaker project contributors
+ * Later changes copyright 2008-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -14,6 +14,8 @@
#include <libxml/tree.h> // xmlNode
#include <crm/msg_xml.h>
+#include <crm/common/cib.h>
+#include <crm/common/cib_internal.h>
/*
* Functions to help find particular sections of the CIB
@@ -99,7 +101,7 @@ static struct {
};
/*!
- * \brief Get the XPath needed to find a specified CIB element name
+ * \brief Get the relative XPath needed to find a specified CIB element name
*
* \param[in] element_name Name of CIB element
*
@@ -120,6 +122,23 @@ pcmk_cib_xpath_for(const char *element_name)
}
/*!
+ * \internal
+ * \brief Get the absolute XPath needed to find a specified CIB element name
+ *
+ * \param[in] element Name of CIB element
+ *
+ * \return XPath for finding \p element in CIB XML (or \c NULL if unknown)
+ */
+const char *
+pcmk__cib_abs_xpath_for(const char *element)
+{
+ const char *xpath = pcmk_cib_xpath_for(element);
+
+ // XPaths returned by pcmk_cib_xpath_for() are relative (starting with "//")
+ return ((xpath != NULL)? (xpath + 1) : NULL);
+}
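A quick example of the relative/absolute distinction (the strings shown are assumptions based on the usual CIB layout, not verified output):

    // Relative form, for use in XPath searches
    const char *rel = pcmk_cib_xpath_for(XML_CIB_TAG_ALERTS);
    // e.g. "//cib/configuration/alerts"

    // Absolute form, anchored at the document root (same string minus the
    // leading '/')
    const char *abs = pcmk__cib_abs_xpath_for(XML_CIB_TAG_ALERTS);
    // e.g. "/cib/configuration/alerts"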
+
+/*!
* \brief Get the parent element name of a given CIB element name
*
* \param[in] element_name Name of CIB element
diff --git a/lib/common/crmcommon_private.h b/lib/common/crmcommon_private.h
index 7faccb6..121d663 100644
--- a/lib/common/crmcommon_private.h
+++ b/lib/common/crmcommon_private.h
@@ -63,7 +63,7 @@ typedef struct xml_doc_private_s {
} while (0)
G_GNUC_INTERNAL
-void pcmk__xml2text(xmlNodePtr data, uint32_t options, GString *buffer,
+void pcmk__xml2text(const xmlNode *data, uint32_t options, GString *buffer,
int depth);
G_GNUC_INTERNAL
@@ -116,12 +116,14 @@ G_GNUC_INTERNAL
void pcmk__log_xmllib_err(void *ctx, const char *fmt, ...)
G_GNUC_PRINTF(2, 3);
-static inline const char *
-pcmk__xml_attr_value(const xmlAttr *attr)
-{
- return ((attr == NULL) || (attr->children == NULL))? NULL
- : (const char *) attr->children->content;
-}
+G_GNUC_INTERNAL
+void pcmk__mark_xml_node_dirty(xmlNode *xml);
+
+G_GNUC_INTERNAL
+bool pcmk__marked_as_deleted(xmlAttrPtr a, void *user_data);
+
+G_GNUC_INTERNAL
+void pcmk__dump_xml_attr(const xmlAttr *attr, GString *buffer);
/*
* IPC
@@ -173,11 +175,11 @@ typedef struct pcmk__ipc_methods_s {
* \brief Check whether an IPC request results in a reply
*
* \param[in,out] api IPC API connection
- * \param[in,out] request IPC request XML
+ * \param[in] request IPC request XML
*
* \return true if request would result in an IPC reply, false otherwise
*/
- bool (*reply_expected)(pcmk_ipc_api_t *api, xmlNode *request);
+ bool (*reply_expected)(pcmk_ipc_api_t *api, const xmlNode *request);
/*!
* \internal
@@ -222,7 +224,7 @@ typedef struct pcmk__ipc_header_s {
} pcmk__ipc_header_t;
G_GNUC_INTERNAL
-int pcmk__send_ipc_request(pcmk_ipc_api_t *api, xmlNode *request);
+int pcmk__send_ipc_request(pcmk_ipc_api_t *api, const xmlNode *request);
G_GNUC_INTERNAL
void pcmk__call_ipc_callback(pcmk_ipc_api_t *api,
@@ -264,47 +266,6 @@ pcmk__ipc_methods_t *pcmk__schedulerd_api_methods(void);
//! XML has been moved
#define PCMK__XML_PREFIX_MOVED "+~"
-/*!
- * \brief Check the authenticity of the IPC socket peer process
- *
- * If everything goes well, peer's authenticity is verified by the means
- * of comparing against provided referential UID and GID (either satisfies),
- * and the result of this check can be deduced from the return value.
- * As an exception, detected UID of 0 ("root") satisfies arbitrary
- * provided referential daemon's credentials.
- *
- * \param[in] qb_ipc libqb client connection if available
- * \param[in] sock IPC related, connected Unix socket to check peer of
- * \param[in] refuid referential UID to check against
- * \param[in] refgid referential GID to check against
- * \param[out] gotpid to optionally store obtained PID of the peer
- * (not available on FreeBSD, special value of 1
- * used instead, and the caller is required to
- * special case this value respectively)
- * \param[out] gotuid to optionally store obtained UID of the peer
- * \param[out] gotgid to optionally store obtained GID of the peer
- *
- * \return Standard Pacemaker return code
- * ie: 0 if it the connection is authentic
- * pcmk_rc_ipc_unauthorized if the connection is not authentic,
- * standard errors.
- *
- * \note While this function is tolerant on what constitutes authorized
- * IPC daemon process (its effective user matches UID=0 or \p refuid,
- * or at least its group matches \p refgid), either or both (in case
- * of UID=0) mismatches on the expected credentials of such peer
- * process \e shall be investigated at the caller when value of 1
- * gets returned there, since higher-than-expected privileges in
- * respect to the expected/intended credentials possibly violate
- * the least privilege principle and may pose an additional risk
- * (i.e. such accidental inconsistency shall be eventually fixed).
- */
-int pcmk__crm_ipc_is_authentic_process(qb_ipcc_connection_t *qb_ipc, int sock,
- uid_t refuid, gid_t refgid,
- pid_t *gotpid, uid_t *gotuid,
- gid_t *gotgid);
-
-
/*
* Output
*/
diff --git a/lib/common/digest.c b/lib/common/digest.c
index 3bf04bf..4de6f97 100644
--- a/lib/common/digest.c
+++ b/lib/common/digest.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2015-2022 the Pacemaker project contributors
+ * Copyright 2015-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -89,7 +89,7 @@ calculate_xml_digest_v1(xmlNode *input, gboolean sort, gboolean ignored)
* \return Newly allocated string containing digest
*/
static char *
-calculate_xml_digest_v2(xmlNode *source, gboolean do_filter)
+calculate_xml_digest_v2(const xmlNode *source, gboolean do_filter)
{
char *digest = NULL;
GString *buffer = g_string_sized_new(1024);
diff --git a/lib/common/io.c b/lib/common/io.c
index 2264e16..35efbe9 100644
--- a/lib/common/io.c
+++ b/lib/common/io.c
@@ -460,11 +460,17 @@ pcmk__file_contents(const char *filename, char **contents)
goto bail;
}
rewind(fp);
- read_len = fread(*contents, 1, length, fp); /* Coverity: False positive */
+
+ read_len = fread(*contents, 1, length, fp);
if (read_len != length) {
free(*contents);
*contents = NULL;
rc = EIO;
+ } else {
+ /* Coverity thinks *contents isn't null-terminated. It doesn't
+ * understand calloc().
+ */
+ (*contents)[length] = '\0';
}
}
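/* Illustrative sketch (not from the upstream change): the pattern the hunk
 * above enforces -- read a whole file into a buffer and NUL-terminate it
 * explicitly rather than relying on calloc() zero-fill that static analyzers
 * may not track.  Requires <stdio.h> and <stdlib.h>; error handling is
 * reduced to the essentials.
 */
static char *
read_whole_file(const char *path, size_t length)
{
    FILE *fp = fopen(path, "r");
    char *buf = NULL;

    if (fp == NULL) {
        return NULL;
    }
    buf = calloc(length + 1, sizeof(char));
    if ((buf != NULL) && (fread(buf, 1, length, fp) == length)) {
        buf[length] = '\0';     // Explicit termination, as in the hunk above
    } else {
        free(buf);
        buf = NULL;
    }
    fclose(fp);
    return buf;
}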
diff --git a/lib/common/ipc_attrd.c b/lib/common/ipc_attrd.c
index 7c40aa7..9caaabe 100644
--- a/lib/common/ipc_attrd.c
+++ b/lib/common/ipc_attrd.c
@@ -44,7 +44,7 @@ set_pairs_data(pcmk__attrd_api_reply_t *data, xmlNode *msg_data)
}
static bool
-reply_expected(pcmk_ipc_api_t *api, xmlNode *request)
+reply_expected(pcmk_ipc_api_t *api, const xmlNode *request)
{
const char *command = crm_element_value(request, PCMK__XA_TASK);
@@ -169,32 +169,29 @@ destroy_api(pcmk_ipc_api_t *api)
}
static int
-connect_and_send_attrd_request(pcmk_ipc_api_t *api, xmlNode *request)
+connect_and_send_attrd_request(pcmk_ipc_api_t *api, const xmlNode *request)
{
int rc = pcmk_rc_ok;
- int max = 5;
-
- while (max > 0) {
- crm_info("Connecting to cluster... %d retries remaining", max);
- rc = pcmk_connect_ipc(api, pcmk_ipc_dispatch_sync);
-
- if (rc == pcmk_rc_ok) {
- rc = pcmk__send_ipc_request(api, request);
- break;
- } else if (rc == EAGAIN || rc == EALREADY) {
- sleep(5 - max);
- max--;
- } else {
- crm_err("Could not connect to attrd: %s", pcmk_rc_str(rc));
- break;
- }
+
+ rc = pcmk__connect_ipc(api, pcmk_ipc_dispatch_sync, 5);
+ if (rc != pcmk_rc_ok) {
+ crm_err("Could not connect to %s: %s",
+ pcmk_ipc_name(api, true), pcmk_rc_str(rc));
+ return rc;
}
- return rc;
+ rc = pcmk__send_ipc_request(api, request);
+ if (rc != pcmk_rc_ok) {
+ crm_err("Could not send request to %s: %s",
+ pcmk_ipc_name(api, true), pcmk_rc_str(rc));
+ return rc;
+ }
+
+ return pcmk_rc_ok;
}
static int
-send_attrd_request(pcmk_ipc_api_t *api, xmlNode *request)
+send_attrd_request(pcmk_ipc_api_t *api, const xmlNode *request)
{
return pcmk__send_ipc_request(api, request);
}
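/* Illustrative sketch (not from the upstream change): the caller-side shape of
 * the rewritten connect-and-send logic above.  pcmk_new_ipc_api(),
 * pcmk_ipc_attrd and pcmk_free_ipc_api() are assumed from the public
 * <crm/common/ipc.h> API; the request body and error handling are placeholders.
 */
int
send_to_attrd(xmlNode *request)
{
    pcmk_ipc_api_t *api = NULL;
    int rc = pcmk_new_ipc_api(&api, pcmk_ipc_attrd);

    if (rc == pcmk_rc_ok) {
        // Up to 5 attempts, retrying after soft errors such as EAGAIN
        rc = pcmk__connect_ipc(api, pcmk_ipc_dispatch_sync, 5);
    }
    if (rc == pcmk_rc_ok) {
        rc = pcmk__send_ipc_request(api, request);
    }
    if (api != NULL) {
        pcmk_free_ipc_api(api);
    }
    return rc;
}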
diff --git a/lib/common/ipc_client.c b/lib/common/ipc_client.c
index c6d1645..0d38650 100644
--- a/lib/common/ipc_client.c
+++ b/lib/common/ipc_client.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -31,6 +31,10 @@
#include <crm/common/ipc_internal.h>
#include "crmcommon_private.h"
+static int is_ipc_provider_expected(qb_ipcc_connection_t *qb_ipc, int sock,
+ uid_t refuid, gid_t refgid, pid_t *gotpid,
+ uid_t *gotuid, gid_t *gotgid);
+
/*!
* \brief Create a new object for using Pacemaker daemon IPC
*
@@ -164,7 +168,7 @@ ipc_post_disconnect(gpointer user_data)
{
pcmk_ipc_api_t *api = user_data;
- crm_info("Disconnected from %s IPC API", pcmk_ipc_name(api, true));
+ crm_info("Disconnected from %s", pcmk_ipc_name(api, true));
// Perform any daemon-specific handling needed
if ((api->cmds != NULL) && (api->cmds->post_disconnect != NULL)) {
@@ -389,7 +393,7 @@ dispatch_ipc_source_data(const char *buffer, ssize_t length, gpointer user_data)
* meaning no data is available; all other values indicate errors.
* \todo This does not allow the caller to poll multiple file descriptors at
* once. If there is demand for that, we could add a wrapper for
- * crm_ipc_get_fd(api->ipc), so the caller can call poll() themselves.
+ * pcmk__ipc_fd(api->ipc), so the caller can call poll() themselves.
*/
int
pcmk_poll_ipc(const pcmk_ipc_api_t *api, int timeout_ms)
@@ -400,7 +404,14 @@ pcmk_poll_ipc(const pcmk_ipc_api_t *api, int timeout_ms)
if ((api == NULL) || (api->dispatch_type != pcmk_ipc_dispatch_poll)) {
return EINVAL;
}
- pollfd.fd = crm_ipc_get_fd(api->ipc);
+
+ rc = pcmk__ipc_fd(api->ipc, &(pollfd.fd));
+ if (rc != pcmk_rc_ok) {
+ crm_debug("Could not obtain file descriptor for %s IPC: %s",
+ pcmk_ipc_name(api, true), pcmk_rc_str(rc));
+ return rc;
+ }
+
pollfd.events = POLLIN;
rc = poll(&pollfd, 1, timeout_ms);
if (rc < 0) {
@@ -465,54 +476,54 @@ connect_with_main_loop(pcmk_ipc_api_t *api)
static int
connect_without_main_loop(pcmk_ipc_api_t *api)
{
- int rc;
+ int rc = pcmk__connect_generic_ipc(api->ipc);
- if (!crm_ipc_connect(api->ipc)) {
- rc = errno;
+ if (rc != pcmk_rc_ok) {
crm_ipc_close(api->ipc);
- return rc;
+ } else {
+ crm_debug("Connected to %s IPC (without main loop)",
+ pcmk_ipc_name(api, true));
}
- crm_debug("Connected to %s IPC (without main loop)",
- pcmk_ipc_name(api, true));
- return pcmk_rc_ok;
+ return rc;
}
/*!
- * \brief Connect to a Pacemaker daemon via IPC
+ * \internal
+ * \brief Connect to a Pacemaker daemon via IPC (retrying after soft errors)
*
* \param[in,out] api IPC API instance
* \param[in] dispatch_type How IPC replies should be dispatched
+ * \param[in] attempts How many times to try (in case of soft error)
*
* \return Standard Pacemaker return code
*/
int
-pcmk_connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type)
+pcmk__connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type,
+ int attempts)
{
- const int n_attempts = 2;
int rc = pcmk_rc_ok;
- if (api == NULL) {
- crm_err("Cannot connect to uninitialized API object");
+ if ((api == NULL) || (attempts < 1)) {
return EINVAL;
}
if (api->ipc == NULL) {
- api->ipc = crm_ipc_new(pcmk_ipc_name(api, false),
- api->ipc_size_max);
+ api->ipc = crm_ipc_new(pcmk_ipc_name(api, false), api->ipc_size_max);
if (api->ipc == NULL) {
- crm_err("Failed to re-create IPC API");
return ENOMEM;
}
}
if (crm_ipc_connected(api->ipc)) {
- crm_trace("Already connected to %s IPC API", pcmk_ipc_name(api, true));
+ crm_trace("Already connected to %s", pcmk_ipc_name(api, true));
return pcmk_rc_ok;
}
api->dispatch_type = dispatch_type;
- for (int i = 0; i < n_attempts; i++) {
+ crm_debug("Attempting connection to %s (up to %d time%s)",
+ pcmk_ipc_name(api, true), attempts, pcmk__plural_s(attempts));
+ for (int remaining = attempts - 1; remaining >= 0; --remaining) {
switch (dispatch_type) {
case pcmk_ipc_dispatch_main:
rc = connect_with_main_loop(api);
@@ -524,17 +535,15 @@ pcmk_connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type)
break;
}
- if (rc != EAGAIN) {
- break;
+ if ((remaining == 0) || ((rc != EAGAIN) && (rc != EALREADY))) {
+ break; // Result is final
}
- /* EAGAIN may occur due to interruption by a signal or due to some
- * transient issue. Try one more time to be more resilient.
- */
- if (i < (n_attempts - 1)) {
- crm_trace("Connection to %s IPC API failed with EAGAIN, retrying",
- pcmk_ipc_name(api, true));
- }
+ // Retry after soft error (interrupted by signal, etc.)
+ pcmk__sleep_ms((attempts - remaining) * 500);
+ crm_debug("Re-attempting connection to %s (%d attempt%s remaining)",
+ pcmk_ipc_name(api, true), remaining,
+ pcmk__plural_s(remaining));
}
if (rc != pcmk_rc_ok) {
@@ -551,6 +560,26 @@ pcmk_connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type)
}
/*!
+ * \brief Connect to a Pacemaker daemon via IPC
+ *
+ * \param[in,out] api IPC API instance
+ * \param[in] dispatch_type How IPC replies should be dispatched
+ *
+ * \return Standard Pacemaker return code
+ */
+int
+pcmk_connect_ipc(pcmk_ipc_api_t *api, enum pcmk_ipc_dispatch dispatch_type)
+{
+ int rc = pcmk__connect_ipc(api, dispatch_type, 2);
+
+ if (rc != pcmk_rc_ok) {
+ crm_err("Connection to %s failed: %s",
+ pcmk_ipc_name(api, true), pcmk_rc_str(rc));
+ }
+ return rc;
+}
+
+/*!
* \brief Disconnect an IPC API instance
*
* \param[in,out] api IPC API connection
@@ -628,7 +657,7 @@ pcmk_register_ipc_callback(pcmk_ipc_api_t *api, pcmk_ipc_callback_t cb,
* \brief Send an XML request across an IPC API connection
*
* \param[in,out] api IPC API connection
- * \param[in,out] request XML request to send
+ * \param[in] request XML request to send
*
* \return Standard Pacemaker return code
*
@@ -636,7 +665,7 @@ pcmk_register_ipc_callback(pcmk_ipc_api_t *api, pcmk_ipc_callback_t cb,
* requests, because it handles different dispatch types appropriately.
*/
int
-pcmk__send_ipc_request(pcmk_ipc_api_t *api, xmlNode *request)
+pcmk__send_ipc_request(pcmk_ipc_api_t *api, const xmlNode *request)
{
int rc;
xmlNode *reply = NULL;
@@ -855,6 +884,77 @@ crm_ipc_new(const char *name, size_t max_size)
}
/*!
+ * \internal
+ * \brief Connect a generic (not daemon-specific) IPC object
+ *
+ * \param[in,out] ipc Generic IPC object to connect
+ *
+ * \return Standard Pacemaker return code
+ */
+int
+pcmk__connect_generic_ipc(crm_ipc_t *ipc)
+{
+ uid_t cl_uid = 0;
+ gid_t cl_gid = 0;
+ pid_t found_pid = 0;
+ uid_t found_uid = 0;
+ gid_t found_gid = 0;
+ int rc = pcmk_rc_ok;
+
+ if (ipc == NULL) {
+ return EINVAL;
+ }
+
+ ipc->need_reply = FALSE;
+ ipc->ipc = qb_ipcc_connect(ipc->server_name, ipc->buf_size);
+ if (ipc->ipc == NULL) {
+ return errno;
+ }
+
+ rc = qb_ipcc_fd_get(ipc->ipc, &ipc->pfd.fd);
+ if (rc < 0) { // -errno
+ crm_ipc_close(ipc);
+ return -rc;
+ }
+
+ rc = pcmk_daemon_user(&cl_uid, &cl_gid);
+ rc = pcmk_legacy2rc(rc);
+ if (rc != pcmk_rc_ok) {
+ crm_ipc_close(ipc);
+ return rc;
+ }
+
+ rc = is_ipc_provider_expected(ipc->ipc, ipc->pfd.fd, cl_uid, cl_gid,
+ &found_pid, &found_uid, &found_gid);
+ if (rc != pcmk_rc_ok) {
+ if (rc == pcmk_rc_ipc_unauthorized) {
+ crm_info("%s IPC provider authentication failed: process %lld has "
+ "uid %lld (expected %lld) and gid %lld (expected %lld)",
+ ipc->server_name,
+ (long long) PCMK__SPECIAL_PID_AS_0(found_pid),
+ (long long) found_uid, (long long) cl_uid,
+ (long long) found_gid, (long long) cl_gid);
+ }
+ crm_ipc_close(ipc);
+ return rc;
+ }
+
+ ipc->max_buf_size = qb_ipcc_get_buffer_size(ipc->ipc);
+ if (ipc->max_buf_size > ipc->buf_size) {
+ free(ipc->buffer);
+ ipc->buffer = calloc(ipc->max_buf_size, sizeof(char));
+ if (ipc->buffer == NULL) {
+ rc = errno;
+ crm_ipc_close(ipc);
+ return rc;
+ }
+ ipc->buf_size = ipc->max_buf_size;
+ }
+
+ return pcmk_rc_ok;
+}
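/* Illustrative sketch (not from the upstream change): the lifecycle around the
 * new helper, using only calls that appear in this patch.  "pacemakerd" as the
 * server name and 0 for the default buffer size are placeholder assumptions.
 */
crm_ipc_t *ipc = crm_ipc_new("pacemakerd", 0);

if (ipc != NULL) {
    if (pcmk__connect_generic_ipc(ipc) == pcmk_rc_ok) {
        // ... exchange messages, e.g. with crm_ipc_send() ...
        crm_ipc_close(ipc);
    }
    crm_ipc_destroy(ipc);
}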
+
+/*!
* \brief Establish an IPC connection to a Pacemaker component
*
* \param[in,out] client Connection instance obtained from crm_ipc_new()
@@ -866,76 +966,26 @@ crm_ipc_new(const char *name, size_t max_size)
bool
crm_ipc_connect(crm_ipc_t *client)
{
- uid_t cl_uid = 0;
- gid_t cl_gid = 0;
- pid_t found_pid = 0; uid_t found_uid = 0; gid_t found_gid = 0;
- int rv;
+ int rc = pcmk__connect_generic_ipc(client);
- if (client == NULL) {
- errno = EINVAL;
- return false;
+ if (rc == pcmk_rc_ok) {
+ return true;
}
-
- client->need_reply = FALSE;
- client->ipc = qb_ipcc_connect(client->server_name, client->buf_size);
-
- if (client->ipc == NULL) {
+ if ((client != NULL) && (client->ipc == NULL)) {
+ errno = (rc > 0)? rc : ENOTCONN;
crm_debug("Could not establish %s IPC connection: %s (%d)",
client->server_name, pcmk_rc_str(errno), errno);
- return false;
- }
-
- client->pfd.fd = crm_ipc_get_fd(client);
- if (client->pfd.fd < 0) {
- rv = errno;
- /* message already omitted */
- crm_ipc_close(client);
- errno = rv;
- return false;
- }
-
- rv = pcmk_daemon_user(&cl_uid, &cl_gid);
- if (rv < 0) {
- /* message already omitted */
- crm_ipc_close(client);
- errno = -rv;
- return false;
- }
-
- if ((rv = pcmk__crm_ipc_is_authentic_process(client->ipc, client->pfd.fd, cl_uid, cl_gid,
- &found_pid, &found_uid,
- &found_gid)) == pcmk_rc_ipc_unauthorized) {
- crm_err("%s IPC provider authentication failed: process %lld has "
- "uid %lld (expected %lld) and gid %lld (expected %lld)",
- client->server_name,
- (long long) PCMK__SPECIAL_PID_AS_0(found_pid),
- (long long) found_uid, (long long) cl_uid,
- (long long) found_gid, (long long) cl_gid);
- crm_ipc_close(client);
+ } else if (rc == pcmk_rc_ipc_unauthorized) {
+ crm_err("%s IPC provider authentication failed",
+ (client == NULL)? "Pacemaker" : client->server_name);
errno = ECONNABORTED;
- return false;
-
- } else if (rv != pcmk_rc_ok) {
- crm_perror(LOG_ERR, "Could not verify authenticity of %s IPC provider",
- client->server_name);
- crm_ipc_close(client);
- if (rv > 0) {
- errno = rv;
- } else {
- errno = ENOTCONN;
- }
- return false;
- }
-
- qb_ipcc_context_set(client->ipc, client);
-
- client->max_buf_size = qb_ipcc_get_buffer_size(client->ipc);
- if (client->max_buf_size > client->buf_size) {
- free(client->buffer);
- client->buffer = calloc(1, client->max_buf_size);
- client->buf_size = client->max_buf_size;
+ } else {
+ crm_perror(LOG_ERR,
+ "Could not verify authenticity of %s IPC provider",
+ (client == NULL)? "Pacemaker" : client->server_name);
+ errno = ENOTCONN;
}
- return true;
+ return false;
}
void
@@ -977,18 +1027,40 @@ crm_ipc_destroy(crm_ipc_t * client)
}
}
+/*!
+ * \internal
+ * \brief Get the file descriptor for a generic IPC object
+ *
+ * \param[in,out] ipc Generic IPC object to get file descriptor for
+ * \param[out] fd Where to store file descriptor
+ *
+ * \return Standard Pacemaker return code
+ */
+int
+pcmk__ipc_fd(crm_ipc_t *ipc, int *fd)
+{
+ if ((ipc == NULL) || (fd == NULL)) {
+ return EINVAL;
+ }
+ if ((ipc->ipc == NULL) || (ipc->pfd.fd < 0)) {
+ return ENOTCONN;
+ }
+ *fd = ipc->pfd.fd;
+ return pcmk_rc_ok;
+}
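/* Illustrative sketch (not from the upstream change): waiting for readable
 * data via the new accessor, mirroring what pcmk_poll_ipc() does earlier in
 * this patch.  Requires <poll.h>; the 1-second timeout is arbitrary.
 */
int fd = -1;

if (pcmk__ipc_fd(ipc, &fd) == pcmk_rc_ok) {
    struct pollfd pfd = { .fd = fd, .events = POLLIN };

    if (poll(&pfd, 1, 1000) > 0) {
        // Data is available to dispatch
    }
}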
+
int
crm_ipc_get_fd(crm_ipc_t * client)
{
- int fd = 0;
+ int fd = -1;
- if (client && client->ipc && (qb_ipcc_fd_get(client->ipc, &fd) == 0)) {
- return fd;
+ if (pcmk__ipc_fd(client, &fd) != pcmk_rc_ok) {
+ crm_err("Could not obtain file descriptor for %s IPC",
+ ((client == NULL)? "unspecified" : client->server_name));
+ errno = EINVAL;
+ return -EINVAL;
}
- errno = EINVAL;
- crm_perror(LOG_ERR, "Could not obtain file descriptor for %s IPC",
- (client? client->server_name : "unspecified"));
- return -errno;
+ return fd;
}
bool
@@ -1057,12 +1129,13 @@ crm_ipc_decompress(crm_ipc_t * client)
rc = BZ2_bzBuffToBuffDecompress(uncompressed + sizeof(pcmk__ipc_header_t), &size_u,
client->buffer + sizeof(pcmk__ipc_header_t), header->size_compressed, 1, 0);
+ rc = pcmk__bzlib2rc(rc);
- if (rc != BZ_OK) {
- crm_err("Decompression failed: %s " CRM_XS " bzerror=%d",
- bz2_strerror(rc), rc);
+ if (rc != pcmk_rc_ok) {
+ crm_err("Decompression failed: %s " CRM_XS " rc=%d",
+ pcmk_rc_str(rc), rc);
free(uncompressed);
- return EILSEQ;
+ return rc;
}
/*
@@ -1221,7 +1294,7 @@ internal_ipc_get_reply(crm_ipc_t *client, int request_id, int ms_timeout,
* \brief Send an IPC XML message
*
* \param[in,out] client Connection to IPC server
- * \param[in,out] message XML message to send
+ * \param[in] message XML message to send
* \param[in] flags Bitmask of crm_ipc_flags
* \param[in] ms_timeout Give up if not sent within this much time
* (5 seconds if 0, or no timeout if negative)
@@ -1231,8 +1304,8 @@ internal_ipc_get_reply(crm_ipc_t *client, int request_id, int ms_timeout,
* if reply was needed, otherwise number of bytes sent
*/
int
-crm_ipc_send(crm_ipc_t * client, xmlNode * message, enum crm_ipc_flags flags, int32_t ms_timeout,
- xmlNode ** reply)
+crm_ipc_send(crm_ipc_t *client, const xmlNode *message,
+ enum crm_ipc_flags flags, int32_t ms_timeout, xmlNode **reply)
{
int rc = 0;
ssize_t qb_rc = 0;
@@ -1385,89 +1458,129 @@ crm_ipc_send(crm_ipc_t * client, xmlNode * message, enum crm_ipc_flags flags, in
return rc;
}
-int
-pcmk__crm_ipc_is_authentic_process(qb_ipcc_connection_t *qb_ipc, int sock, uid_t refuid, gid_t refgid,
- pid_t *gotpid, uid_t *gotuid, gid_t *gotgid)
+/*!
+ * \brief Ensure an IPC provider has expected user or group
+ *
+ * \param[in] qb_ipc libqb client connection if available
+ * \param[in] sock Connected Unix socket for IPC
+ * \param[in] refuid Expected user ID
+ * \param[in] refgid Expected group ID
+ * \param[out] gotpid If not NULL, where to store provider's actual process ID
+ * (or 1 on platforms where ID is not available)
+ * \param[out] gotuid If not NULL, where to store provider's actual user ID
+ * \param[out] gotgid If not NULL, where to store provider's actual group ID
+ *
+ * \return Standard Pacemaker return code
+ * \note An actual user ID of 0 (root) will always be considered authorized,
+ * regardless of the expected values provided. The caller can use the
+ * output arguments to be stricter than this function.
+ */
+static int
+is_ipc_provider_expected(qb_ipcc_connection_t *qb_ipc, int sock,
+ uid_t refuid, gid_t refgid,
+ pid_t *gotpid, uid_t *gotuid, gid_t *gotgid)
{
- int ret = 0;
- pid_t found_pid = 0; uid_t found_uid = 0; gid_t found_gid = 0;
-#if defined(HAVE_UCRED)
- struct ucred ucred;
- socklen_t ucred_len = sizeof(ucred);
-#endif
+ int rc = EOPNOTSUPP;
+ pid_t found_pid = 0;
+ uid_t found_uid = 0;
+ gid_t found_gid = 0;
#ifdef HAVE_QB_IPCC_AUTH_GET
- if (qb_ipc && !qb_ipcc_auth_get(qb_ipc, &found_pid, &found_uid, &found_gid)) {
- goto do_checks;
+ if (qb_ipc != NULL) {
+ rc = qb_ipcc_auth_get(qb_ipc, &found_pid, &found_uid, &found_gid);
+ rc = -rc; // libqb returns 0 or -errno
+ if (rc == pcmk_rc_ok) {
+ goto found;
+ }
}
#endif
-#if defined(HAVE_UCRED)
- if (!getsockopt(sock, SOL_SOCKET, SO_PEERCRED,
- &ucred, &ucred_len)
- && ucred_len == sizeof(ucred)) {
- found_pid = ucred.pid; found_uid = ucred.uid; found_gid = ucred.gid;
+#ifdef HAVE_UCRED
+ {
+ struct ucred ucred;
+ socklen_t ucred_len = sizeof(ucred);
-#elif defined(HAVE_SOCKPEERCRED)
- struct sockpeercred sockpeercred;
- socklen_t sockpeercred_len = sizeof(sockpeercred);
-
- if (!getsockopt(sock, SOL_SOCKET, SO_PEERCRED,
- &sockpeercred, &sockpeercred_len)
- && sockpeercred_len == sizeof(sockpeercred_len)) {
- found_pid = sockpeercred.pid;
- found_uid = sockpeercred.uid; found_gid = sockpeercred.gid;
+ if (getsockopt(sock, SOL_SOCKET, SO_PEERCRED, &ucred, &ucred_len) < 0) {
+ rc = errno;
+ } else if (ucred_len != sizeof(ucred)) {
+ rc = EOPNOTSUPP;
+ } else {
+ found_pid = ucred.pid;
+ found_uid = ucred.uid;
+ found_gid = ucred.gid;
+ goto found;
+ }
+ }
+#endif
-#elif defined(HAVE_GETPEEREID)
- if (!getpeereid(sock, &found_uid, &found_gid)) {
- found_pid = PCMK__SPECIAL_PID; /* cannot obtain PID (FreeBSD) */
+#ifdef HAVE_SOCKPEERCRED
+ {
+ struct sockpeercred sockpeercred;
+ socklen_t sockpeercred_len = sizeof(sockpeercred);
-#elif defined(HAVE_GETPEERUCRED)
- ucred_t *ucred;
- if (!getpeerucred(sock, &ucred)) {
- errno = 0;
- found_pid = ucred_getpid(ucred);
- found_uid = ucred_geteuid(ucred); found_gid = ucred_getegid(ucred);
- ret = -errno;
- ucred_free(ucred);
- if (ret) {
- return (ret < 0) ? ret : -pcmk_err_generic;
+ if (getsockopt(sock, SOL_SOCKET, SO_PEERCRED,
+ &sockpeercred, &sockpeercred_len) < 0) {
+ rc = errno;
+ } else if (sockpeercred_len != sizeof(sockpeercred)) {
+ rc = EOPNOTSUPP;
+ } else {
+ found_pid = sockpeercred.pid;
+ found_uid = sockpeercred.uid;
+ found_gid = sockpeercred.gid;
+ goto found;
}
-
-#else
-# error "No way to authenticate a Unix socket peer"
- errno = 0;
- if (0) {
+ }
#endif
-#ifdef HAVE_QB_IPCC_AUTH_GET
- do_checks:
+
+#ifdef HAVE_GETPEEREID // For example, FreeBSD
+ if (getpeereid(sock, &found_uid, &found_gid) < 0) {
+ rc = errno;
+ } else {
+ found_pid = PCMK__SPECIAL_PID;
+ goto found;
+ }
#endif
- if (gotpid != NULL) {
- *gotpid = found_pid;
- }
- if (gotuid != NULL) {
- *gotuid = found_uid;
- }
- if (gotgid != NULL) {
- *gotgid = found_gid;
- }
- if (found_uid == 0 || found_uid == refuid || found_gid == refgid) {
- ret = 0;
+
+#ifdef HAVE_GETPEERUCRED
+ {
+ ucred_t *ucred = NULL;
+
+ if (getpeerucred(sock, &ucred) < 0) {
+ rc = errno;
} else {
- ret = pcmk_rc_ipc_unauthorized;
+ found_pid = ucred_getpid(ucred);
+ found_uid = ucred_geteuid(ucred);
+ found_gid = ucred_getegid(ucred);
+ ucred_free(ucred);
+ goto found;
}
- } else {
- ret = (errno > 0) ? errno : pcmk_rc_error;
}
- return ret;
+#endif
+
+ return rc; // If we get here, nothing succeeded
+
+found:
+ if (gotpid != NULL) {
+ *gotpid = found_pid;
+ }
+ if (gotuid != NULL) {
+ *gotuid = found_uid;
+ }
+ if (gotgid != NULL) {
+ *gotgid = found_gid;
+ }
+ if ((found_uid != 0) && (found_uid != refuid) && (found_gid != refgid)) {
+ return pcmk_rc_ipc_unauthorized;
+ }
+ return pcmk_rc_ok;
}
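/* Illustrative sketch (not from the upstream change): the Linux SO_PEERCRED
 * branch above in isolation -- fetch the peer credentials of a connected Unix
 * socket and apply the same "root or matching UID" rule.  Requires
 * <stdbool.h> and <sys/socket.h> (with _GNU_SOURCE for struct ucred); other
 * platforms use the getpeereid()/getpeerucred() branches shown above.
 */
static bool
peer_uid_acceptable(int sock, uid_t expected_uid)
{
    struct ucred cred;
    socklen_t len = sizeof(cred);

    if (getsockopt(sock, SOL_SOCKET, SO_PEERCRED, &cred, &len) < 0) {
        return false;   // Credentials unavailable; treat as unauthorized
    }
    return (cred.uid == 0) || (cred.uid == expected_uid);
}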
int
crm_ipc_is_authentic_process(int sock, uid_t refuid, gid_t refgid,
pid_t *gotpid, uid_t *gotuid, gid_t *gotgid)
{
- int ret = pcmk__crm_ipc_is_authentic_process(NULL, sock, refuid, refgid,
- gotpid, gotuid, gotgid);
+ int ret = is_ipc_provider_expected(NULL, sock, refuid, refgid,
+ gotpid, gotuid, gotgid);
    /* The old function had some very odd return codes */
if (ret == 0) {
@@ -1528,8 +1641,8 @@ pcmk__ipc_is_authentic_process_active(const char *name, uid_t refuid,
goto bail;
}
- auth_rc = pcmk__crm_ipc_is_authentic_process(c, fd, refuid, refgid, &found_pid,
- &found_uid, &found_gid);
+ auth_rc = is_ipc_provider_expected(c, fd, refuid, refgid,
+ &found_pid, &found_uid, &found_gid);
if (auth_rc == pcmk_rc_ipc_unauthorized) {
crm_err("Daemon (IPC %s) effectively blocked with unauthorized"
" process %lld (uid: %lld, gid: %lld)",
diff --git a/lib/common/ipc_common.c b/lib/common/ipc_common.c
index d0c0636..a48b0e9 100644
--- a/lib/common/ipc_common.c
+++ b/lib/common/ipc_common.c
@@ -35,7 +35,7 @@ pcmk__ipc_buffer_size(unsigned int max)
if (global_max == 0) {
long long global_ll;
- if ((pcmk__scan_ll(getenv("PCMK_ipc_buffer"), &global_ll,
+ if ((pcmk__scan_ll(pcmk__env_option(PCMK__ENV_IPC_BUFFER), &global_ll,
0LL) != pcmk_rc_ok)
|| (global_ll <= 0)) {
global_max = MAX_MSG_SIZE; // Default for unset or invalid
diff --git a/lib/common/ipc_controld.c b/lib/common/ipc_controld.c
index 9303afd..8e2016e 100644
--- a/lib/common/ipc_controld.c
+++ b/lib/common/ipc_controld.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-2022 the Pacemaker project contributors
+ * Copyright 2020-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -135,7 +135,7 @@ set_node_info_data(pcmk_controld_api_reply_t *data, xmlNode *msg_data)
data->data.node_info.uuid = crm_element_value(msg_data, XML_ATTR_ID);
data->data.node_info.uname = crm_element_value(msg_data, XML_ATTR_UNAME);
- data->data.node_info.state = crm_element_value(msg_data, XML_NODE_IS_PEER);
+ data->data.node_info.state = crm_element_value(msg_data, PCMK__XA_CRMD);
}
static void
@@ -169,26 +169,24 @@ set_nodes_data(pcmk_controld_api_reply_t *data, xmlNode *msg_data)
node_info->id = id_ll;
}
node_info->uname = crm_element_value(node, XML_ATTR_UNAME);
- node_info->state = crm_element_value(node, XML_NODE_IN_CLUSTER);
+ node_info->state = crm_element_value(node, PCMK__XA_IN_CCM);
data->data.nodes = g_list_prepend(data->data.nodes, node_info);
}
}
static bool
-reply_expected(pcmk_ipc_api_t *api, xmlNode *request)
+reply_expected(pcmk_ipc_api_t *api, const xmlNode *request)
{
- const char *command = crm_element_value(request, F_CRM_TASK);
-
- if (command == NULL) {
- return false;
- }
-
- // We only need to handle commands that functions in this file can send
- return !strcmp(command, CRM_OP_REPROBE)
- || !strcmp(command, CRM_OP_NODE_INFO)
- || !strcmp(command, CRM_OP_PING)
- || !strcmp(command, CRM_OP_LRM_FAIL)
- || !strcmp(command, CRM_OP_LRM_DELETE);
+ // We only need to handle commands that API functions can send
+ return pcmk__str_any_of(crm_element_value(request, F_CRM_TASK),
+ PCMK__CONTROLD_CMD_NODES,
+ CRM_OP_LRM_DELETE,
+ CRM_OP_LRM_FAIL,
+ CRM_OP_NODE_INFO,
+ CRM_OP_PING,
+ CRM_OP_REPROBE,
+ CRM_OP_RM_NODE_CACHE,
+ NULL);
}
static bool
@@ -202,22 +200,12 @@ dispatch(pcmk_ipc_api_t *api, xmlNode *reply)
pcmk_controld_reply_unknown, NULL, NULL,
};
- /* If we got an ACK, return true so the caller knows to expect more responses
- * from the IPC server. We do this before decrementing replies_expected because
- * ACKs are not going to be included in that value.
- *
- * Note that we cannot do the same kind of status checking here that we do in
- * ipc_pacemakerd.c. The ACK message we receive does not necessarily contain
- * a status attribute. That is, we may receive this:
- *
- * <ack function="crmd_remote_proxy_cb" line="556"/>
- *
- * Instead of this:
- *
- * <ack function="dispatch_controller_ipc" line="391" status="112"/>
- */
- if (pcmk__str_eq(crm_element_name(reply), "ack", pcmk__str_none)) {
- return true; // More replies needed
+ if (pcmk__xe_is(reply, "ack")) {
+ /* ACKs are trivial responses that do not count toward expected replies,
+ * and do not have all the fields that validation requires, so skip that
+ * processing.
+ */
+ return private->replies_expected > 0;
}
if (private->replies_expected > 0) {
@@ -341,21 +329,18 @@ create_controller_request(const pcmk_ipc_api_t *api, const char *op,
// \return Standard Pacemaker return code
static int
-send_controller_request(pcmk_ipc_api_t *api, xmlNode *request,
+send_controller_request(pcmk_ipc_api_t *api, const xmlNode *request,
bool reply_is_expected)
{
- int rc;
-
if (crm_element_value(request, XML_ATTR_REFERENCE) == NULL) {
return EINVAL;
}
- rc = pcmk__send_ipc_request(api, request);
- if ((rc == pcmk_rc_ok) && reply_is_expected) {
+ if (reply_is_expected) {
struct controld_api_private_s *private = api->api_data;
private->replies_expected++;
}
- return rc;
+ return pcmk__send_ipc_request(api, request);
}
static xmlNode *
diff --git a/lib/common/ipc_pacemakerd.c b/lib/common/ipc_pacemakerd.c
index 91a3143..2f03709 100644
--- a/lib/common/ipc_pacemakerd.c
+++ b/lib/common/ipc_pacemakerd.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-2022 the Pacemaker project contributors
+ * Copyright 2020-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -178,7 +178,7 @@ post_disconnect(pcmk_ipc_api_t *api)
}
static bool
-reply_expected(pcmk_ipc_api_t *api, xmlNode *request)
+reply_expected(pcmk_ipc_api_t *api, const xmlNode *request)
{
const char *command = crm_element_value(request, F_CRM_TASK);
diff --git a/lib/common/ipc_schedulerd.c b/lib/common/ipc_schedulerd.c
index c1b81a4..cf788e5 100644
--- a/lib/common/ipc_schedulerd.c
+++ b/lib/common/ipc_schedulerd.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2021-2022 the Pacemaker project contributors
+ * Copyright 2021-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -62,7 +62,7 @@ post_connect(pcmk_ipc_api_t *api)
}
static bool
-reply_expected(pcmk_ipc_api_t *api, xmlNode *request)
+reply_expected(pcmk_ipc_api_t *api, const xmlNode *request)
{
const char *command = crm_element_value(request, F_CRM_TASK);
diff --git a/lib/common/ipc_server.c b/lib/common/ipc_server.c
index 60f20fb..5cd7e70 100644
--- a/lib/common/ipc_server.c
+++ b/lib/common/ipc_server.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -421,9 +421,11 @@ pcmk__client_data2xml(pcmk__client_t *c, void *data, uint32_t *id,
rc = BZ2_bzBuffToBuffDecompress(uncompressed, &size_u, text, header->size_compressed, 1, 0);
text = uncompressed;
- if (rc != BZ_OK) {
- crm_err("Decompression failed: %s " CRM_XS " bzerror=%d",
- bz2_strerror(rc), rc);
+ rc = pcmk__bzlib2rc(rc);
+
+ if (rc != pcmk_rc_ok) {
+ crm_err("Decompression failed: %s " CRM_XS " rc=%d",
+ pcmk_rc_str(rc), rc);
free(uncompressed);
return NULL;
}
@@ -568,16 +570,16 @@ crm_ipcs_flush_events(pcmk__client_t *c)
* \internal
* \brief Create an I/O vector for sending an IPC XML message
*
- * \param[in] request Identifier for libqb response header
- * \param[in,out] message XML message to send
- * \param[in] max_send_size If 0, default IPC buffer size is used
- * \param[out] result Where to store prepared I/O vector
- * \param[out] bytes Size of prepared data in bytes
+ * \param[in] request Identifier for libqb response header
+ * \param[in] message XML message to send
+ * \param[in] max_send_size If 0, default IPC buffer size is used
+ * \param[out] result Where to store prepared I/O vector
+ * \param[out] bytes Size of prepared data in bytes
*
* \return Standard Pacemaker return code
*/
int
-pcmk__ipc_prepare_iov(uint32_t request, xmlNode *message,
+pcmk__ipc_prepare_iov(uint32_t request, const xmlNode *message,
uint32_t max_send_size, struct iovec **result,
ssize_t *bytes)
{
@@ -741,7 +743,7 @@ pcmk__ipc_send_iov(pcmk__client_t *c, struct iovec *iov, uint32_t flags)
}
int
-pcmk__ipc_send_xml(pcmk__client_t *c, uint32_t request, xmlNode *message,
+pcmk__ipc_send_xml(pcmk__client_t *c, uint32_t request, const xmlNode *message,
uint32_t flags)
{
struct iovec *iov = NULL;
@@ -819,6 +821,7 @@ pcmk__ipc_send_ack_as(const char *function, int line, pcmk__client_t *c,
if (ack != NULL) {
crm_trace("Ack'ing IPC message from client %s as <%s status=%d>",
pcmk__client_name(c), tag, status);
+ crm_log_xml_trace(ack, "sent-ack");
c->request_id = 0;
rc = pcmk__ipc_send_xml(c, request, ack, flags);
free_xml(ack);
@@ -995,14 +998,17 @@ pcmk__serve_schedulerd_ipc(struct qb_ipcs_service_handlers *cb)
bool
crm_is_daemon_name(const char *name)
{
- name = pcmk__message_name(name);
- return (!strcmp(name, CRM_SYSTEM_CRMD)
- || !strcmp(name, CRM_SYSTEM_STONITHD)
- || !strcmp(name, "stonith-ng")
- || !strcmp(name, "attrd")
- || !strcmp(name, CRM_SYSTEM_CIB)
- || !strcmp(name, CRM_SYSTEM_MCP)
- || !strcmp(name, CRM_SYSTEM_DC)
- || !strcmp(name, CRM_SYSTEM_TENGINE)
- || !strcmp(name, CRM_SYSTEM_LRMD));
+ return pcmk__str_any_of(pcmk__message_name(name),
+ "attrd",
+ CRM_SYSTEM_CIB,
+ CRM_SYSTEM_CRMD,
+ CRM_SYSTEM_DC,
+ CRM_SYSTEM_LRMD,
+ CRM_SYSTEM_MCP,
+ CRM_SYSTEM_PENGINE,
+ CRM_SYSTEM_STONITHD,
+ CRM_SYSTEM_TENGINE,
+ "pacemaker-remoted",
+ "stonith-ng",
+ NULL);
}
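/* Illustrative sketch (not from the upstream change): pcmk__str_any_of()
 * compares its first argument against a NULL-terminated list of candidates,
 * which is what lets the strcmp() chain above collapse into a single call.
 */
if (pcmk__str_any_of(name, "attrd", "stonith-ng", CRM_SYSTEM_CIB, NULL)) {
    // name matched one of the listed daemons
}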
diff --git a/lib/common/iso8601.c b/lib/common/iso8601.c
index 3e000e1..9de018f 100644
--- a/lib/common/iso8601.c
+++ b/lib/common/iso8601.c
@@ -1930,9 +1930,10 @@ pcmk__readable_interval(guint interval_ms)
#define MS_IN_H (MS_IN_M * 60)
#define MS_IN_D (MS_IN_H * 24)
#define MAXSTR sizeof("..d..h..m..s...ms")
- static char str[MAXSTR] = { '\0', };
+ static char str[MAXSTR];
int offset = 0;
+ str[0] = '\0';
if (interval_ms > MS_IN_D) {
offset += snprintf(str + offset, MAXSTR - offset, "%ud",
interval_ms / MS_IN_D);
diff --git a/lib/common/logging.c b/lib/common/logging.c
index dded873..7768c35 100644
--- a/lib/common/logging.c
+++ b/lib/common/logging.c
@@ -51,6 +51,11 @@ static unsigned int crm_log_priority = LOG_NOTICE;
static GLogFunc glib_log_default = NULL;
static pcmk__output_t *logger_out = NULL;
+pcmk__config_error_func pcmk__config_error_handler = NULL;
+pcmk__config_warning_func pcmk__config_warning_handler = NULL;
+void *pcmk__config_error_context = NULL;
+void *pcmk__config_warning_context = NULL;
+
static gboolean crm_tracing_enabled(void);
static void
@@ -237,7 +242,7 @@ chown_logfile(const char *filename, int logfd)
static void
chmod_logfile(const char *filename, int logfd)
{
- const char *modestr = getenv("PCMK_logfile_mode");
+ const char *modestr = pcmk__env_option(PCMK__ENV_LOGFILE_MODE);
mode_t filemode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP;
if (modestr != NULL) {
@@ -297,7 +302,7 @@ setenv_logfile(const char *filename)
{
// Some resource agents will log only if environment variable is set
if (pcmk__env_option(PCMK__ENV_LOGFILE) == NULL) {
- pcmk__set_env_option(PCMK__ENV_LOGFILE, filename);
+ pcmk__set_env_option(PCMK__ENV_LOGFILE, filename, true);
}
}
@@ -609,6 +614,20 @@ crm_log_filter_source(int source, const char *trace_files, const char *trace_fns
}
}
+#ifndef HAVE_STRCHRNUL
+/* strchrnul() is a GNU extension. If not present, use our own definition.
+ * The GNU version returns char*, but we only need it to be const char*.
+ */
+static const char *
+strchrnul(const char *s, int c)
+{
+ while ((*s != c) && (*s != '\0')) {
+ ++s;
+ }
+ return s;
+}
+#endif
+
static void
crm_log_filter(struct qb_log_callsite *cs)
{
@@ -622,11 +641,11 @@ crm_log_filter(struct qb_log_callsite *cs)
if (need_init) {
need_init = 0;
- trace_fns = getenv("PCMK_trace_functions");
- trace_fmts = getenv("PCMK_trace_formats");
- trace_tags = getenv("PCMK_trace_tags");
- trace_files = getenv("PCMK_trace_files");
- trace_blackbox = getenv("PCMK_trace_blackbox");
+ trace_fns = pcmk__env_option(PCMK__ENV_TRACE_FUNCTIONS);
+ trace_fmts = pcmk__env_option(PCMK__ENV_TRACE_FORMATS);
+ trace_tags = pcmk__env_option(PCMK__ENV_TRACE_TAGS);
+ trace_files = pcmk__env_option(PCMK__ENV_TRACE_FILES);
+ trace_blackbox = pcmk__env_option(PCMK__ENV_TRACE_BLACKBOX);
if (trace_tags != NULL) {
uint32_t tag;
@@ -695,8 +714,10 @@ crm_update_callsites(void)
log = FALSE;
crm_debug
("Enabling callsites based on priority=%d, files=%s, functions=%s, formats=%s, tags=%s",
- crm_log_level, getenv("PCMK_trace_files"), getenv("PCMK_trace_functions"),
- getenv("PCMK_trace_formats"), getenv("PCMK_trace_tags"));
+ crm_log_level, pcmk__env_option(PCMK__ENV_TRACE_FILES),
+ pcmk__env_option(PCMK__ENV_TRACE_FUNCTIONS),
+ pcmk__env_option(PCMK__ENV_TRACE_FORMATS),
+ pcmk__env_option(PCMK__ENV_TRACE_TAGS));
}
qb_log_filter_fn_set(crm_log_filter);
}
@@ -704,13 +725,11 @@ crm_update_callsites(void)
static gboolean
crm_tracing_enabled(void)
{
- if (crm_log_level == LOG_TRACE) {
- return TRUE;
- } else if (getenv("PCMK_trace_files") || getenv("PCMK_trace_functions")
- || getenv("PCMK_trace_formats") || getenv("PCMK_trace_tags")) {
- return TRUE;
- }
- return FALSE;
+ return (crm_log_level == LOG_TRACE)
+ || (pcmk__env_option(PCMK__ENV_TRACE_FILES) != NULL)
+ || (pcmk__env_option(PCMK__ENV_TRACE_FUNCTIONS) != NULL)
+ || (pcmk__env_option(PCMK__ENV_TRACE_FORMATS) != NULL)
+ || (pcmk__env_option(PCMK__ENV_TRACE_TAGS) != NULL);
}
static int
@@ -784,7 +803,8 @@ set_identity(const char *entity, int argc, char *const *argv)
CRM_ASSERT(crm_system_name != NULL);
- setenv("PCMK_service", crm_system_name, 1);
+ // Used by fencing.py.py (in fence-agents)
+ pcmk__set_env_option(PCMK__ENV_SERVICE, crm_system_name, false);
}
void
@@ -897,7 +917,7 @@ crm_log_init(const char *entity, uint8_t level, gboolean daemon, gboolean to_std
} else {
facility = PCMK__VALUE_NONE;
}
- pcmk__set_env_option(PCMK__ENV_LOGFACILITY, facility);
+ pcmk__set_env_option(PCMK__ENV_LOGFACILITY, facility, true);
}
if (pcmk__str_eq(facility, PCMK__VALUE_NONE, pcmk__str_casei)) {
@@ -1127,16 +1147,21 @@ pcmk__cli_init_logging(const char *name, unsigned int verbosity)
/*!
* \brief Log XML line-by-line in a formatted fashion
*
- * \param[in] level Priority at which to log the messages
- * \param[in] text Prefix for each line
- * \param[in] xml XML to log
+ * \param[in] file File name to use for log filtering
+ * \param[in] function Function name to use for log filtering
+ * \param[in] line Line number to use for log filtering
+ * \param[in] tags Logging tags to use for log filtering
+ * \param[in] level Priority at which to log the messages
+ * \param[in] text Prefix for each line
+ * \param[in] xml XML to log
*
* \note This does nothing when \p level is \p LOG_STDOUT.
* \note Do not call this function directly. It should be called only from the
* \p do_crm_log_xml() macro.
*/
void
-pcmk_log_xml_impl(uint8_t level, const char *text, const xmlNode *xml)
+pcmk_log_xml_as(const char *file, const char *function, uint32_t line,
+ uint32_t tags, uint8_t level, const char *text, const xmlNode *xml)
{
if (xml == NULL) {
do_crm_log(level, "%s%sNo data to dump as XML",
@@ -1148,12 +1173,76 @@ pcmk_log_xml_impl(uint8_t level, const char *text, const xmlNode *xml)
}
pcmk__output_set_log_level(logger_out, level);
+ pcmk__output_set_log_filter(logger_out, file, function, line, tags);
pcmk__xml_show(logger_out, text, xml, 1,
pcmk__xml_fmt_pretty
|pcmk__xml_fmt_open
|pcmk__xml_fmt_children
|pcmk__xml_fmt_close);
+ pcmk__output_set_log_filter(logger_out, NULL, NULL, 0U, 0U);
+ }
+}
+
+/*!
+ * \internal
+ * \brief Log XML changes line-by-line in a formatted fashion
+ *
+ * \param[in] file File name to use for log filtering
+ * \param[in] function Function name to use for log filtering
+ * \param[in] line Line number to use for log filtering
+ * \param[in] tags Logging tags to use for log filtering
+ * \param[in] level Priority at which to log the messages
+ * \param[in] xml XML whose changes to log
+ *
+ * \note This does nothing when \p level is \c LOG_STDOUT.
+ */
+void
+pcmk__log_xml_changes_as(const char *file, const char *function, uint32_t line,
+ uint32_t tags, uint8_t level, const xmlNode *xml)
+{
+ if (xml == NULL) {
+ do_crm_log(level, "No XML to dump");
+ return;
+ }
+
+ if (logger_out == NULL) {
+ CRM_CHECK(pcmk__log_output_new(&logger_out) == pcmk_rc_ok, return);
}
+ pcmk__output_set_log_level(logger_out, level);
+ pcmk__output_set_log_filter(logger_out, file, function, line, tags);
+ pcmk__xml_show_changes(logger_out, xml);
+ pcmk__output_set_log_filter(logger_out, NULL, NULL, 0U, 0U);
+}
+
+/*!
+ * \internal
+ * \brief Log an XML patchset line-by-line in a formatted fashion
+ *
+ * \param[in] file File name to use for log filtering
+ * \param[in] function Function name to use for log filtering
+ * \param[in] line Line number to use for log filtering
+ * \param[in] tags Logging tags to use for log filtering
+ * \param[in] level Priority at which to log the messages
+ * \param[in] patchset XML patchset to log
+ *
+ * \note This does nothing when \p level is \c LOG_STDOUT.
+ */
+void
+pcmk__log_xml_patchset_as(const char *file, const char *function, uint32_t line,
+ uint32_t tags, uint8_t level, const xmlNode *patchset)
+{
+ if (patchset == NULL) {
+ do_crm_log(level, "No patchset to dump");
+ return;
+ }
+
+ if (logger_out == NULL) {
+ CRM_CHECK(pcmk__log_output_new(&logger_out) == pcmk_rc_ok, return);
+ }
+ pcmk__output_set_log_level(logger_out, level);
+ pcmk__output_set_log_filter(logger_out, file, function, line, tags);
+ logger_out->message(logger_out, "xml-patchset", patchset);
+ pcmk__output_set_log_filter(logger_out, NULL, NULL, 0U, 0U);
}
/*!
@@ -1188,5 +1277,23 @@ crm_add_logfile(const char *filename)
return pcmk__add_logfile(filename) == pcmk_rc_ok;
}
+void
+pcmk_log_xml_impl(uint8_t level, const char *text, const xmlNode *xml)
+{
+ pcmk_log_xml_as(__FILE__, __func__, __LINE__, 0, level, text, xml);
+}
+
// LCOV_EXCL_STOP
// End deprecated API
+
+void pcmk__set_config_error_handler(pcmk__config_error_func error_handler, void *error_context)
+{
+ pcmk__config_error_handler = error_handler;
+ pcmk__config_error_context = error_context;
+}
+
+void pcmk__set_config_warning_handler(pcmk__config_warning_func warning_handler, void *warning_context)
+{
+ pcmk__config_warning_handler = warning_handler;
+ pcmk__config_warning_context = warning_context;
+}
\ No newline at end of file
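/* Illustrative sketch (not from the upstream change): how the location-aware
 * variants are reached -- normally via the do_crm_log_xml() macro family (see
 * the note above) -- using the same argument pattern as the deprecated
 * pcmk_log_xml_impl() wrapper.
 */
pcmk_log_xml_as(__FILE__, __func__, __LINE__, 0, LOG_TRACE, "my-xml", xml);
pcmk__log_xml_changes_as(__FILE__, __func__, __LINE__, 0, LOG_TRACE, xml);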
diff --git a/lib/common/mainloop.c b/lib/common/mainloop.c
index 3124e43..f971713 100644
--- a/lib/common/mainloop.c
+++ b/lib/common/mainloop.c
@@ -393,16 +393,6 @@ mainloop_add_signal(int sig, void (*dispatch) (int sig))
mainloop_destroy_signal_entry(sig);
return FALSE;
}
-#if 0
- /* If we want signals to interrupt mainloop's poll(), instead of waiting for
- * the timeout, then we should call siginterrupt() below
- *
- * For now, just enforce a low timeout
- */
- if (siginterrupt(sig, 1) < 0) {
- crm_perror(LOG_INFO, "Could not enable system call interruptions for signal %d", sig);
- }
-#endif
return TRUE;
}
@@ -624,7 +614,7 @@ struct qb_ipcs_poll_handlers gio_poll_funcs = {
static enum qb_ipc_type
pick_ipc_type(enum qb_ipc_type requested)
{
- const char *env = getenv("PCMK_ipc_type");
+ const char *env = pcmk__env_option(PCMK__ENV_IPC_TYPE);
if (env && strcmp("shared-mem", env) == 0) {
return QB_IPC_SHM;
@@ -668,7 +658,8 @@ mainloop_add_ipc_server_with_prio(const char *name, enum qb_ipc_type type,
server = qb_ipcs_create(name, 0, pick_ipc_type(type), callbacks);
if (server == NULL) {
- crm_err("Could not create %s IPC server: %s (%d)", name, pcmk_strerror(rc), rc);
+ crm_err("Could not create %s IPC server: %s (%d)",
+ name, pcmk_rc_str(errno), errno);
return NULL;
}
@@ -874,21 +865,34 @@ pcmk__add_mainloop_ipc(crm_ipc_t *ipc, int priority, void *userdata,
const struct ipc_client_callbacks *callbacks,
mainloop_io_t **source)
{
+ int rc = pcmk_rc_ok;
+ int fd = -1;
+ const char *ipc_name = NULL;
+
CRM_CHECK((ipc != NULL) && (callbacks != NULL), return EINVAL);
- if (!crm_ipc_connect(ipc)) {
- int rc = errno;
- crm_debug("Connection to %s failed: %d", crm_ipc_name(ipc), errno);
+ ipc_name = pcmk__s(crm_ipc_name(ipc), "Pacemaker");
+ rc = pcmk__connect_generic_ipc(ipc);
+ if (rc != pcmk_rc_ok) {
+ crm_debug("Connection to %s failed: %s", ipc_name, pcmk_rc_str(rc));
return rc;
}
- *source = mainloop_add_fd(crm_ipc_name(ipc), priority, crm_ipc_get_fd(ipc),
- userdata, NULL);
- if (*source == NULL) {
- int rc = errno;
+ rc = pcmk__ipc_fd(ipc, &fd);
+ if (rc != pcmk_rc_ok) {
+ crm_debug("Could not obtain file descriptor for %s IPC: %s",
+ ipc_name, pcmk_rc_str(rc));
crm_ipc_close(ipc);
return rc;
}
+
+ *source = mainloop_add_fd(ipc_name, priority, fd, userdata, NULL);
+ if (*source == NULL) {
+ rc = errno;
+ crm_ipc_close(ipc);
+ return rc;
+ }
+
(*source)->ipc = ipc;
(*source)->destroy_fn = callbacks->destroy;
(*source)->dispatch_fn_ipc = callbacks->dispatch;
diff --git a/lib/common/mock.c b/lib/common/mock.c
index 2bd8334..6f837ad 100644
--- a/lib/common/mock.c
+++ b/lib/common/mock.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2021-2022 the Pacemaker project contributors
+ * Copyright 2021-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -7,6 +7,8 @@
* version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
*/
+#include <crm_internal.h>
+
#include <errno.h>
#include <pwd.h>
#include <stdarg.h>
@@ -262,6 +264,8 @@ __wrap_endgrent(void) {
* will_return(__wrap_fopen, errno_to_set);
*
* expect_* functions: https://api.cmocka.org/group__cmocka__param.html
+ *
+ * This has two mocked functions, since fopen() is sometimes actually fopen64().
*/
bool pcmk__mock_fopen = false;
@@ -285,6 +289,26 @@ __wrap_fopen(const char *pathname, const char *mode)
}
}
+#ifdef HAVE_FOPEN64
+FILE *
+__wrap_fopen64(const char *pathname, const char *mode)
+{
+ if (pcmk__mock_fopen) {
+ check_expected_ptr(pathname);
+ check_expected_ptr(mode);
+ errno = mock_type(int);
+
+ if (errno != 0) {
+ return NULL;
+ } else {
+ return __real_fopen64(pathname, mode);
+ }
+
+ } else {
+ return __real_fopen64(pathname, mode);
+ }
+}
+#endif
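/* Illustrative sketch (not from the upstream change): how a unit test would
 * drive the mock, following the usage described in the comment above.
 * read_config() is a hypothetical function under test, and matching expect_*()
 * calls for the pathname and mode parameters would normally be queued as well.
 */
pcmk__mock_fopen = true;
will_return(__wrap_fopen, EACCES);      // Next fopen()/fopen64() fails with EACCES
rc = read_config("/etc/example.conf");  // Exercise the error path
pcmk__mock_fopen = false;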
/* getpwnam_r()
*
diff --git a/lib/common/mock_private.h b/lib/common/mock_private.h
index 45207c4..b0e0ed2 100644
--- a/lib/common/mock_private.h
+++ b/lib/common/mock_private.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2021-2022 the Pacemaker project contributors
+ * Copyright 2021-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -29,6 +29,10 @@ void *__wrap_calloc(size_t nmemb, size_t size);
extern bool pcmk__mock_fopen;
FILE *__real_fopen(const char *pathname, const char *mode);
FILE *__wrap_fopen(const char *pathname, const char *mode);
+#ifdef HAVE_FOPEN64
+FILE *__real_fopen64(const char *pathname, const char *mode);
+FILE *__wrap_fopen64(const char *pathname, const char *mode);
+#endif
extern bool pcmk__mock_getenv;
char *__real_getenv(const char *name);
diff --git a/lib/common/nvpair.c b/lib/common/nvpair.c
index 3766c45..dbb9c99 100644
--- a/lib/common/nvpair.c
+++ b/lib/common/nvpair.c
@@ -334,55 +334,6 @@ crm_xml_add(xmlNode *node, const char *name, const char *value)
}
/*!
- * \brief Replace an XML attribute with specified name and (possibly NULL) value
- *
- * \param[in,out] node XML node to modify
- * \param[in] name Attribute name to set
- * \param[in] value Attribute value to set
- *
- * \return New value on success, \c NULL otherwise
- * \note This does nothing if node or name is \c NULL or empty.
- */
-const char *
-crm_xml_replace(xmlNode *node, const char *name, const char *value)
-{
- bool dirty = FALSE;
- xmlAttr *attr = NULL;
- const char *old_value = NULL;
-
- CRM_CHECK(node != NULL, return NULL);
- CRM_CHECK(name != NULL && name[0] != 0, return NULL);
-
- old_value = crm_element_value(node, name);
-
- /* Could be re-setting the same value */
- CRM_CHECK(old_value != value, return value);
-
- if (pcmk__check_acl(node, name, pcmk__xf_acl_write) == FALSE) {
- /* Create a fake object linked to doc->_private instead? */
- crm_trace("Cannot replace %s=%s to %s", name, value, node->name);
- return NULL;
-
- } else if (old_value && !value) {
- xml_remove_prop(node, name);
- return NULL;
- }
-
- if (pcmk__tracking_xml_changes(node, FALSE)) {
- if (!old_value || !value || !strcmp(old_value, value)) {
- dirty = TRUE;
- }
- }
-
- attr = xmlSetProp(node, (pcmkXmlStr) name, (pcmkXmlStr) value);
- if (dirty) {
- pcmk__mark_xml_attr_dirty(attr);
- }
- CRM_CHECK(attr && attr->children && attr->children->content, return NULL);
- return (char *) attr->children->content;
-}
-
-/*!
* \brief Create an XML attribute with specified name and integer value
*
* This is like \c crm_xml_add() but taking an integer value.
@@ -503,7 +454,7 @@ crm_element_value(const xmlNode *data, const char *name)
return NULL;
} else if (name == NULL) {
- crm_err("Couldn't find NULL in %s", crm_element_name(data));
+ crm_err("Couldn't find NULL in %s", data->name);
return NULL;
}
@@ -883,7 +834,7 @@ xml2list(const xmlNode *parent)
nvpair_list = find_xml_node(parent, XML_TAG_ATTRS, FALSE);
if (nvpair_list == NULL) {
- crm_trace("No attributes in %s", crm_element_name(parent));
+ crm_trace("No attributes in %s", parent->name);
crm_log_xml_trace(parent, "No attributes for resource op");
}
@@ -988,5 +939,44 @@ pcmk_format_named_time(const char *name, time_t epoch_time)
return result;
}
+const char *
+crm_xml_replace(xmlNode *node, const char *name, const char *value)
+{
+ bool dirty = FALSE;
+ xmlAttr *attr = NULL;
+ const char *old_value = NULL;
+
+ CRM_CHECK(node != NULL, return NULL);
+ CRM_CHECK(name != NULL && name[0] != 0, return NULL);
+
+ old_value = crm_element_value(node, name);
+
+ /* Could be re-setting the same value */
+ CRM_CHECK(old_value != value, return value);
+
+ if (pcmk__check_acl(node, name, pcmk__xf_acl_write) == FALSE) {
+ /* Create a fake object linked to doc->_private instead? */
+ crm_trace("Cannot replace %s=%s to %s", name, value, node->name);
+ return NULL;
+
+ } else if (old_value && !value) {
+ xml_remove_prop(node, name);
+ return NULL;
+ }
+
+ if (pcmk__tracking_xml_changes(node, FALSE)) {
+ if (!old_value || !value || !strcmp(old_value, value)) {
+ dirty = TRUE;
+ }
+ }
+
+ attr = xmlSetProp(node, (pcmkXmlStr) name, (pcmkXmlStr) value);
+ if (dirty) {
+ pcmk__mark_xml_attr_dirty(attr);
+ }
+ CRM_CHECK(attr && attr->children && attr->children->content, return NULL);
+ return (char *) attr->children->content;
+}
+
// LCOV_EXCL_STOP
// End deprecated API
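/* Illustrative sketch (not from the upstream change): with crm_xml_replace()
 * relegated to the deprecated section above, overwriting an attribute is
 * typically done with crm_xml_add(); whether that is the documented
 * replacement is not stated in this hunk.  The attribute name and value are
 * placeholders.
 */
crm_xml_add(node, "target-role", "Stopped");    // Sets or overwrites the attribute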
diff --git a/lib/common/options.c b/lib/common/options.c
index cb32b3f..2d86ebc 100644
--- a/lib/common/options.c
+++ b/lib/common/options.c
@@ -91,15 +91,23 @@ pcmk__env_option(const char *option)
/*!
* \brief Set or unset a Pacemaker environment variable option
*
- * Set an environment variable option with both a PCMK_ and (for
- * backward compatibility) HA_ prefix.
+ * Set an environment variable option with a \c "PCMK_" prefix and optionally
+ * an \c "HA_" prefix for backward compatibility.
*
* \param[in] option Environment variable name (without prefix)
* \param[in] value New value (or NULL to unset)
+ * \param[in] compat If false and \p value is not \c NULL, set only
+ * \c "PCMK_<option>"; otherwise, set (or unset) both
+ * \c "PCMK_<option>" and \c "HA_<option>"
+ *
+ * \note \p compat is ignored when \p value is \c NULL. A \c NULL \p value
+ * means we're unsetting \p option. \c pcmk__get_env_option() checks for
+ * both prefixes, so we want to clear them both.
*/
void
-pcmk__set_env_option(const char *option, const char *value)
+pcmk__set_env_option(const char *option, const char *value, bool compat)
{
+ // @COMPAT Drop support for "HA_" options eventually
const char *const prefixes[] = {"PCMK_", "HA_"};
char env_name[NAME_MAX];
@@ -132,6 +140,11 @@ pcmk__set_env_option(const char *option, const char *value)
crm_err("Failed to %sset %s: %s", (value != NULL)? "" : "un",
env_name, strerror(errno));
}
+
+ if (!compat && (value != NULL)) {
+ // For set, don't proceed to HA_<option> unless compat is enabled
+ break;
+ }
}
}
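/* Illustrative sketch (not from the upstream change): both call styles of the
 * extended pcmk__set_env_option(), as used elsewhere in this patch.
 */
pcmk__set_env_option(PCMK__ENV_LOGFACILITY, facility, true);     // Also sets the HA_ variant
pcmk__set_env_option(PCMK__ENV_SERVICE, crm_system_name, false); // PCMK_ prefix only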
diff --git a/lib/common/output_html.c b/lib/common/output_html.c
index 47b14c1..92e9010 100644
--- a/lib/common/output_html.c
+++ b/lib/common/output_html.c
@@ -152,7 +152,7 @@ html_finish(pcmk__output_t *out, crm_exit_t exit_status, bool print, void **copy
* anything else that the user could add, and we want it done last to pick up
* any options that may have been given.
*/
- head_node = xmlNewNode(NULL, (pcmkXmlStr) "head");
+ head_node = xmlNewDocRawNode(NULL, NULL, (pcmkXmlStr) "head", NULL);
if (title != NULL ) {
pcmk_create_xml_text_node(head_node, "title", title);
@@ -458,7 +458,7 @@ pcmk__html_add_header(const char *name, ...) {
va_start(ap, name);
- header_node = xmlNewNode(NULL, (pcmkXmlStr) name);
+ header_node = xmlNewDocRawNode(NULL, NULL, (pcmkXmlStr) name, NULL);
while (1) {
char *key = va_arg(ap, char *);
char *value;
diff --git a/lib/common/output_log.c b/lib/common/output_log.c
index aca168d..54fa37e 100644
--- a/lib/common/output_log.c
+++ b/lib/common/output_log.c
@@ -12,6 +12,7 @@
#include <ctype.h>
#include <stdarg.h>
+#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
@@ -23,8 +24,43 @@ typedef struct private_data_s {
/* gathered in log_begin_list */
GQueue/*<char*>*/ *prefixes;
uint8_t log_level;
+ const char *function;
+ const char *file;
+ uint32_t line;
+ uint32_t tags;
} private_data_t;
+/*!
+ * \internal
+ * \brief Log a message using output object's log level and filters
+ *
+ * \param[in] priv Output object's private_data_t
+ * \param[in] fmt printf(3)-style format string
+ * \param[in] args... Format string arguments
+ */
+#define logger(priv, fmt, args...) do { \
+ qb_log_from_external_source(pcmk__s((priv)->function, __func__), \
+ pcmk__s((priv)->file, __FILE__), fmt, (priv)->log_level, \
+ (((priv)->line == 0)? __LINE__ : (priv)->line), (priv)->tags, \
+ ##args); \
+ } while (0);
+
+/*!
+ * \internal
+ * \brief Log a message using an explicit log level and output object's filters
+ *
+ * \param[in] priv Output object's private_data_t
+ * \param[in] level Log level
+ * \param[in] fmt printf(3)-style format string
+ * \param[in] ap Variadic arguments
+ */
+#define logger_va(priv, level, fmt, ap) do { \
+ qb_log_from_external_source_va(pcmk__s((priv)->function, __func__), \
+ pcmk__s((priv)->file, __FILE__), fmt, level, \
+ (((priv)->line == 0)? __LINE__ : (priv)->line), (priv)->tags, \
+ ap); \
+ } while (0);
+
static void
log_subprocess_output(pcmk__output_t *out, int exit_status,
const char *proc_stdout, const char *proc_stderr) {
@@ -94,35 +130,31 @@ log_version(pcmk__output_t *out, bool extended) {
priv = out->priv;
if (extended) {
- do_crm_log(priv->log_level, "Pacemaker %s (Build: %s): %s",
- PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES);
+ logger(priv, "Pacemaker %s (Build: %s): %s",
+ PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES);
} else {
- do_crm_log(priv->log_level, "Pacemaker %s", PACEMAKER_VERSION);
- do_crm_log(priv->log_level, "Written by Andrew Beekhof and"
- "the Pacemaker project contributors");
+ logger(priv, "Pacemaker " PACEMAKER_VERSION);
+ logger(priv, "Written by Andrew Beekhof and "
+ "the Pacemaker project contributors");
}
}
G_GNUC_PRINTF(2, 3)
static void
-log_err(pcmk__output_t *out, const char *format, ...) {
+log_err(pcmk__output_t *out, const char *format, ...)
+{
va_list ap;
- char* buffer = NULL;
- int len = 0;
+ private_data_t *priv = NULL;
- CRM_ASSERT(out != NULL);
+ CRM_ASSERT((out != NULL) && (out->priv != NULL));
+ priv = out->priv;
- va_start(ap, format);
- /* Informational output does not get indented, to separate it from other
+ /* Error output does not get indented, to separate it from other
* potentially indented list output.
*/
- len = vasprintf(&buffer, format, ap);
- CRM_ASSERT(len >= 0);
+ va_start(ap, format);
+ logger_va(priv, LOG_ERR, format, ap);
va_end(ap);
-
- crm_err("%s", buffer);
-
- free(buffer);
}
static void
@@ -195,15 +227,15 @@ log_list_item(pcmk__output_t *out, const char *name, const char *format, ...) {
if (strcmp(buffer, "") != 0) { /* We don't want empty messages */
if ((name != NULL) && (strcmp(name, "") != 0)) {
if (strcmp(prefix, "") != 0) {
- do_crm_log(priv->log_level, "%s: %s: %s", prefix, name, buffer);
+ logger(priv, "%s: %s: %s", prefix, name, buffer);
} else {
- do_crm_log(priv->log_level, "%s: %s", name, buffer);
+ logger(priv, "%s: %s", name, buffer);
}
} else {
if (strcmp(prefix, "") != 0) {
- do_crm_log(priv->log_level, "%s: %s", prefix, buffer);
+ logger(priv, "%s: %s", prefix, buffer);
} else {
- do_crm_log(priv->log_level, "%s", buffer);
+ logger(priv, "%s", buffer);
}
}
}
@@ -228,23 +260,21 @@ log_end_list(pcmk__output_t *out) {
G_GNUC_PRINTF(2, 3)
static int
-log_info(pcmk__output_t *out, const char *format, ...) {
- private_data_t *priv = NULL;
- int len = 0;
+log_info(pcmk__output_t *out, const char *format, ...)
+{
va_list ap;
- char* buffer = NULL;
+ private_data_t *priv = NULL;
CRM_ASSERT(out != NULL && out->priv != NULL);
priv = out->priv;
+ /* Informational output does not get indented, to separate it from other
+ * potentially indented list output.
+ */
va_start(ap, format);
- len = vasprintf(&buffer, format, ap);
- CRM_ASSERT(len >= 0);
+ logger_va(priv, priv->log_level, format, ap);
va_end(ap);
- do_crm_log(priv->log_level, "%s", buffer);
-
- free(buffer);
return pcmk_rc_ok;
}
@@ -252,22 +282,16 @@ G_GNUC_PRINTF(2, 3)
static int
log_transient(pcmk__output_t *out, const char *format, ...)
{
- private_data_t *priv = NULL;
- int len = 0;
va_list ap;
- char *buffer = NULL;
+ private_data_t *priv = NULL;
CRM_ASSERT(out != NULL && out->priv != NULL);
priv = out->priv;
va_start(ap, format);
- len = vasprintf(&buffer, format, ap);
- CRM_ASSERT(len >= 0);
+ logger_va(priv, QB_MAX(priv->log_level, LOG_DEBUG), format, ap);
va_end(ap);
- do_crm_log(QB_MAX(priv->log_level, LOG_DEBUG), "%s", buffer);
-
- free(buffer);
return pcmk_rc_ok;
}
@@ -351,3 +375,33 @@ pcmk__output_set_log_level(pcmk__output_t *out, uint8_t log_level) {
priv = out->priv;
priv->log_level = log_level;
}
+
+/*!
+ * \internal
+ * \brief Set the file, function, line, and tags used to filter log output
+ *
+ * \param[in,out] out Logger output object
+ * \param[in] file File name to filter with (or NULL for default)
+ * \param[in] function Function name to filter with (or NULL for default)
+ * \param[in] line Line number to filter with (or 0 for default)
+ * \param[in] tags Tags to filter with (or 0 for none)
+ *
+ * \note Custom filters should generally be used only in short areas of a single
+ * function. When done, callers should call this function again with
+ * NULL/0 arguments to reset the filters.
+ */
+void
+pcmk__output_set_log_filter(pcmk__output_t *out, const char *file,
+ const char *function, uint32_t line, uint32_t tags)
+{
+ private_data_t *priv = NULL;
+
+ CRM_ASSERT((out != NULL) && (out->priv != NULL));
+ CRM_CHECK(pcmk__str_eq(out->fmt_name, "log", pcmk__str_none), return);
+
+ priv = out->priv;
+ priv->file = file;
+ priv->function = function;
+ priv->line = line;
+ priv->tags = tags;
+}
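/* Illustrative sketch (not from the upstream change): the set/emit/reset
 * pattern the new filter is designed for, matching the callers added to
 * logging.c earlier in this patch.
 */
pcmk__output_set_log_level(out, LOG_DEBUG);
pcmk__output_set_log_filter(out, file, function, line, tags);
out->message(out, "xml-patchset", patchset);
pcmk__output_set_log_filter(out, NULL, NULL, 0U, 0U);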
diff --git a/lib/common/output_xml.c b/lib/common/output_xml.c
index 0972638..ba61145 100644
--- a/lib/common/output_xml.c
+++ b/lib/common/output_xml.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2019-2022 the Pacemaker project contributors
+ * Copyright 2019-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -13,6 +13,10 @@
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
+#include <crm/crm.h>
+#include <crm/common/output.h>
+#include <crm/common/xml.h>
+#include <crm/common/xml_internal.h> /* pcmk__xml2fd */
#include <glib.h>
#include <crm/common/cmdline_internal.h>
@@ -43,8 +47,8 @@ typedef struct subst_s {
static subst_t substitutions[] = {
{ "Active Resources", "resources" },
- { "Allocation Scores", "allocations" },
- { "Allocation Scores and Utilization Information", "allocations_utilizations" },
+ { "Assignment Scores", "allocations" },
+ { "Assignment Scores and Utilization Information", "allocations_utilizations" },
{ "Cluster Summary", "summary" },
{ "Current cluster status", "cluster_status" },
{ "Executing Cluster Transition", "transition" },
@@ -190,10 +194,7 @@ xml_finish(pcmk__output_t *out, crm_exit_t exit_status, bool print, void **copy_
}
if (print) {
- char *buf = dump_xml_formatted_with_text(priv->root);
- fprintf(out->dest, "%s", buf);
- fflush(out->dest);
- free(buf);
+ pcmk__xml2fd(fileno(out->dest), priv->root);
}
if (copy_dest != NULL) {
@@ -286,7 +287,10 @@ xml_output_xml(pcmk__output_t *out, const char *name, const char *buf) {
CRM_ASSERT(out != NULL);
parent = pcmk__output_create_xml_node(out, name, NULL);
- cdata_node = xmlNewCDataBlock(getDocPtr(parent), (pcmkXmlStr) buf, strlen(buf));
+ if (parent == NULL) {
+ return;
+ }
+ cdata_node = xmlNewCDataBlock(parent->doc, (pcmkXmlStr) buf, strlen(buf));
xmlAddChild(parent, cdata_node);
}
diff --git a/lib/common/patchset.c b/lib/common/patchset.c
index 8c1362d..34e27fb 100644
--- a/lib/common/patchset.c
+++ b/lib/common/patchset.c
@@ -41,6 +41,14 @@ add_xml_changes_to_patchset(xmlNode *xml, xmlNode *patchset)
xml_node_private_t *nodepriv = xml->_private;
const char *value = NULL;
+ if (nodepriv == NULL) {
+ /* Elements that shouldn't occur in a CIB don't have _private set. They
+ * should be stripped out, ignored, or have an error thrown by any code
+ * that processes their parent, so we ignore any changes to them.
+ */
+ return;
+ }
+
// If this XML node is new, just report that
if (patchset && pcmk_is_set(nodepriv->flags, pcmk__xf_created)) {
GString *xpath = pcmk__element_xpath(xml->parent);
@@ -93,7 +101,7 @@ add_xml_changes_to_patchset(xmlNode *xml, xmlNode *patchset)
} else {
crm_xml_add(attr, XML_DIFF_OP, "set");
- value = crm_element_value(xml, (const char *) pIter->name);
+ value = pcmk__xml_attr_value(pIter);
crm_xml_add(attr, XML_NVPAIR_ATTR_VALUE, value);
}
}
@@ -189,7 +197,7 @@ xml_repair_v1_diff(xmlNode *last, xmlNode *next, xmlNode *local_diff,
return;
}
- tag = "diff-removed";
+ tag = XML_TAG_DIFF_REMOVED;
diff_child = find_xml_node(local_diff, tag, FALSE);
if (diff_child == NULL) {
diff_child = create_xml_node(local_diff, tag);
@@ -210,7 +218,7 @@ xml_repair_v1_diff(xmlNode *last, xmlNode *next, xmlNode *local_diff,
}
}
- tag = "diff-added";
+ tag = XML_TAG_DIFF_ADDED;
diff_child = find_xml_node(local_diff, tag, FALSE);
if (diff_child == NULL) {
diff_child = create_xml_node(local_diff, tag);
@@ -229,7 +237,8 @@ xml_repair_v1_diff(xmlNode *last, xmlNode *next, xmlNode *local_diff,
}
for (xmlAttrPtr a = pcmk__xe_first_attr(next); a != NULL; a = a->next) {
- const char *p_value = crm_element_value(next, (const char *) a->name);
+
+ const char *p_value = pcmk__xml_attr_value(a);
xmlSetProp(cib, a->name, (pcmkXmlStr) p_value);
}
@@ -246,7 +255,7 @@ xml_create_patchset_v1(xmlNode *source, xmlNode *target, bool config,
if (patchset) {
CRM_LOG_ASSERT(xml_document_dirty(target));
xml_repair_v1_diff(source, target, patchset, config);
- crm_xml_add(patchset, "format", "1");
+ crm_xml_add(patchset, PCMK_XA_FORMAT, "1");
}
return patchset;
}
@@ -276,7 +285,7 @@ xml_create_patchset_v2(xmlNode *source, xmlNode *target)
docpriv = target->doc->_private;
patchset = create_xml_node(NULL, XML_TAG_DIFF);
- crm_xml_add_int(patchset, "format", 2);
+ crm_xml_add_int(patchset, PCMK_XA_FORMAT, 2);
version = create_xml_node(patchset, XML_DIFF_VERSION);
@@ -389,7 +398,7 @@ patchset_process_digest(xmlNode *patch, xmlNode *source, xmlNode *target,
*/
CRM_LOG_ASSERT(!xml_document_dirty(target));
- crm_element_value_int(patch, "format", &format);
+ crm_element_value_int(patch, PCMK_XA_FORMAT, &format);
if ((format > 1) && !with_digest) {
return;
}
@@ -418,7 +427,6 @@ process_v1_removals(xmlNode *target, xmlNode *patch)
xmlNode *cIter = NULL;
char *id = NULL;
- const char *name = NULL;
const char *value = NULL;
if ((target == NULL) || (patch == NULL)) {
@@ -431,18 +439,15 @@ process_v1_removals(xmlNode *target, xmlNode *patch)
subtract_xml_comment(target->parent, target, patch, &dummy);
}
- name = crm_element_name(target);
- CRM_CHECK(name != NULL, return);
- CRM_CHECK(pcmk__str_eq(crm_element_name(target), crm_element_name(patch),
- pcmk__str_casei),
- return);
+ CRM_CHECK(pcmk__xe_is(target, (const char *) patch->name), return);
CRM_CHECK(pcmk__str_eq(ID(target), ID(patch), pcmk__str_casei), return);
// Check for XML_DIFF_MARKER in a child
id = crm_element_value_copy(target, XML_ATTR_ID);
value = crm_element_value(patch, XML_DIFF_MARKER);
if ((value != NULL) && (strcmp(value, "removed:top") == 0)) {
- crm_trace("We are the root of the deletion: %s.id=%s", name, id);
+ crm_trace("We are the root of the deletion: %s.id=%s",
+ target->name, id);
free_xml(target);
free(id);
return;
@@ -482,18 +487,17 @@ process_v1_additions(xmlNode *parent, xmlNode *target, xmlNode *patch)
}
// Check for XML_DIFF_MARKER in a child
+ name = (const char *) patch->name;
value = crm_element_value(patch, XML_DIFF_MARKER);
if ((target == NULL) && (value != NULL)
&& (strcmp(value, "added:top") == 0)) {
id = ID(patch);
- name = crm_element_name(patch);
crm_trace("We are the root of the addition: %s.id=%s", name, id);
add_node_copy(parent, patch);
return;
} else if (target == NULL) {
id = ID(patch);
- name = crm_element_name(patch);
crm_err("Could not locate: %s.id=%s", name, id);
return;
}
@@ -502,17 +506,13 @@ process_v1_additions(xmlNode *parent, xmlNode *target, xmlNode *patch)
pcmk__xc_update(parent, target, patch);
}
- name = crm_element_name(target);
- CRM_CHECK(name != NULL, return);
- CRM_CHECK(pcmk__str_eq(crm_element_name(target), crm_element_name(patch),
- pcmk__str_casei),
- return);
+ CRM_CHECK(pcmk__xe_is(target, name), return);
CRM_CHECK(pcmk__str_eq(ID(target), ID(patch), pcmk__str_casei), return);
for (xIter = pcmk__xe_first_attr(patch); xIter != NULL;
xIter = xIter->next) {
const char *p_name = (const char *) xIter->name;
- const char *p_value = crm_element_value(patch, p_name);
+ const char *p_value = pcmk__xml_attr_value(xIter);
xml_remove_prop(target, p_name); // Preserve patch order
crm_xml_add(target, p_name, p_value);
@@ -547,7 +547,7 @@ find_patch_xml_node(const xmlNode *patchset, int format, bool added,
switch (format) {
case 1:
- label = added? "diff-added" : "diff-removed";
+ label = added? XML_TAG_DIFF_ADDED : XML_TAG_DIFF_REMOVED;
*patch_node = find_xml_node(patchset, label, FALSE);
cib_node = find_xml_node(*patch_node, "cib", FALSE);
if (cib_node != NULL) {
@@ -582,7 +582,7 @@ xml_patch_versions(const xmlNode *patchset, int add[3], int del[3])
};
- crm_element_value_int(patchset, "format", &format);
+ crm_element_value_int(patchset, PCMK_XA_FORMAT, &format);
/* Process removals */
if (!find_patch_xml_node(patchset, format, FALSE, &tmp)) {
@@ -614,12 +614,11 @@ xml_patch_versions(const xmlNode *patchset, int add[3], int del[3])
*
* \param[in] xml Root of current CIB
* \param[in] patchset Patchset to check
- * \param[in] format Patchset version
*
* \return Standard Pacemaker return code
*/
static int
-xml_patch_version_check(const xmlNode *xml, const xmlNode *patchset, int format)
+xml_patch_version_check(const xmlNode *xml, const xmlNode *patchset)
{
int lpc = 0;
bool changed = FALSE;
@@ -701,8 +700,8 @@ apply_v1_patchset(xmlNode *xml, const xmlNode *patchset)
int root_nodes_seen = 0;
xmlNode *child_diff = NULL;
- xmlNode *added = find_xml_node(patchset, "diff-added", FALSE);
- xmlNode *removed = find_xml_node(patchset, "diff-removed", FALSE);
+ xmlNode *added = find_xml_node(patchset, XML_TAG_DIFF_ADDED, FALSE);
+ xmlNode *removed = find_xml_node(patchset, XML_TAG_DIFF_REMOVED, FALSE);
xmlNode *old = copy_xml(xml);
crm_trace("Subtraction Phase");
@@ -981,7 +980,7 @@ apply_v2_patchset(xmlNode *xml, const xmlNode *patchset)
for (xmlAttrPtr pIter = pcmk__xe_first_attr(attrs); pIter != NULL;
pIter = pIter->next) {
const char *name = (const char *) pIter->name;
- const char *value = crm_element_value(attrs, name);
+ const char *value = pcmk__xml_attr_value(pIter);
crm_xml_add(match, name, value);
}
@@ -1022,6 +1021,10 @@ apply_v2_patchset(xmlNode *xml, const xmlNode *patchset)
}
child = xmlDocCopyNode(change->children, match->doc, 1);
+ if (child == NULL) {
+ return ENOMEM;
+ }
+
if (match_child) {
crm_trace("Adding %s at position %d", child->name, position);
xmlAddPrevSibling(match_child, child);
@@ -1098,43 +1101,31 @@ xml_apply_patchset(xmlNode *xml, xmlNode *patchset, bool check_version)
int format = 1;
int rc = pcmk_ok;
xmlNode *old = NULL;
- const char *digest = crm_element_value(patchset, XML_ATTR_DIGEST);
+ const char *digest = NULL;
if (patchset == NULL) {
return rc;
}
- pcmk__if_tracing(
- {
- pcmk__output_t *logger_out = NULL;
-
- rc = pcmk_rc2legacy(pcmk__log_output_new(&logger_out));
- CRM_CHECK(rc == pcmk_ok, return rc);
+ pcmk__log_xml_patchset(LOG_TRACE, patchset);
- pcmk__output_set_log_level(logger_out, LOG_TRACE);
- rc = logger_out->message(logger_out, "xml-patchset", patchset);
- logger_out->finish(logger_out, pcmk_rc2exitc(rc), true,
- NULL);
- pcmk__output_free(logger_out);
- rc = pcmk_ok;
- },
- {}
- );
-
- crm_element_value_int(patchset, "format", &format);
if (check_version) {
- rc = pcmk_rc2legacy(xml_patch_version_check(xml, patchset, format));
+ rc = pcmk_rc2legacy(xml_patch_version_check(xml, patchset));
if (rc != pcmk_ok) {
return rc;
}
}
- if (digest) {
- // Make it available for logging if result doesn't have expected digest
- old = copy_xml(xml);
+ digest = crm_element_value(patchset, XML_ATTR_DIGEST);
+ if (digest != NULL) {
+ /* Make original XML available for logging in case result doesn't have
+ * expected digest
+ */
+ pcmk__if_tracing(old = copy_xml(xml), {});
}
if (rc == pcmk_ok) {
+ crm_element_value_int(patchset, PCMK_XA_FORMAT, &format);
switch (format) {
case 1:
rc = pcmk_rc2legacy(apply_v1_patchset(xml, patchset));
@@ -1195,9 +1186,9 @@ xmlNode *
diff_xml_object(xmlNode *old, xmlNode *new, gboolean suppress)
{
xmlNode *tmp1 = NULL;
- xmlNode *diff = create_xml_node(NULL, "diff");
- xmlNode *removed = create_xml_node(diff, "diff-removed");
- xmlNode *added = create_xml_node(diff, "diff-added");
+ xmlNode *diff = create_xml_node(NULL, XML_TAG_DIFF);
+ xmlNode *removed = create_xml_node(diff, XML_TAG_DIFF_REMOVED);
+ xmlNode *added = create_xml_node(diff, XML_TAG_DIFF_ADDED);
crm_xml_add(diff, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET);
@@ -1268,11 +1259,12 @@ subtract_xml_object(xmlNode *parent, xmlNode *left, xmlNode *right,
}
id = ID(left);
+ name = (const char *) left->name;
if (right == NULL) {
xmlNode *deleted = NULL;
crm_trace("Processing <%s " XML_ATTR_ID "=%s> (complete copy)",
- crm_element_name(left), id);
+ name, id);
deleted = add_node_copy(parent, left);
crm_xml_add(deleted, XML_DIFF_MARKER, marker);
@@ -1280,11 +1272,8 @@ subtract_xml_object(xmlNode *parent, xmlNode *left, xmlNode *right,
return deleted;
}
- name = crm_element_name(left);
CRM_CHECK(name != NULL, return NULL);
- CRM_CHECK(pcmk__str_eq(crm_element_name(left), crm_element_name(right),
- pcmk__str_casei),
- return NULL);
+ CRM_CHECK(pcmk__xe_is(left, (const char *) right->name), return NULL);
// Check for XML_DIFF_MARKER in a child
value = crm_element_value(right, XML_DIFF_MARKER);
@@ -1367,7 +1356,7 @@ subtract_xml_object(xmlNode *parent, xmlNode *left, xmlNode *right,
break;
} else {
- const char *left_value = crm_element_value(left, prop_name);
+ const char *left_value = pcmk__xml_attr_value(xIter);
xmlSetProp(diff, (pcmkXmlStr) prop_name, (pcmkXmlStr) value);
crm_xml_add(diff, prop_name, left_value);
@@ -1375,7 +1364,7 @@ subtract_xml_object(xmlNode *parent, xmlNode *left, xmlNode *right,
} else {
/* Only now do we need the left value */
- const char *left_value = crm_element_value(left, prop_name);
+ const char *left_value = pcmk__xml_attr_value(xIter);
if (strcmp(left_value, right_val) == 0) {
/* unchanged */
@@ -1386,8 +1375,7 @@ subtract_xml_object(xmlNode *parent, xmlNode *left, xmlNode *right,
xmlAttrPtr pIter = NULL;
crm_trace("Changes detected to %s in "
- "<%s " XML_ATTR_ID "=%s>",
- prop_name, crm_element_name(left), id);
+ "<%s " XML_ATTR_ID "=%s>", prop_name, name, id);
for (pIter = pcmk__xe_first_attr(left); pIter != NULL;
pIter = pIter->next) {
const char *p_name = (const char *) pIter->name;
@@ -1401,8 +1389,7 @@ subtract_xml_object(xmlNode *parent, xmlNode *left, xmlNode *right,
} else {
crm_trace("Changes detected to %s (%s -> %s) in "
"<%s " XML_ATTR_ID "=%s>",
- prop_name, left_value, right_val,
- crm_element_name(left), id);
+ prop_name, left_value, right_val, name, id);
crm_xml_add(diff, prop_name, left_value);
}
}
@@ -1434,8 +1421,8 @@ apply_xml_diff(xmlNode *old_xml, xmlNode *diff, xmlNode **new_xml)
const char *version = crm_element_value(diff, XML_ATTR_CRM_VERSION);
xmlNode *child_diff = NULL;
- xmlNode *added = find_xml_node(diff, "diff-added", FALSE);
- xmlNode *removed = find_xml_node(diff, "diff-removed", FALSE);
+ xmlNode *added = find_xml_node(diff, XML_TAG_DIFF_ADDED, FALSE);
+ xmlNode *removed = find_xml_node(diff, XML_TAG_DIFF_REMOVED, FALSE);
CRM_CHECK(new_xml != NULL, return FALSE);
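
For orientation, a hedged caller-side sketch of the reworked xml_apply_patchset(): it still returns a legacy code (pcmk_ok on success), now reads the format attribute only when it is about to apply the patch, and logs the patchset through pcmk__log_xml_patchset() instead of the hand-rolled tracing block. The wrapper function and variable names below are illustrative.

    #include <stdbool.h>
    #include <crm/crm.h>            // pcmk_ok, pcmk_strerror(), crm_err()
    #include <crm/common/xml.h>     // xml_apply_patchset()

    static int
    apply_cib_patch(xmlNode *cib, xmlNode *patchset)
    {
        int rc = xml_apply_patchset(cib, patchset, true /* check versions */);

        if (rc != pcmk_ok) {
            crm_err("Could not apply patchset: %s", pcmk_strerror(rc));
        }
        return rc;
    }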
diff --git a/lib/common/patchset_display.c b/lib/common/patchset_display.c
index 731d437..5cc0b52 100644
--- a/lib/common/patchset_display.c
+++ b/lib/common/patchset_display.c
@@ -47,7 +47,7 @@ xml_show_patchset_header(pcmk__output_t *out, const xmlNode *patchset)
xml_patch_versions(patchset, add, del);
if ((add[0] != del[0]) || (add[1] != del[1]) || (add[2] != del[2])) {
- const char *fmt = crm_element_value(patchset, "format");
+ const char *fmt = crm_element_value(patchset, PCMK_XA_FORMAT);
const char *digest = crm_element_value(patchset, XML_ATTR_DIGEST);
out->info(out, "Diff: --- %d.%d.%d %s", del[0], del[1], del[2], fmt);
@@ -80,7 +80,7 @@ static int
xml_show_patchset_v1_recursive(pcmk__output_t *out, const char *prefix,
const xmlNode *data, int depth, uint32_t options)
{
- if (!xml_has_children(data)
+ if ((data->children == NULL)
|| (crm_element_value(data, XML_DIFF_MARKER) != NULL)) {
// Found a change; clear the pcmk__xml_fmt_diff_short option if set
@@ -143,7 +143,7 @@ xml_show_patchset_v1(pcmk__output_t *out, const xmlNode *patchset,
* However, v1 patchsets can only exist during rolling upgrades from
* Pacemaker 1.1.11, so not worth worrying about.
*/
- removed = find_xml_node(patchset, "diff-removed", FALSE);
+ removed = find_xml_node(patchset, XML_TAG_DIFF_REMOVED, FALSE);
for (child = pcmk__xml_first_child(removed); child != NULL;
child = pcmk__xml_next(child)) {
int temp_rc = xml_show_patchset_v1_recursive(out, "- ", child, 0,
@@ -159,7 +159,7 @@ xml_show_patchset_v1(pcmk__output_t *out, const xmlNode *patchset,
}
is_first = true;
- added = find_xml_node(patchset, "diff-added", FALSE);
+ added = find_xml_node(patchset, XML_TAG_DIFF_ADDED, FALSE);
for (child = pcmk__xml_first_child(added); child != NULL;
child = pcmk__xml_next(child)) {
int temp_rc = xml_show_patchset_v1_recursive(out, "+ ", child, 0,
@@ -303,11 +303,11 @@ xml_show_patchset_v2(pcmk__output_t *out, const xmlNode *patchset)
*
* \note \p args should contain only the XML patchset
*/
-PCMK__OUTPUT_ARGS("xml-patchset", "xmlNodePtr")
+PCMK__OUTPUT_ARGS("xml-patchset", "const xmlNode *")
static int
xml_patchset_default(pcmk__output_t *out, va_list args)
{
- xmlNodePtr patchset = va_arg(args, xmlNodePtr);
+ const xmlNode *patchset = va_arg(args, const xmlNode *);
int format = 1;
@@ -316,7 +316,7 @@ xml_patchset_default(pcmk__output_t *out, va_list args)
return pcmk_rc_no_output;
}
- crm_element_value_int(patchset, "format", &format);
+ crm_element_value_int(patchset, PCMK_XA_FORMAT, &format);
switch (format) {
case 1:
return xml_show_patchset_v1(out, patchset, pcmk__xml_fmt_pretty);
@@ -342,13 +342,13 @@ xml_patchset_default(pcmk__output_t *out, va_list args)
*
* \note \p args should contain only the XML patchset
*/
-PCMK__OUTPUT_ARGS("xml-patchset", "xmlNodePtr")
+PCMK__OUTPUT_ARGS("xml-patchset", "const xmlNode *")
static int
xml_patchset_log(pcmk__output_t *out, va_list args)
{
static struct qb_log_callsite *patchset_cs = NULL;
- xmlNodePtr patchset = va_arg(args, xmlNodePtr);
+ const xmlNode *patchset = va_arg(args, const xmlNode *);
uint8_t log_level = pcmk__output_get_log_level(out);
int format = 1;
@@ -373,7 +373,7 @@ xml_patchset_log(pcmk__output_t *out, va_list args)
return pcmk_rc_no_output;
}
- crm_element_value_int(patchset, "format", &format);
+ crm_element_value_int(patchset, PCMK_XA_FORMAT, &format);
switch (format) {
case 1:
if (log_level < LOG_DEBUG) {
@@ -404,11 +404,11 @@ xml_patchset_log(pcmk__output_t *out, va_list args)
*
* \note \p args should contain only the XML patchset
*/
-PCMK__OUTPUT_ARGS("xml-patchset", "xmlNodePtr")
+PCMK__OUTPUT_ARGS("xml-patchset", "const xmlNode *")
static int
xml_patchset_xml(pcmk__output_t *out, va_list args)
{
- xmlNodePtr patchset = va_arg(args, xmlNodePtr);
+ const xmlNode *patchset = va_arg(args, const xmlNode *);
if (patchset != NULL) {
char *buf = dump_xml_formatted_with_text(patchset);
@@ -490,7 +490,7 @@ xml_log_patchset(uint8_t log_level, const char *function,
goto done;
}
- crm_element_value_int(patchset, "format", &format);
+ crm_element_value_int(patchset, PCMK_XA_FORMAT, &format);
switch (format) {
case 1:
if (log_level < LOG_DEBUG) {
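
Because the xml-patchset messages now take a const xmlNode *, read-only callers no longer need a mutable pointer; the usual entry point is the convenience call that replaced the open-coded tracing block in xml_apply_patchset(). A one-line, illustrative use:

    // Log a patchset at trace level without building an output object by hand
    pcmk__log_xml_patchset(LOG_TRACE, patchset);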
diff --git a/lib/common/remote.c b/lib/common/remote.c
index 8c5969a..fe19296 100644
--- a/lib/common/remote.c
+++ b/lib/common/remote.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2008-2022 the Pacemaker project contributors
+ * Copyright 2008-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -167,7 +167,8 @@ set_minimum_dh_bits(const gnutls_session_t *session)
{
int dh_min_bits;
- pcmk__scan_min_int(getenv("PCMK_dh_min_bits"), &dh_min_bits, 0);
+ pcmk__scan_min_int(pcmk__env_option(PCMK__ENV_DH_MIN_BITS), &dh_min_bits,
+ 0);
/* This function is deprecated since GnuTLS 3.1.7, in favor of letting
* the priority string imply the DH requirements, but this is the only
@@ -186,8 +187,11 @@ get_bound_dh_bits(unsigned int dh_bits)
int dh_min_bits;
int dh_max_bits;
- pcmk__scan_min_int(getenv("PCMK_dh_min_bits"), &dh_min_bits, 0);
- pcmk__scan_min_int(getenv("PCMK_dh_max_bits"), &dh_max_bits, 0);
+ pcmk__scan_min_int(pcmk__env_option(PCMK__ENV_DH_MIN_BITS), &dh_min_bits,
+ 0);
+ pcmk__scan_min_int(pcmk__env_option(PCMK__ENV_DH_MAX_BITS), &dh_max_bits,
+ 0);
+
if ((dh_max_bits > 0) && (dh_max_bits < dh_min_bits)) {
crm_warn("Ignoring PCMK_dh_max_bits less than PCMK_dh_min_bits");
dh_max_bits = 0;
@@ -228,7 +232,7 @@ pcmk__new_tls_session(int csock, unsigned int conn_type,
* http://www.manpagez.com/info/gnutls/gnutls-2.10.4/gnutls_81.php#Echo-Server-with-anonymous-authentication
*/
- prio_base = getenv("PCMK_tls_priorities");
+ prio_base = pcmk__env_option(PCMK__ENV_TLS_PRIORITIES);
if (prio_base == NULL) {
prio_base = PCMK_GNUTLS_PRIORITIES;
}
@@ -485,7 +489,7 @@ remote_send_iovs(pcmk__remote_t *remote, struct iovec *iov, int iovs)
* \return Standard Pacemaker return code
*/
int
-pcmk__remote_send_xml(pcmk__remote_t *remote, xmlNode *msg)
+pcmk__remote_send_xml(pcmk__remote_t *remote, const xmlNode *msg)
{
int rc = pcmk_rc_ok;
static uint64_t id = 0;
@@ -558,16 +562,17 @@ pcmk__remote_message_xml(pcmk__remote_t *remote)
rc = BZ2_bzBuffToBuffDecompress(uncompressed + header->payload_offset, &size_u,
remote->buffer + header->payload_offset,
header->payload_compressed, 1, 0);
+ rc = pcmk__bzlib2rc(rc);
- if (rc != BZ_OK && header->version > REMOTE_MSG_VERSION) {
+ if (rc != pcmk_rc_ok && header->version > REMOTE_MSG_VERSION) {
crm_warn("Couldn't decompress v%d message, we only understand v%d",
header->version, REMOTE_MSG_VERSION);
free(uncompressed);
return NULL;
- } else if (rc != BZ_OK) {
- crm_err("Decompression failed: %s " CRM_XS " bzerror=%d",
- bz2_strerror(rc), rc);
+ } else if (rc != pcmk_rc_ok) {
+ crm_err("Decompression failed: %s " CRM_XS " rc=%d",
+ pcmk_rc_str(rc), rc);
free(uncompressed);
return NULL;
}
@@ -1079,13 +1084,16 @@ pcmk__connect_remote(const char *host, int port, int timeout, int *timer_id,
hints.ai_family = AF_UNSPEC; /* Allow IPv4 or IPv6 */
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags = AI_CANONNAME;
+
rc = getaddrinfo(server, NULL, &hints, &res);
- if (rc != 0) {
+ rc = pcmk__gaierror2rc(rc);
+
+ if (rc != pcmk_rc_ok) {
crm_err("Unable to get IP address info for %s: %s",
- server, gai_strerror(rc));
- rc = ENOTCONN;
+ server, pcmk_rc_str(rc));
goto async_cleanup;
}
+
if (!res || !res->ai_addr) {
crm_err("Unable to get IP address info for %s: no result", server);
rc = ENOTCONN;
@@ -1252,13 +1260,14 @@ crm_default_remote_port(void)
static int port = 0;
if (port == 0) {
- const char *env = getenv("PCMK_remote_port");
+ const char *env = pcmk__env_option(PCMK__ENV_REMOTE_PORT);
if (env) {
errno = 0;
port = strtol(env, NULL, 10);
if (errno || (port < 1) || (port > 65535)) {
- crm_warn("Environment variable PCMK_remote_port has invalid value '%s', using %d instead",
+ crm_warn("Environment variable PCMK_" PCMK__ENV_REMOTE_PORT
+ " has invalid value '%s', using %d instead",
env, DEFAULT_REMOTE_PORT);
port = DEFAULT_REMOTE_PORT;
}
diff --git a/lib/common/results.c b/lib/common/results.c
index 93d79eb..dde8b27 100644
--- a/lib/common/results.c
+++ b/lib/common/results.c
@@ -15,6 +15,7 @@
#include <bzlib.h>
#include <errno.h>
+#include <netdb.h>
#include <stdlib.h>
#include <string.h>
#include <qb/qbdefs.h>
@@ -305,6 +306,18 @@ static const struct pcmk__rc_info {
"Bad XML patch format",
-pcmk_err_generic,
},
+ { "pcmk_rc_no_transaction",
+ "No active transaction found",
+ -pcmk_err_generic,
+ },
+ { "pcmk_rc_ns_resolution",
+ "Nameserver resolution error",
+ -pcmk_err_generic,
+ },
+ { "pcmk_rc_compression",
+ "Compression/decompression error",
+ -pcmk_err_generic,
+ },
};
/*!
@@ -716,6 +729,7 @@ pcmk_rc2exitc(int rc)
case ENOSYS:
case EOVERFLOW:
case pcmk_rc_underflow:
+ case pcmk_rc_compression:
return CRM_EX_SOFTWARE;
case EBADMSG:
@@ -759,10 +773,12 @@ pcmk_rc2exitc(int rc)
case ENODEV:
case ENOENT:
case ENXIO:
+ case pcmk_rc_no_transaction:
case pcmk_rc_unknown_format:
return CRM_EX_NOSUCH;
case pcmk_rc_node_unknown:
+ case pcmk_rc_ns_resolution:
return CRM_EX_NOHOST;
case ETIME:
@@ -837,37 +853,83 @@ pcmk_rc2ocf(int rc)
// Other functions
-const char *
-bz2_strerror(int rc)
+/*!
+ * \brief Map a getaddrinfo() return code to the most similar Pacemaker
+ * return code
+ *
+ * \param[in] gai getaddrinfo() return code
+ *
+ * \return Most similar Pacemaker return code
+ */
+int
+pcmk__gaierror2rc(int gai)
{
- // See ftp://sources.redhat.com/pub/bzip2/docs/manual_3.html#SEC17
- switch (rc) {
+ switch (gai) {
+ case 0:
+ return pcmk_rc_ok;
+
+ case EAI_AGAIN:
+ return EAGAIN;
+
+ case EAI_BADFLAGS:
+ case EAI_SERVICE:
+ return EINVAL;
+
+ case EAI_FAMILY:
+ return EAFNOSUPPORT;
+
+ case EAI_MEMORY:
+ return ENOMEM;
+
+ case EAI_NONAME:
+ return pcmk_rc_node_unknown;
+
+ case EAI_SOCKTYPE:
+ return ESOCKTNOSUPPORT;
+
+ case EAI_SYSTEM:
+ return errno;
+
+ default:
+ return pcmk_rc_ns_resolution;
+ }
+}
+
+/*!
+ * \brief Map a bz2 return code to the most similar Pacemaker return code
+ *
+ * \param[in] bz2 bz2 return code
+ *
+ * \return Most similar Pacemaker return code
+ */
+int
+pcmk__bzlib2rc(int bz2)
+{
+ switch (bz2) {
case BZ_OK:
case BZ_RUN_OK:
case BZ_FLUSH_OK:
case BZ_FINISH_OK:
case BZ_STREAM_END:
- return "Ok";
- case BZ_CONFIG_ERROR:
- return "libbz2 has been improperly compiled on your platform";
- case BZ_SEQUENCE_ERROR:
- return "library functions called in the wrong order";
- case BZ_PARAM_ERROR:
- return "parameter is out of range or otherwise incorrect";
+ return pcmk_rc_ok;
+
case BZ_MEM_ERROR:
- return "memory allocation failed";
+ return ENOMEM;
+
case BZ_DATA_ERROR:
- return "data integrity error is detected during decompression";
case BZ_DATA_ERROR_MAGIC:
- return "the compressed stream does not start with the correct magic bytes";
- case BZ_IO_ERROR:
- return "error reading or writing in the compressed file";
case BZ_UNEXPECTED_EOF:
- return "compressed file finishes before the logical end of stream is detected";
+ return pcmk_rc_bad_input;
+
+ case BZ_IO_ERROR:
+ return EIO;
+
case BZ_OUTBUFF_FULL:
- return "output data will not fit into the buffer provided";
+ return EFBIG;
+
+ default:
+ return pcmk_rc_compression;
}
- return "Data compression error";
}
crm_exit_t
@@ -1039,6 +1101,39 @@ pcmk__copy_result(const pcmk__action_result_t *src, pcmk__action_result_t *dst)
#include <crm/common/results_compat.h>
+const char *
+bz2_strerror(int rc)
+{
+ // See ftp://sources.redhat.com/pub/bzip2/docs/manual_3.html#SEC17
+ switch (rc) {
+ case BZ_OK:
+ case BZ_RUN_OK:
+ case BZ_FLUSH_OK:
+ case BZ_FINISH_OK:
+ case BZ_STREAM_END:
+ return "Ok";
+ case BZ_CONFIG_ERROR:
+ return "libbz2 has been improperly compiled on your platform";
+ case BZ_SEQUENCE_ERROR:
+ return "library functions called in the wrong order";
+ case BZ_PARAM_ERROR:
+ return "parameter is out of range or otherwise incorrect";
+ case BZ_MEM_ERROR:
+ return "memory allocation failed";
+ case BZ_DATA_ERROR:
+ return "data integrity error is detected during decompression";
+ case BZ_DATA_ERROR_MAGIC:
+ return "the compressed stream does not start with the correct magic bytes";
+ case BZ_IO_ERROR:
+ return "error reading or writing in the compressed file";
+ case BZ_UNEXPECTED_EOF:
+ return "compressed file finishes before the logical end of stream is detected";
+ case BZ_OUTBUFF_FULL:
+ return "output data will not fit into the buffer provided";
+ }
+ return "Data compression error";
+}
+
crm_exit_t
crm_errno2exit(int rc)
{
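
A hedged example of the new mapping helper in use, mirroring the getaddrinfo() call updated in remote.c above (the wrapper and its names are illustrative):

    #include <string.h>         // memset()
    #include <sys/socket.h>     // AF_UNSPEC, SOCK_STREAM
    #include <netdb.h>          // getaddrinfo(), struct addrinfo
    #include <crm/crm.h>        // pcmk_rc_ok, pcmk_rc_str(), crm_err()

    static int
    resolve_host(const char *server, struct addrinfo **res)
    {
        struct addrinfo hints;
        int rc;

        memset(&hints, 0, sizeof(hints));
        hints.ai_family = AF_UNSPEC;
        hints.ai_socktype = SOCK_STREAM;

        rc = pcmk__gaierror2rc(getaddrinfo(server, NULL, &hints, res));
        if (rc != pcmk_rc_ok) {
            // e.g. EAI_NONAME maps to pcmk_rc_node_unknown (CRM_EX_NOHOST)
            crm_err("Unable to get IP address info for %s: %s",
                    server, pcmk_rc_str(rc));
        }
        return rc;
    }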
diff --git a/lib/common/scheduler.c b/lib/common/scheduler.c
new file mode 100644
index 0000000..20e6fdf
--- /dev/null
+++ b/lib/common/scheduler.c
@@ -0,0 +1,14 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <stdint.h> // uint32_t
+
+uint32_t pcmk__warnings = 0;
diff --git a/lib/common/schemas.c b/lib/common/schemas.c
index 88a3051..b3c09eb 100644
--- a/lib/common/schemas.c
+++ b/lib/common/schemas.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -432,34 +432,8 @@ crm_schema_init(void)
NULL, NULL, FALSE, -1);
}
-#if 0
-static void
-relaxng_invalid_stderr(void *userData, xmlErrorPtr error)
-{
- /*
- Structure xmlError
- struct _xmlError {
- int domain : What part of the library raised this er
- int code : The error code, e.g. an xmlParserError
- char * message : human-readable informative error messag
- xmlErrorLevel level : how consequent is the error
- char * file : the filename
- int line : the line number if available
- char * str1 : extra string information
- char * str2 : extra string information
- char * str3 : extra string information
- int int1 : extra number information
- int int2 : column number of the error or 0 if N/A
- void * ctxt : the parser context if available
- void * node : the node in the tree
- }
- */
- crm_err("Structured error: line=%d, level=%d %s", error->line, error->level, error->message);
-}
-#endif
-
static gboolean
-validate_with_relaxng(xmlDocPtr doc, gboolean to_logs, const char *relaxng_file,
+validate_with_relaxng(xmlDocPtr doc, xmlRelaxNGValidityErrorFunc error_handler, void *error_handler_context, const char *relaxng_file,
relaxng_ctx_cache_t **cached_ctx)
{
int rc = 0;
@@ -476,15 +450,14 @@ validate_with_relaxng(xmlDocPtr doc, gboolean to_logs, const char *relaxng_file,
crm_debug("Creating RNG parser context");
ctx = calloc(1, sizeof(relaxng_ctx_cache_t));
- xmlLoadExtDtdDefaultValue = 1;
ctx->parser = xmlRelaxNGNewParserCtxt(relaxng_file);
CRM_CHECK(ctx->parser != NULL, goto cleanup);
- if (to_logs) {
+ if (error_handler) {
xmlRelaxNGSetParserErrors(ctx->parser,
- (xmlRelaxNGValidityErrorFunc) xml_log,
- (xmlRelaxNGValidityWarningFunc) xml_log,
- GUINT_TO_POINTER(LOG_ERR));
+ (xmlRelaxNGValidityErrorFunc) error_handler,
+ (xmlRelaxNGValidityWarningFunc) error_handler,
+ error_handler_context);
} else {
xmlRelaxNGSetParserErrors(ctx->parser,
(xmlRelaxNGValidityErrorFunc) fprintf,
@@ -500,11 +473,11 @@ validate_with_relaxng(xmlDocPtr doc, gboolean to_logs, const char *relaxng_file,
ctx->valid = xmlRelaxNGNewValidCtxt(ctx->rng);
CRM_CHECK(ctx->valid != NULL, goto cleanup);
- if (to_logs) {
+ if (error_handler) {
xmlRelaxNGSetValidErrors(ctx->valid,
- (xmlRelaxNGValidityErrorFunc) xml_log,
- (xmlRelaxNGValidityWarningFunc) xml_log,
- GUINT_TO_POINTER(LOG_ERR));
+ (xmlRelaxNGValidityErrorFunc) error_handler,
+ (xmlRelaxNGValidityWarningFunc) error_handler,
+ error_handler_context);
} else {
xmlRelaxNGSetValidErrors(ctx->valid,
(xmlRelaxNGValidityErrorFunc) fprintf,
@@ -513,10 +486,6 @@ validate_with_relaxng(xmlDocPtr doc, gboolean to_logs, const char *relaxng_file,
}
}
- /* xmlRelaxNGSetValidStructuredErrors( */
- /* valid, relaxng_invalid_stderr, valid); */
-
- xmlLineNumbersDefault(1);
rc = xmlRelaxNGValidateDoc(ctx->valid, doc);
if (rc > 0) {
valid = FALSE;
@@ -590,39 +559,36 @@ crm_schema_cleanup(void)
}
static gboolean
-validate_with(xmlNode *xml, int method, gboolean to_logs)
+validate_with(xmlNode *xml, int method, xmlRelaxNGValidityErrorFunc error_handler, void* error_handler_context)
{
- xmlDocPtr doc = NULL;
gboolean valid = FALSE;
char *file = NULL;
+ struct schema_s *schema = NULL;
+ relaxng_ctx_cache_t **cache = NULL;
if (method < 0) {
return FALSE;
}
- if (known_schemas[method].validator == schema_validator_none) {
+ schema = &(known_schemas[method]);
+ if (schema->validator == schema_validator_none) {
return TRUE;
}
- CRM_CHECK(xml != NULL, return FALSE);
-
- if (pcmk__str_eq(known_schemas[method].name, "pacemaker-next",
- pcmk__str_none)) {
+ if (pcmk__str_eq(schema->name, "pacemaker-next", pcmk__str_none)) {
crm_warn("The pacemaker-next schema is deprecated and will be removed "
"in a future release.");
}
- doc = getDocPtr(xml);
file = pcmk__xml_artefact_path(pcmk__xml_artefact_ns_legacy_rng,
- known_schemas[method].name);
+ schema->name);
crm_trace("Validating with %s (type=%d)",
- pcmk__s(file, "missing schema"), known_schemas[method].validator);
- switch (known_schemas[method].validator) {
+ pcmk__s(file, "missing schema"), schema->validator);
+ switch (schema->validator) {
case schema_validator_rng:
- valid =
- validate_with_relaxng(doc, to_logs, file,
- (relaxng_ctx_cache_t **) & (known_schemas[method].cache));
+ cache = (relaxng_ctx_cache_t **) &(schema->cache);
+ valid = validate_with_relaxng(xml->doc, error_handler, error_handler_context, file, cache);
break;
default:
crm_err("Unknown validator type: %d",
@@ -639,7 +605,7 @@ validate_with_silent(xmlNode *xml, int method)
{
bool rc, sl_backup = silent_logging;
silent_logging = TRUE;
- rc = validate_with(xml, method, TRUE);
+ rc = validate_with(xml, method, (xmlRelaxNGValidityErrorFunc) xml_log, GUINT_TO_POINTER(LOG_ERR));
silent_logging = sl_backup;
return rc;
}
@@ -676,7 +642,7 @@ dump_file(const char *filename)
}
gboolean
-validate_xml_verbose(xmlNode *xml_blob)
+validate_xml_verbose(const xmlNode *xml_blob)
{
int fd = 0;
xmlDoc *doc = NULL;
@@ -692,7 +658,7 @@ validate_xml_verbose(xmlNode *xml_blob)
dump_file(filename);
- doc = xmlParseFile(filename);
+ doc = xmlReadFile(filename, NULL, 0);
xml = xmlDocGetRootElement(doc);
rc = validate_xml(xml, NULL, FALSE);
free_xml(xml);
@@ -706,8 +672,16 @@ validate_xml_verbose(xmlNode *xml_blob)
gboolean
validate_xml(xmlNode *xml_blob, const char *validation, gboolean to_logs)
{
+ return pcmk__validate_xml(xml_blob, validation, to_logs ? (xmlRelaxNGValidityErrorFunc) xml_log : NULL, GUINT_TO_POINTER(LOG_ERR));
+}
+
+gboolean
+pcmk__validate_xml(xmlNode *xml_blob, const char *validation, xmlRelaxNGValidityErrorFunc error_handler, void* error_handler_context)
+{
int version = 0;
+ CRM_CHECK((xml_blob != NULL) && (xml_blob->doc != NULL), return FALSE);
+
if (validation == NULL) {
validation = crm_element_value(xml_blob, XML_ATTR_VALIDATION);
}
@@ -717,7 +691,7 @@ validate_xml(xmlNode *xml_blob, const char *validation, gboolean to_logs)
bool valid = FALSE;
for (lpc = 0; lpc < xml_schema_max; lpc++) {
- if (validate_with(xml_blob, lpc, FALSE)) {
+ if (validate_with(xml_blob, lpc, NULL, NULL)) {
valid = TRUE;
crm_xml_add(xml_blob, XML_ATTR_VALIDATION,
known_schemas[lpc].name);
@@ -735,7 +709,7 @@ validate_xml(xmlNode *xml_blob, const char *validation, gboolean to_logs)
if (strcmp(validation, PCMK__VALUE_NONE) == 0) {
return TRUE;
} else if (version < xml_schema_max) {
- return validate_with(xml_blob, version, to_logs);
+ return validate_with(xml_blob, version, error_handler, error_handler_context);
}
crm_err("Unknown validator: %s", validation);
@@ -884,47 +858,17 @@ cib_upgrade_err(void *ctx, const char *fmt, ...)
va_end(ap);
}
-
-/* Denotes temporary emergency fix for "xmldiff'ing not text-node-ready";
- proper fix is most likely to teach __xml_diff_object and friends to
- deal with XML_TEXT_NODE (and more?), i.e., those nodes currently
- missing "_private" field (implicitly as NULL) which clashes with
- unchecked accesses (e.g. in __xml_offset) -- the outcome may be that
- those unexpected XML nodes will simply be ignored for the purpose of
- diff'ing, or it may be made more robust, or per the user's preference
- (which then may be exposed as crm_diff switch).
-
- Said XML_TEXT_NODE may appear unexpectedly due to how upgrade-2.10.xsl
- is arranged.
-
- The emergency fix is simple: reparse XSLT output with blank-ignoring
- parser. */
-#ifndef PCMK_SCHEMAS_EMERGENCY_XSLT
-#define PCMK_SCHEMAS_EMERGENCY_XSLT 1
-#endif
-
static xmlNode *
apply_transformation(xmlNode *xml, const char *transform, gboolean to_logs)
{
char *xform = NULL;
xmlNode *out = NULL;
xmlDocPtr res = NULL;
- xmlDocPtr doc = NULL;
xsltStylesheet *xslt = NULL;
-#if PCMK_SCHEMAS_EMERGENCY_XSLT != 0
- xmlChar *emergency_result;
- int emergency_txt_len;
- int emergency_res;
-#endif
-
- CRM_CHECK(xml != NULL, return FALSE);
- doc = getDocPtr(xml);
+
xform = pcmk__xml_artefact_path(pcmk__xml_artefact_ns_legacy_xslt,
transform);
- xmlLoadExtDtdDefaultValue = 1;
- xmlSubstituteEntitiesDefault(1);
-
/* for capturing, e.g., what's emitted via <xsl:message> */
if (to_logs) {
xsltSetGenericErrorFunc(NULL, cib_upgrade_err);
@@ -935,22 +879,12 @@ apply_transformation(xmlNode *xml, const char *transform, gboolean to_logs)
xslt = xsltParseStylesheetFile((pcmkXmlStr) xform);
CRM_CHECK(xslt != NULL, goto cleanup);
- res = xsltApplyStylesheet(xslt, doc, NULL);
+ res = xsltApplyStylesheet(xslt, xml->doc, NULL);
CRM_CHECK(res != NULL, goto cleanup);
xsltSetGenericErrorFunc(NULL, NULL); /* restore default one */
-
-#if PCMK_SCHEMAS_EMERGENCY_XSLT != 0
- emergency_res = xsltSaveResultToString(&emergency_result,
- &emergency_txt_len, res, xslt);
- xmlFreeDoc(res);
- CRM_CHECK(emergency_res == 0, goto cleanup);
- out = string2xml((const char *) emergency_result);
- free(emergency_result);
-#else
out = xmlDocGetRootElement(res);
-#endif
cleanup:
if (xslt) {
@@ -1055,12 +989,15 @@ update_validation(xmlNode **xml_blob, int *best, int max, gboolean transform,
int max_stable_schemas = xml_latest_schema_index();
int lpc = 0, match = -1, rc = pcmk_ok;
int next = -1; /* -1 denotes "inactive" value */
+ xmlRelaxNGValidityErrorFunc error_handler =
+ to_logs ? (xmlRelaxNGValidityErrorFunc) xml_log : NULL;
CRM_CHECK(best != NULL, return -EINVAL);
*best = 0;
- CRM_CHECK(xml_blob != NULL, return -EINVAL);
- CRM_CHECK(*xml_blob != NULL, return -EINVAL);
+ CRM_CHECK((xml_blob != NULL) && (*xml_blob != NULL)
+ && ((*xml_blob)->doc != NULL),
+ return -EINVAL);
xml = *xml_blob;
value = crm_element_value_copy(xml, XML_ATTR_VALIDATION);
@@ -1090,7 +1027,7 @@ update_validation(xmlNode **xml_blob, int *best, int max, gboolean transform,
known_schemas[lpc].name ? known_schemas[lpc].name : "<unset>",
lpc, max_stable_schemas);
- if (validate_with(xml, lpc, to_logs) == FALSE) {
+ if (validate_with(xml, lpc, error_handler, GUINT_TO_POINTER(LOG_ERR)) == FALSE) {
if (next != -1) {
crm_info("Configuration not valid for schema: %s",
known_schemas[lpc].name);
@@ -1155,7 +1092,7 @@ update_validation(xmlNode **xml_blob, int *best, int max, gboolean transform,
known_schemas[lpc].transform);
rc = -pcmk_err_transform_failed;
- } else if (validate_with(upgrade, next, to_logs)) {
+ } else if (validate_with(upgrade, next, error_handler, GUINT_TO_POINTER(LOG_ERR))) {
crm_info("Transformation %s.xsl successful",
known_schemas[lpc].transform);
lpc = next;
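
A hedged caller sketch of the new pcmk__validate_xml() entry point (its declaring header is not shown in this hunk and is assumed to be internal): passing NULL for the handler pair makes libxml2 fall back to the fprintf()-based reporting seen in the else branches above.

    #include <glib.h>               // gboolean
    #include <crm/common/xml.h>     // xmlNode

    // Illustrative only: validate against the schema named in validate-with
    static gboolean
    cib_is_valid(xmlNode *cib)
    {
        return pcmk__validate_xml(cib, NULL /* use the CIB's validate-with */,
                                  NULL, NULL);
    }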
diff --git a/lib/common/strings.c b/lib/common/strings.c
index b245102..d9d2fda 100644
--- a/lib/common/strings.c
+++ b/lib/common/strings.c
@@ -417,10 +417,7 @@ crm_is_true(const char *s)
{
gboolean ret = FALSE;
- if (s != NULL) {
- crm_str_to_boolean(s, &ret);
- }
- return ret;
+ return (crm_str_to_boolean(s, &ret) < 0)? FALSE : ret;
}
int
@@ -768,12 +765,15 @@ pcmk__compress(const char *data, unsigned int length, unsigned int max,
*result_len = max;
rc = BZ2_bzBuffToBuffCompress(compressed, result_len, uncompressed, length,
CRM_BZ2_BLOCKS, 0, CRM_BZ2_WORK);
+ rc = pcmk__bzlib2rc(rc);
+
free(uncompressed);
- if (rc != BZ_OK) {
- crm_err("Compression of %d bytes failed: %s " CRM_XS " bzerror=%d",
- length, bz2_strerror(rc), rc);
+
+ if (rc != pcmk_rc_ok) {
+ crm_err("Compression of %d bytes failed: %s " CRM_XS " rc=%d",
+ length, pcmk_rc_str(rc), rc);
free(compressed);
- return pcmk_rc_error;
+ return rc;
}
#ifdef CLOCK_MONOTONIC
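
With pcmk__compress() now propagating the mapped bzlib code instead of a generic pcmk_rc_error, callers can distinguish failure modes; a hedged sketch follows (the two trailing out-parameters and the meaning of max=0 are assumptions based on existing callers, and the buffer names are illustrative):

    #include <string.h>             // strlen()
    #include <crm/crm.h>            // pcmk_rc_ok, pcmk_rc_str(), crm_warn()

    static char *
    compress_or_null(const char *xml_text)
    {
        char *compressed = NULL;
        unsigned int result_len = 0;
        int rc = pcmk__compress(xml_text, (unsigned int) strlen(xml_text),
                                0 /* assumed: let the function size the buffer */,
                                &compressed, &result_len);

        if (rc != pcmk_rc_ok) {
            // Possible codes: ENOMEM, EIO, EFBIG, pcmk_rc_bad_input, pcmk_rc_compression
            crm_warn("Compression failed: %s", pcmk_rc_str(rc));
            return NULL;
        }
        return compressed;
    }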
diff --git a/lib/common/tests/Makefile.am b/lib/common/tests/Makefile.am
index b147309..c0407e5 100644
--- a/lib/common/tests/Makefile.am
+++ b/lib/common/tests/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2020-2022 the Pacemaker project contributors
+# Copyright 2020-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -9,6 +9,7 @@
SUBDIRS = \
acl \
+ actions \
agents \
cmdline \
flags \
@@ -17,7 +18,6 @@ SUBDIRS = \
iso8601 \
lists \
nvpair \
- operations \
options \
output \
results \
diff --git a/lib/common/tests/acl/Makefile.am b/lib/common/tests/acl/Makefile.am
index 50408f9..19903db 100644
--- a/lib/common/tests/acl/Makefile.am
+++ b/lib/common/tests/acl/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2021-2022 the Pacemaker project contributors
+# Copyright 2021-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -12,10 +12,9 @@ include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = \
- pcmk__is_user_in_group_test \
- pcmk_acl_required_test \
- xml_acl_denied_test \
- xml_acl_enabled_test
+check_PROGRAMS = pcmk__is_user_in_group_test \
+ pcmk_acl_required_test \
+ xml_acl_denied_test \
+ xml_acl_enabled_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/operations/Makefile.am b/lib/common/tests/actions/Makefile.am
index 4687e1b..6890b84 100644
--- a/lib/common/tests/operations/Makefile.am
+++ b/lib/common/tests/actions/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2020-2022 the Pacemaker project contributors
+# Copyright 2020-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,12 +11,12 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = copy_in_properties_test \
- expand_plus_plus_test \
- fix_plus_plus_recursive_test \
- parse_op_key_test \
- pcmk_is_probe_test \
- pcmk_xe_is_probe_test \
- pcmk_xe_mask_probe_failure_test
+check_PROGRAMS = copy_in_properties_test \
+ expand_plus_plus_test \
+ fix_plus_plus_recursive_test \
+ parse_op_key_test \
+ pcmk_is_probe_test \
+ pcmk_xe_is_probe_test \
+ pcmk_xe_mask_probe_failure_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/operations/copy_in_properties_test.c b/lib/common/tests/actions/copy_in_properties_test.c
index 7882551..7882551 100644
--- a/lib/common/tests/operations/copy_in_properties_test.c
+++ b/lib/common/tests/actions/copy_in_properties_test.c
diff --git a/lib/common/tests/operations/expand_plus_plus_test.c b/lib/common/tests/actions/expand_plus_plus_test.c
index 41471f9..41471f9 100644
--- a/lib/common/tests/operations/expand_plus_plus_test.c
+++ b/lib/common/tests/actions/expand_plus_plus_test.c
diff --git a/lib/common/tests/operations/fix_plus_plus_recursive_test.c b/lib/common/tests/actions/fix_plus_plus_recursive_test.c
index b3c7cc2..b3c7cc2 100644
--- a/lib/common/tests/operations/fix_plus_plus_recursive_test.c
+++ b/lib/common/tests/actions/fix_plus_plus_recursive_test.c
diff --git a/lib/common/tests/operations/parse_op_key_test.c b/lib/common/tests/actions/parse_op_key_test.c
index 1b1bfff..1b1bfff 100644
--- a/lib/common/tests/operations/parse_op_key_test.c
+++ b/lib/common/tests/actions/parse_op_key_test.c
diff --git a/lib/common/tests/operations/pcmk_is_probe_test.c b/lib/common/tests/actions/pcmk_is_probe_test.c
index 4a65e3f..4a65e3f 100644
--- a/lib/common/tests/operations/pcmk_is_probe_test.c
+++ b/lib/common/tests/actions/pcmk_is_probe_test.c
diff --git a/lib/common/tests/operations/pcmk_xe_is_probe_test.c b/lib/common/tests/actions/pcmk_xe_is_probe_test.c
index 62b21d9..62b21d9 100644
--- a/lib/common/tests/operations/pcmk_xe_is_probe_test.c
+++ b/lib/common/tests/actions/pcmk_xe_is_probe_test.c
diff --git a/lib/common/tests/operations/pcmk_xe_mask_probe_failure_test.c b/lib/common/tests/actions/pcmk_xe_mask_probe_failure_test.c
index 9e38019..9e38019 100644
--- a/lib/common/tests/operations/pcmk_xe_mask_probe_failure_test.c
+++ b/lib/common/tests/actions/pcmk_xe_mask_probe_failure_test.c
diff --git a/lib/common/tests/agents/Makefile.am b/lib/common/tests/agents/Makefile.am
index 7a54b7d..b3837d7 100644
--- a/lib/common/tests/agents/Makefile.am
+++ b/lib/common/tests/agents/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2020-2022 the Pacemaker project contributors
+# Copyright 2020-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,10 +11,10 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = crm_generate_ra_key_test \
- crm_parse_agent_spec_test \
- pcmk__effective_rc_test \
- pcmk_get_ra_caps_test \
- pcmk_stonith_param_test
+check_PROGRAMS = crm_generate_ra_key_test \
+ crm_parse_agent_spec_test \
+ pcmk__effective_rc_test \
+ pcmk_get_ra_caps_test \
+ pcmk_stonith_param_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/agents/crm_parse_agent_spec_test.c b/lib/common/tests/agents/crm_parse_agent_spec_test.c
index cfd75f0..1d44459 100644
--- a/lib/common/tests/agents/crm_parse_agent_spec_test.c
+++ b/lib/common/tests/agents/crm_parse_agent_spec_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -22,14 +22,22 @@ all_params_null(void **state) {
static void
no_prov_or_type(void **state) {
- assert_int_equal(crm_parse_agent_spec("ocf", NULL, NULL, NULL), -EINVAL);
- assert_int_equal(crm_parse_agent_spec("ocf:", NULL, NULL, NULL), -EINVAL);
- assert_int_equal(crm_parse_agent_spec("ocf::", NULL, NULL, NULL), -EINVAL);
+ char *std = NULL;
+ char *prov = NULL;
+ char *ty = NULL;
+
+ assert_int_equal(crm_parse_agent_spec("ocf", &std, &prov, &ty), -EINVAL);
+ assert_int_equal(crm_parse_agent_spec("ocf:", &std, &prov, &ty), -EINVAL);
+ assert_int_equal(crm_parse_agent_spec("ocf::", &std, &prov, &ty), -EINVAL);
}
static void
no_type(void **state) {
- assert_int_equal(crm_parse_agent_spec("ocf:pacemaker:", NULL, NULL, NULL), -EINVAL);
+ char *std = NULL;
+ char *prov = NULL;
+ char *ty = NULL;
+
+ assert_int_equal(crm_parse_agent_spec("ocf:pacemaker:", &std, &prov, &ty), -EINVAL);
}
static void
diff --git a/lib/common/tests/cmdline/Makefile.am b/lib/common/tests/cmdline/Makefile.am
index d781ed5..792425b 100644
--- a/lib/common/tests/cmdline/Makefile.am
+++ b/lib/common/tests/cmdline/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2020-2022 the Pacemaker project contributors
+# Copyright 2020-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -12,6 +12,7 @@ include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
check_PROGRAMS = pcmk__cmdline_preproc_test \
- pcmk__quote_cmdline_test
+ pcmk__new_common_args_test \
+ pcmk__quote_cmdline_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/cmdline/pcmk__cmdline_preproc_test.c b/lib/common/tests/cmdline/pcmk__cmdline_preproc_test.c
index 863fbb9..299fec6 100644
--- a/lib/common/tests/cmdline/pcmk__cmdline_preproc_test.c
+++ b/lib/common/tests/cmdline/pcmk__cmdline_preproc_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-2022 the Pacemaker project contributors
+ * Copyright 2020-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -112,6 +112,16 @@ negative_score_2(void **state) {
}
static void
+negative_score_3(void **state) {
+ const char *argv[] = { "crm_attribute", "-p", "-v", "-INFINITY", NULL };
+ const gchar *expected[] = { "crm_attribute", "-p", "-v", "-INFINITY", NULL };
+
+ gchar **processed = pcmk__cmdline_preproc((char **) argv, "pv");
+ LISTS_EQ(processed, expected);
+ g_strfreev(processed);
+}
+
+static void
string_arg_with_dash(void **state) {
const char *argv[] = { "crm_mon", "-n", "crm_mon_options", "-v", "--opt1 --opt2", NULL };
const gchar *expected[] = { "crm_mon", "-n", "crm_mon_options", "-v", "--opt1 --opt2", NULL };
@@ -151,6 +161,7 @@ PCMK__UNIT_TEST(NULL, NULL,
cmocka_unit_test(long_arg),
cmocka_unit_test(negative_score),
cmocka_unit_test(negative_score_2),
+ cmocka_unit_test(negative_score_3),
cmocka_unit_test(string_arg_with_dash),
cmocka_unit_test(string_arg_with_dash_2),
cmocka_unit_test(string_arg_with_dash_3))
diff --git a/lib/common/tests/cmdline/pcmk__new_common_args_test.c b/lib/common/tests/cmdline/pcmk__new_common_args_test.c
new file mode 100644
index 0000000..6b70465
--- /dev/null
+++ b/lib/common/tests/cmdline/pcmk__new_common_args_test.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/common/unittest_internal.h>
+#include <crm/common/cmdline_internal.h>
+
+#include "mock_private.h"
+
+#include <glib.h>
+
+static void
+calloc_fails(void **state)
+{
+ pcmk__assert_exits(CRM_EX_OSERR,
+ {
+ pcmk__mock_calloc = true; // calloc() will return NULL
+ expect_value(__wrap_calloc, nmemb, 1);
+ expect_value(__wrap_calloc, size, sizeof(pcmk__common_args_t));
+ pcmk__new_common_args("boring summary");
+ pcmk__mock_calloc = false; // Use real calloc()
+ }
+ );
+}
+
+static void
+strdup_fails(void **state)
+{
+ pcmk__assert_exits(CRM_EX_OSERR,
+ {
+ pcmk__mock_strdup = true; // strdup() will return NULL
+ expect_string(__wrap_strdup, s, "boring summary");
+ pcmk__new_common_args("boring summary");
+ pcmk__mock_strdup = false; // Use the real strdup()
+ }
+ );
+}
+
+static void
+success(void **state)
+{
+ pcmk__common_args_t *args = pcmk__new_common_args("boring summary");
+ assert_string_equal(args->summary, "boring summary");
+ assert_null(args->output_as_descr);
+ assert_false(args->version);
+ assert_false(args->quiet);
+ assert_int_equal(args->verbosity, 0);
+ assert_null(args->output_ty);
+ assert_null(args->output_dest);
+}
+
+PCMK__UNIT_TEST(NULL, NULL,
+ cmocka_unit_test(calloc_fails),
+ cmocka_unit_test(strdup_fails),
+ cmocka_unit_test(success))
diff --git a/lib/common/tests/flags/Makefile.am b/lib/common/tests/flags/Makefile.am
index 16d8ffb..22a101a 100644
--- a/lib/common/tests/flags/Makefile.am
+++ b/lib/common/tests/flags/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2020-2022 the Pacemaker project contributors
+# Copyright 2020-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,10 +11,9 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = \
- pcmk__clear_flags_as_test \
- pcmk__set_flags_as_test \
- pcmk_all_flags_set_test \
- pcmk_any_flags_set_test
+check_PROGRAMS = pcmk__clear_flags_as_test \
+ pcmk__set_flags_as_test \
+ pcmk_all_flags_set_test \
+ pcmk_any_flags_set_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/io/Makefile.am b/lib/common/tests/io/Makefile.am
index c26482c..f7519d8 100644
--- a/lib/common/tests/io/Makefile.am
+++ b/lib/common/tests/io/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2020-2022 the Pacemaker project contributors
+# Copyright 2020-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,8 +11,7 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = \
- pcmk__full_path_test \
- pcmk__get_tmpdir_test
+check_PROGRAMS = pcmk__full_path_test \
+ pcmk__get_tmpdir_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/lists/Makefile.am b/lib/common/tests/lists/Makefile.am
index ae0c0b6..0fa1e15 100644
--- a/lib/common/tests/lists/Makefile.am
+++ b/lib/common/tests/lists/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2022 the Pacemaker project contributors
+# Copyright 2022-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -12,9 +12,8 @@ include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = \
- pcmk__list_of_1_test \
- pcmk__list_of_multiple_test \
- pcmk__subtract_lists_test
+check_PROGRAMS = pcmk__list_of_1_test \
+ pcmk__list_of_multiple_test \
+ pcmk__subtract_lists_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/nvpair/Makefile.am b/lib/common/tests/nvpair/Makefile.am
index 7acaba3..7f406bd 100644
--- a/lib/common/tests/nvpair/Makefile.am
+++ b/lib/common/tests/nvpair/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2021-2022 the Pacemaker project contributors
+# Copyright 2021-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,8 +11,8 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = pcmk__xe_attr_is_true_test \
- pcmk__xe_get_bool_attr_test \
- pcmk__xe_set_bool_attr_test
+check_PROGRAMS = pcmk__xe_attr_is_true_test \
+ pcmk__xe_get_bool_attr_test \
+ pcmk__xe_set_bool_attr_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/options/Makefile.am b/lib/common/tests/options/Makefile.am
index 9a5fa98..cc1008e 100644
--- a/lib/common/tests/options/Makefile.am
+++ b/lib/common/tests/options/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2022 the Pacemaker project contributors
+# Copyright 2022-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,9 +11,8 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = \
- pcmk__env_option_test \
- pcmk__set_env_option_test \
- pcmk__env_option_enabled_test
+check_PROGRAMS = pcmk__env_option_test \
+ pcmk__set_env_option_test \
+ pcmk__env_option_enabled_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/options/pcmk__set_env_option_test.c b/lib/common/tests/options/pcmk__set_env_option_test.c
index 753bf74..22fd795 100644
--- a/lib/common/tests/options/pcmk__set_env_option_test.c
+++ b/lib/common/tests/options/pcmk__set_env_option_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -20,18 +20,18 @@ bad_input_string(void **state)
// Never call setenv()
pcmk__mock_setenv = true;
- pcmk__set_env_option(NULL, "new_value");
- pcmk__set_env_option("", "new_value");
- pcmk__set_env_option("name=val", "new_value");
+ pcmk__set_env_option(NULL, "new_value", true);
+ pcmk__set_env_option("", "new_value", true);
+ pcmk__set_env_option("name=val", "new_value", true);
pcmk__mock_setenv = false;
// Never call unsetenv()
pcmk__mock_unsetenv = true;
- pcmk__set_env_option(NULL, NULL);
- pcmk__set_env_option("", NULL);
- pcmk__set_env_option("name=val", NULL);
+ pcmk__set_env_option(NULL, NULL, true);
+ pcmk__set_env_option("", NULL, true);
+ pcmk__set_env_option("name=val", NULL, true);
pcmk__mock_unsetenv = false;
}
@@ -53,11 +53,11 @@ input_too_long_for_both(void **state)
// Never call setenv() or unsetenv()
pcmk__mock_setenv = true;
- pcmk__set_env_option(long_opt, "new_value");
+ pcmk__set_env_option(long_opt, "new_value", true);
pcmk__mock_setenv = false;
pcmk__mock_unsetenv = true;
- pcmk__set_env_option(long_opt, NULL);
+ pcmk__set_env_option(long_opt, NULL, true);
pcmk__mock_unsetenv = false;
}
@@ -87,7 +87,7 @@ input_too_long_for_pcmk(void **state)
expect_string(__wrap_setenv, value, "new_value");
expect_value(__wrap_setenv, overwrite, 1);
will_return(__wrap_setenv, 0);
- pcmk__set_env_option(long_opt, "new_value");
+ pcmk__set_env_option(long_opt, "new_value", true);
pcmk__mock_setenv = false;
@@ -96,7 +96,7 @@ input_too_long_for_pcmk(void **state)
expect_string(__wrap_unsetenv, name, buf);
will_return(__wrap_unsetenv, 0);
- pcmk__set_env_option(long_opt, NULL);
+ pcmk__set_env_option(long_opt, NULL, true);
pcmk__mock_unsetenv = false;
}
@@ -115,7 +115,7 @@ valid_inputs_set(void **state)
expect_string(__wrap_setenv, value, "new_value");
expect_value(__wrap_setenv, overwrite, 1);
will_return(__wrap_setenv, 0);
- pcmk__set_env_option("env_var", "new_value");
+ pcmk__set_env_option("env_var", "new_value", true);
// Empty string is also a valid value
expect_string(__wrap_setenv, name, "PCMK_env_var");
@@ -126,7 +126,7 @@ valid_inputs_set(void **state)
expect_string(__wrap_setenv, value, "");
expect_value(__wrap_setenv, overwrite, 1);
will_return(__wrap_setenv, 0);
- pcmk__set_env_option("env_var", "");
+ pcmk__set_env_option("env_var", "", true);
pcmk__mock_setenv = false;
}
@@ -141,7 +141,33 @@ valid_inputs_unset(void **state)
will_return(__wrap_unsetenv, 0);
expect_string(__wrap_unsetenv, name, "HA_env_var");
will_return(__wrap_unsetenv, 0);
- pcmk__set_env_option("env_var", NULL);
+ pcmk__set_env_option("env_var", NULL, true);
+
+ pcmk__mock_unsetenv = false;
+}
+
+static void
+disable_compat(void **state)
+{
+ // Make sure we set only "PCMK_<option>" and not "HA_<option>"
+ pcmk__mock_setenv = true;
+
+ expect_string(__wrap_setenv, name, "PCMK_env_var");
+ expect_string(__wrap_setenv, value, "new_value");
+ expect_value(__wrap_setenv, overwrite, 1);
+ will_return(__wrap_setenv, 0);
+ pcmk__set_env_option("env_var", "new_value", false);
+
+ pcmk__mock_setenv = false;
+
+ // Make sure we clear both "PCMK_<option>" and "HA_<option>"
+ pcmk__mock_unsetenv = true;
+
+ expect_string(__wrap_unsetenv, name, "PCMK_env_var");
+ will_return(__wrap_unsetenv, 0);
+ expect_string(__wrap_unsetenv, name, "HA_env_var");
+ will_return(__wrap_unsetenv, 0);
+ pcmk__set_env_option("env_var", NULL, false);
pcmk__mock_unsetenv = false;
}
@@ -151,4 +177,5 @@ PCMK__UNIT_TEST(NULL, NULL,
cmocka_unit_test(input_too_long_for_both),
cmocka_unit_test(input_too_long_for_pcmk),
cmocka_unit_test(valid_inputs_set),
- cmocka_unit_test(valid_inputs_unset))
+ cmocka_unit_test(valid_inputs_unset),
+ cmocka_unit_test(disable_compat))
diff --git a/lib/common/tests/output/Makefile.am b/lib/common/tests/output/Makefile.am
index 6ac7b5f..30f1494 100644
--- a/lib/common/tests/output/Makefile.am
+++ b/lib/common/tests/output/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2021-2022 the Pacemaker project contributors
+# Copyright 2021-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,14 +11,14 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = pcmk__call_message_test \
- pcmk__output_and_clear_error_test \
- pcmk__output_free_test \
- pcmk__output_new_test \
- pcmk__register_format_test \
- pcmk__register_formats_test \
- pcmk__register_message_test \
- pcmk__register_messages_test \
- pcmk__unregister_formats_test
+check_PROGRAMS = pcmk__call_message_test \
+ pcmk__output_and_clear_error_test \
+ pcmk__output_free_test \
+ pcmk__output_new_test \
+ pcmk__register_format_test \
+ pcmk__register_formats_test \
+ pcmk__register_message_test \
+ pcmk__register_messages_test \
+ pcmk__unregister_formats_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/output/pcmk__output_new_test.c b/lib/common/tests/output/pcmk__output_new_test.c
index de4268c..a05d9a7 100644
--- a/lib/common/tests/output/pcmk__output_new_test.c
+++ b/lib/common/tests/output/pcmk__output_new_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -95,9 +95,15 @@ fopen_fails(void **state) {
pcmk__output_t *out = NULL;
pcmk__mock_fopen = true;
+#if defined(HAVE_FOPEN64) && defined(_FILE_OFFSET_BITS) && (_FILE_OFFSET_BITS == 64) && (SIZEOF_LONG < 8)
+ expect_string(__wrap_fopen64, pathname, "destfile");
+ expect_string(__wrap_fopen64, mode, "w");
+ will_return(__wrap_fopen64, EPERM);
+#else
expect_string(__wrap_fopen, pathname, "destfile");
expect_string(__wrap_fopen, mode, "w");
will_return(__wrap_fopen, EPERM);
+#endif
assert_int_equal(pcmk__output_new(&out, "text", "destfile", NULL), EPERM);
diff --git a/lib/common/tests/results/Makefile.am b/lib/common/tests/results/Makefile.am
index 8d51d12..a7d5663 100644
--- a/lib/common/tests/results/Makefile.am
+++ b/lib/common/tests/results/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2021-2022 the Pacemaker project contributors
+# Copyright 2021-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,6 +11,6 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = pcmk__results_test
+check_PROGRAMS = pcmk__results_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/results/pcmk__results_test.c b/lib/common/tests/results/pcmk__results_test.c
index 53665d1..016eb7f 100644
--- a/lib/common/tests/results/pcmk__results_test.c
+++ b/lib/common/tests/results/pcmk__results_test.c
@@ -47,15 +47,9 @@ test_for_pcmk_rc2exitc(void **state) {
assert_int_equal(pcmk_rc2exitc(-7777777), CRM_EX_ERROR);
}
-static void
-test_for_bz2_strerror(void **state) {
- assert_string_equal(bz2_strerror(BZ_STREAM_END), "Ok");
-}
-
PCMK__UNIT_TEST(NULL, NULL,
cmocka_unit_test(test_for_pcmk_rc_name),
cmocka_unit_test(test_for_pcmk_rc_str),
cmocka_unit_test(test_for_crm_exit_name),
cmocka_unit_test(test_for_crm_exit_str),
- cmocka_unit_test(test_for_pcmk_rc2exitc),
- cmocka_unit_test(test_for_bz2_strerror))
+ cmocka_unit_test(test_for_pcmk_rc2exitc))
diff --git a/lib/common/tests/scores/Makefile.am b/lib/common/tests/scores/Makefile.am
index 66ca073..cb96155 100644
--- a/lib/common/tests/scores/Makefile.am
+++ b/lib/common/tests/scores/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2020-2022 the Pacemaker project contributors
+# Copyright 2020-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,9 +11,8 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = \
- char2score_test \
- pcmk__add_scores_test \
- pcmk_readable_score_test
+check_PROGRAMS = char2score_test \
+ pcmk__add_scores_test \
+ pcmk_readable_score_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/scores/pcmk__add_scores_test.c b/lib/common/tests/scores/pcmk__add_scores_test.c
index 85ac232..1309659 100644
--- a/lib/common/tests/scores/pcmk__add_scores_test.c
+++ b/lib/common/tests/scores/pcmk__add_scores_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -53,6 +53,8 @@ result_infinite(void **state)
assert_int_equal(pcmk__add_scores(INT_MAX, INT_MAX), CRM_SCORE_INFINITY);
assert_int_equal(pcmk__add_scores(INT_MIN, INT_MIN), -CRM_SCORE_INFINITY);
assert_int_equal(pcmk__add_scores(2000000, 50), CRM_SCORE_INFINITY);
+ assert_int_equal(pcmk__add_scores(CRM_SCORE_INFINITY/2, CRM_SCORE_INFINITY/2), CRM_SCORE_INFINITY);
+ assert_int_equal(pcmk__add_scores(-CRM_SCORE_INFINITY/2, -CRM_SCORE_INFINITY/2), -CRM_SCORE_INFINITY);
assert_int_equal(pcmk__add_scores(-4000000, 50), -CRM_SCORE_INFINITY);
}
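
As an illustrative aside (a sketch, not upstream code), the two added assertions confirm that score addition saturates, so callers can sum partial scores without their own overflow handling; score_a and score_b here are hypothetical inputs:

    int total = pcmk__add_scores(score_a, score_b);

    if (total >= CRM_SCORE_INFINITY) {
        // Adding two half-infinities (or anything that would overflow
        // the score range) yields exactly CRM_SCORE_INFINITY, never more
    }
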
diff --git a/lib/common/tests/strings/Makefile.am b/lib/common/tests/strings/Makefile.am
index 9abb8e9..e66af0d 100644
--- a/lib/common/tests/strings/Makefile.am
+++ b/lib/common/tests/strings/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2020-2022 the Pacemaker project contributors
+# Copyright 2020-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,31 +11,31 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = \
- crm_get_msec_test \
- crm_is_true_test \
- crm_str_to_boolean_test \
- pcmk__add_word_test \
- pcmk__btoa_test \
- pcmk__char_in_any_str_test \
- pcmk__compress_test \
- pcmk__ends_with_test \
- pcmk__g_strcat_test \
- pcmk__guint_from_hash_test \
- pcmk__numeric_strcasecmp_test \
- pcmk__parse_ll_range_test \
- pcmk__s_test \
- pcmk__scan_double_test \
- pcmk__scan_min_int_test \
- pcmk__scan_port_test \
- pcmk__starts_with_test \
- pcmk__str_any_of_test \
- pcmk__str_in_list_test \
- pcmk__str_table_dup_test \
- pcmk__str_update_test \
- pcmk__strcmp_test \
- pcmk__strkey_table_test \
- pcmk__strikey_table_test \
- pcmk__trim_test
+check_PROGRAMS = crm_get_msec_test \
+ crm_is_true_test \
+ crm_str_to_boolean_test \
+ pcmk__add_word_test \
+ pcmk__btoa_test \
+ pcmk__char_in_any_str_test \
+ pcmk__compress_test \
+ pcmk__ends_with_test \
+ pcmk__g_strcat_test \
+ pcmk__guint_from_hash_test \
+ pcmk__numeric_strcasecmp_test \
+ pcmk__parse_ll_range_test \
+ pcmk__s_test \
+ pcmk__scan_double_test \
+ pcmk__scan_ll_test \
+ pcmk__scan_min_int_test \
+ pcmk__scan_port_test \
+ pcmk__starts_with_test \
+ pcmk__str_any_of_test \
+ pcmk__str_in_list_test \
+ pcmk__str_table_dup_test \
+ pcmk__str_update_test \
+ pcmk__strcmp_test \
+ pcmk__strkey_table_test \
+ pcmk__strikey_table_test \
+ pcmk__trim_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/strings/pcmk__compress_test.c b/lib/common/tests/strings/pcmk__compress_test.c
index 7480937..7b59d9d 100644
--- a/lib/common/tests/strings/pcmk__compress_test.c
+++ b/lib/common/tests/strings/pcmk__compress_test.c
@@ -33,7 +33,7 @@ max_too_small(void **state)
char *result = calloc(1024, sizeof(char));
unsigned int len;
- assert_int_equal(pcmk__compress(SIMPLE_DATA, 40, 10, &result, &len), pcmk_rc_error);
+ assert_int_equal(pcmk__compress(SIMPLE_DATA, 40, 10, &result, &len), EFBIG);
}
static void
diff --git a/lib/common/tests/strings/pcmk__guint_from_hash_test.c b/lib/common/tests/strings/pcmk__guint_from_hash_test.c
index e2b4762..225c5b3 100644
--- a/lib/common/tests/strings/pcmk__guint_from_hash_test.c
+++ b/lib/common/tests/strings/pcmk__guint_from_hash_test.c
@@ -59,6 +59,7 @@ conversion_errors(void **state)
g_hash_table_insert(tbl, strdup("negative"), strdup("-3"));
g_hash_table_insert(tbl, strdup("toobig"), strdup("20000000000000000"));
+ g_hash_table_insert(tbl, strdup("baddata"), strdup("asdf"));
assert_int_equal(pcmk__guint_from_hash(tbl, "negative", 456, &result), ERANGE);
assert_int_equal(result, 456);
@@ -66,6 +67,9 @@ conversion_errors(void **state)
assert_int_equal(pcmk__guint_from_hash(tbl, "toobig", 456, &result), ERANGE);
assert_int_equal(result, 456);
+ assert_int_equal(pcmk__guint_from_hash(tbl, "baddata", 456, &result), EINVAL);
+ assert_int_equal(result, 456);
+
g_hash_table_destroy(tbl);
}
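
A hedged usage sketch of the error path the new "baddata" case covers (the "params" table, the "interval" key, and the pcmk_rc_ok success check are assumptions, not taken from the patch):

    guint interval_ms = 0;
    int rc = pcmk__guint_from_hash(params, "interval", 30000U, &interval_ms);

    if (rc != pcmk_rc_ok) {
        // EINVAL (non-numeric) and ERANGE (negative or too large) both
        // leave interval_ms at the caller-supplied default of 30000
        crm_warn("Using default interval %ums", interval_ms);
    }
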
diff --git a/lib/common/tests/strings/pcmk__scan_ll_test.c b/lib/common/tests/strings/pcmk__scan_ll_test.c
new file mode 100644
index 0000000..645ecb4
--- /dev/null
+++ b/lib/common/tests/strings/pcmk__scan_ll_test.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/common/unittest_internal.h>
+
+static void
+empty_input_string(void **state)
+{
+ long long result;
+
+ assert_int_equal(pcmk__scan_ll(NULL, &result, 47), pcmk_rc_ok);
+ assert_int_equal(result, 47);
+}
+
+static void
+bad_input_string(void **state)
+{
+ long long result;
+
+ assert_int_equal(pcmk__scan_ll("asdf", &result, 47), EINVAL);
+ assert_int_equal(result, 47);
+ assert_int_equal(pcmk__scan_ll("as12", &result, 47), EINVAL);
+ assert_int_equal(result, 47);
+}
+
+static void
+trailing_chars(void **state)
+{
+ long long result;
+
+ assert_int_equal(pcmk__scan_ll("12as", &result, 47), pcmk_rc_ok);
+ assert_int_equal(result, 12);
+}
+
+static void
+no_result_variable(void **state)
+{
+ assert_int_equal(pcmk__scan_ll("1234", NULL, 47), pcmk_rc_ok);
+ assert_int_equal(pcmk__scan_ll("asdf", NULL, 47), EINVAL);
+}
+
+static void
+typical_case(void **state)
+{
+ long long result;
+
+ assert_int_equal(pcmk__scan_ll("1234", &result, 47), pcmk_rc_ok);
+ assert_int_equal(result, 1234);
+}
+
+PCMK__UNIT_TEST(NULL, NULL,
+ cmocka_unit_test(empty_input_string),
+ cmocka_unit_test(bad_input_string),
+ cmocka_unit_test(trailing_chars),
+ cmocka_unit_test(no_result_variable),
+ cmocka_unit_test(typical_case))
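
A usage sketch for the new helper, inferred from the tests above ("text" stands in for a caller-supplied string and is not from the patch):

    long long value = 0;

    if (pcmk__scan_ll(text, &value, 47LL) != pcmk_rc_ok) {
        // Non-numeric input ("asdf") returns EINVAL and resets value to
        // the default (47); a NULL string is not an error and also
        // yields the default, while "12as" parses the leading digits
    }
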
diff --git a/lib/common/tests/utils/Makefile.am b/lib/common/tests/utils/Makefile.am
index edccf09..f028ce4 100644
--- a/lib/common/tests/utils/Makefile.am
+++ b/lib/common/tests/utils/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2020-2022 the Pacemaker project contributors
+# Copyright 2020-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,15 +11,17 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = \
- compare_version_test \
- crm_meta_name_test \
- crm_meta_value_test \
- crm_user_lookup_test \
- pcmk_daemon_user_test \
- pcmk_str_is_infinity_test \
- pcmk_str_is_minus_infinity_test \
- pcmk__getpid_s_test
+check_PROGRAMS = compare_version_test \
+ crm_meta_name_test \
+ crm_meta_value_test \
+ crm_user_lookup_test \
+ pcmk_daemon_user_test \
+ pcmk_str_is_infinity_test \
+ pcmk_str_is_minus_infinity_test \
+ pcmk__fail_attr_name_test \
+ pcmk__failcount_name_test \
+ pcmk__getpid_s_test \
+ pcmk__lastfailure_name_test
if WRAPPABLE_UNAME
check_PROGRAMS += pcmk_hostname_test
diff --git a/lib/common/tests/utils/pcmk__fail_attr_name_test.c b/lib/common/tests/utils/pcmk__fail_attr_name_test.c
new file mode 100644
index 0000000..c6c25fc
--- /dev/null
+++ b/lib/common/tests/utils/pcmk__fail_attr_name_test.c
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/common/unittest_internal.h>
+
+static void
+null_arguments(void **state)
+{
+ assert_null(pcmk__fail_attr_name(NULL, NULL, NULL, 30000));
+ assert_null(pcmk__fail_attr_name(NULL, "myrsc", "monitor", 30000));
+ assert_null(pcmk__fail_attr_name("xyz", NULL, "monitor", 30000));
+ assert_null(pcmk__fail_attr_name("xyz", "myrsc", NULL, 30000));
+}
+
+static void
+standard_usage(void **state)
+{
+ char *s = NULL;
+
+ assert_string_equal(pcmk__fail_attr_name("xyz", "myrsc", "monitor", 30000),
+ "xyz-myrsc#monitor_30000");
+
+ free(s);
+}
+
+PCMK__UNIT_TEST(NULL, NULL,
+ cmocka_unit_test(null_arguments),
+ cmocka_unit_test(standard_usage))
diff --git a/lib/common/tests/utils/pcmk__failcount_name_test.c b/lib/common/tests/utils/pcmk__failcount_name_test.c
new file mode 100644
index 0000000..a801f4d
--- /dev/null
+++ b/lib/common/tests/utils/pcmk__failcount_name_test.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/common/unittest_internal.h>
+
+static void
+null_arguments(void **state)
+{
+ assert_null(pcmk__failcount_name(NULL, NULL, 30000));
+ assert_null(pcmk__failcount_name("myrsc", NULL, 30000));
+ assert_null(pcmk__failcount_name(NULL, "monitor", 30000));
+}
+
+static void
+standard_usage(void **state)
+{
+ char *s = NULL;
+
+ assert_string_equal(pcmk__failcount_name("myrsc", "monitor", 30000),
+ "fail-count-myrsc#monitor_30000");
+
+ free(s);
+}
+
+PCMK__UNIT_TEST(NULL, NULL,
+ cmocka_unit_test(null_arguments),
+ cmocka_unit_test(standard_usage))
diff --git a/lib/common/tests/utils/pcmk__lastfailure_name_test.c b/lib/common/tests/utils/pcmk__lastfailure_name_test.c
new file mode 100644
index 0000000..eab01f2
--- /dev/null
+++ b/lib/common/tests/utils/pcmk__lastfailure_name_test.c
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU General Public License version 2
+ * or later (GPLv2+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/common/unittest_internal.h>
+
+static void
+null_arguments(void **state)
+{
+ assert_null(pcmk__lastfailure_name(NULL, NULL, 30000));
+ assert_null(pcmk__lastfailure_name("myrsc", NULL, 30000));
+ assert_null(pcmk__lastfailure_name(NULL, "monitor", 30000));
+}
+
+static void
+standard_usage(void **state)
+{
+ char *s = NULL;
+
+ assert_string_equal(pcmk__lastfailure_name("myrsc", "monitor", 30000),
+ "last-failure-myrsc#monitor_30000");
+
+ free(s);
+}
+
+PCMK__UNIT_TEST(NULL, NULL,
+ cmocka_unit_test(null_arguments),
+ cmocka_unit_test(standard_usage))
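
For reference only (a sketch, not upstream code), the new helpers compose attribute names in the formats asserted above; "rsc" and the 30000 ms interval are placeholders, and freeing the results assumes they are allocated (the tests declare and free a local char *):

    char *fail_attr = pcmk__failcount_name("rsc", "monitor", 30000);
    // -> "fail-count-rsc#monitor_30000"

    char *last_attr = pcmk__lastfailure_name("rsc", "monitor", 30000);
    // -> "last-failure-rsc#monitor_30000"

    free(fail_attr);
    free(last_attr);
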
diff --git a/lib/common/tests/xml/Makefile.am b/lib/common/tests/xml/Makefile.am
index 0ccdcc3..465c950 100644
--- a/lib/common/tests/xml/Makefile.am
+++ b/lib/common/tests/xml/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2022 the Pacemaker project contributors
+# Copyright 2022-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,7 +11,7 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = pcmk__xe_foreach_child_test \
- pcmk__xe_match_test
+check_PROGRAMS = pcmk__xe_foreach_child_test \
+ pcmk__xe_match_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/tests/xml/pcmk__xe_foreach_child_test.c b/lib/common/tests/xml/pcmk__xe_foreach_child_test.c
index 9bcba87..ffb9171 100644
--- a/lib/common/tests/xml/pcmk__xe_foreach_child_test.c
+++ b/lib/common/tests/xml/pcmk__xe_foreach_child_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -14,7 +14,7 @@
static int compare_name_handler(xmlNode *xml, void *userdata) {
function_called();
- assert_string_equal((char *) userdata, crm_element_name(xml));
+ assert_string_equal((char *) userdata, (const char *) xml->name);
return pcmk_rc_ok;
}
@@ -140,7 +140,8 @@ const char *str3 =
static int any_of_handler(xmlNode *xml, void *userdata) {
function_called();
- assert_true(pcmk__str_any_of(crm_element_name(xml), "node1", "node2", "node3", NULL));
+ assert_true(pcmk__str_any_of((const char *) xml->name,
+ "node1", "node2", "node3", NULL));
return pcmk_rc_ok;
}
@@ -160,7 +161,7 @@ any_of_test(void **state) {
static int stops_on_first_handler(xmlNode *xml, void *userdata) {
function_called();
- if (pcmk__str_eq(crm_element_name(xml), "node1", pcmk__str_none)) {
+ if (pcmk__xe_is(xml, "node1")) {
return pcmk_rc_error;
} else {
return pcmk_rc_ok;
@@ -170,7 +171,7 @@ static int stops_on_first_handler(xmlNode *xml, void *userdata) {
static int stops_on_second_handler(xmlNode *xml, void *userdata) {
function_called();
- if (pcmk__str_eq(crm_element_name(xml), "node2", pcmk__str_none)) {
+ if (pcmk__xe_is(xml, "node2")) {
return pcmk_rc_error;
} else {
return pcmk_rc_ok;
@@ -180,7 +181,7 @@ static int stops_on_second_handler(xmlNode *xml, void *userdata) {
static int stops_on_third_handler(xmlNode *xml, void *userdata) {
function_called();
- if (pcmk__str_eq(crm_element_name(xml), "node3", pcmk__str_none)) {
+ if (pcmk__xe_is(xml, "node3")) {
return pcmk_rc_error;
} else {
return pcmk_rc_ok;
diff --git a/lib/common/tests/xpath/Makefile.am b/lib/common/tests/xpath/Makefile.am
index 94abeee..d4c504b 100644
--- a/lib/common/tests/xpath/Makefile.am
+++ b/lib/common/tests/xpath/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2021-2022 the Pacemaker project contributors
+# Copyright 2021-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -11,6 +11,6 @@ include $(top_srcdir)/mk/tap.mk
include $(top_srcdir)/mk/unittest.mk
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = pcmk__xpath_node_id_test
+check_PROGRAMS = pcmk__xpath_node_id_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/common/watchdog.c b/lib/common/watchdog.c
index ff2d273..e569214 100644
--- a/lib/common/watchdog.c
+++ b/lib/common/watchdog.c
@@ -20,10 +20,6 @@
#include <dirent.h>
#include <signal.h>
-#ifdef _POSIX_MEMLOCK
-# include <sys/mman.h>
-#endif
-
static pid_t sbd_pid = 0;
static void
@@ -56,6 +52,7 @@ panic_local(void)
int rc = pcmk_ok;
uid_t uid = geteuid();
pid_t ppid = getppid();
+ const char *panic_action = pcmk__env_option(PCMK__ENV_PANIC_ACTION);
if(uid != 0 && ppid > 1) {
/* We're a non-root pacemaker daemon (pacemaker-based,
@@ -93,13 +90,15 @@ panic_local(void)
/* We're either pacemakerd, or a pacemaker daemon running as root */
- if (pcmk__str_eq("crash", getenv("PCMK_panic_action"), pcmk__str_casei)) {
+ if (pcmk__str_eq(panic_action, "crash", pcmk__str_casei)) {
sysrq_trigger('c');
- } else if (pcmk__str_eq("sync-crash", getenv("PCMK_panic_action"), pcmk__str_casei)) {
+
+ } else if (pcmk__str_eq(panic_action, "sync-crash", pcmk__str_casei)) {
sync();
sysrq_trigger('c');
+
} else {
- if (pcmk__str_eq("sync-reboot", getenv("PCMK_panic_action"), pcmk__str_casei)) {
+ if (pcmk__str_eq(panic_action, "sync-reboot", pcmk__str_casei)) {
sync();
}
sysrq_trigger('b');
diff --git a/lib/common/xml.c b/lib/common/xml.c
index 22078ce..53ebff7 100644
--- a/lib/common/xml.c
+++ b/lib/common/xml.c
@@ -42,7 +42,8 @@
* parsing without XML_PARSE_RECOVER, and if that fails, try parsing again with
* it, logging a warning if it succeeds.
*/
-#define PCMK__XML_PARSE_OPTS (XML_PARSE_NOBLANKS | XML_PARSE_RECOVER)
+#define PCMK__XML_PARSE_OPTS_WITHOUT_RECOVER (XML_PARSE_NOBLANKS)
+#define PCMK__XML_PARSE_OPTS_WITH_RECOVER (XML_PARSE_NOBLANKS | XML_PARSE_RECOVER)
bool
pcmk__tracking_xml_changes(xmlNode *xml, bool lazy)
@@ -85,8 +86,8 @@ pcmk__set_xml_doc_flag(xmlNode *xml, enum xml_private_flags flag)
}
// Mark document, element, and all element's parents as changed
-static inline void
-mark_xml_node_dirty(xmlNode *xml)
+void
+pcmk__mark_xml_node_dirty(xmlNode *xml)
{
pcmk__set_xml_doc_flag(xml, pcmk__xf_dirty);
set_parent_flag(xml, pcmk__xf_dirty);
@@ -114,12 +115,15 @@ void
pcmk__mark_xml_created(xmlNode *xml)
{
xmlNode *cIter = NULL;
- xml_node_private_t *nodepriv = xml->_private;
+ xml_node_private_t *nodepriv = NULL;
+
+ CRM_ASSERT(xml != NULL);
+ nodepriv = xml->_private;
if (nodepriv && pcmk__tracking_xml_changes(xml, FALSE)) {
if (!pcmk_is_set(nodepriv->flags, pcmk__xf_created)) {
pcmk__set_xml_flags(nodepriv, pcmk__xf_created);
- mark_xml_node_dirty(xml);
+ pcmk__mark_xml_node_dirty(xml);
}
for (cIter = pcmk__xml_first_child(xml); cIter != NULL;
cIter = pcmk__xml_next(cIter)) {
@@ -128,17 +132,6 @@ pcmk__mark_xml_created(xmlNode *xml)
}
}
-void
-pcmk__mark_xml_attr_dirty(xmlAttr *a)
-{
- xmlNode *parent = a->parent;
- xml_node_private_t *nodepriv = a->_private;
-
- pcmk__set_xml_flags(nodepriv, pcmk__xf_dirty|pcmk__xf_modified);
- pcmk__clear_xml_flags(nodepriv, pcmk__xf_deleted);
- mark_xml_node_dirty(parent);
-}
-
#define XML_DOC_PRIVATE_MAGIC 0x81726354UL
#define XML_NODE_PRIVATE_MAGIC 0x54637281UL
@@ -250,7 +243,7 @@ new_private_data(xmlNode *node)
/* XML_ELEMENT_NODE doesn't get picked up here, node->doc is
* not hooked up at the point we are called
*/
- mark_xml_node_dirty(node);
+ pcmk__mark_xml_node_dirty(node);
}
break;
}
@@ -321,19 +314,6 @@ pcmk__xml_position(const xmlNode *xml, enum xml_private_flags ignore_if_set)
return position;
}
-// This also clears attribute's flags if not marked as deleted
-static bool
-marked_as_deleted(xmlAttrPtr a, void *user_data)
-{
- xml_node_private_t *nodepriv = a->_private;
-
- if (pcmk_is_set(nodepriv->flags, pcmk__xf_deleted)) {
- return true;
- }
- nodepriv->flags = pcmk__xf_none;
- return false;
-}
-
// Remove all attributes marked as deleted from an XML node
static void
accept_attr_deletions(xmlNode *xml)
@@ -342,7 +322,7 @@ accept_attr_deletions(xmlNode *xml)
((xml_node_private_t *) xml->_private)->flags = pcmk__xf_none;
// Remove this XML node's attributes that were marked as deleted
- pcmk__xe_remove_matching_attrs(xml, marked_as_deleted, NULL);
+ pcmk__xe_remove_matching_attrs(xml, pcmk__marked_as_deleted, NULL);
// Recursively do the same for this XML node's children
for (xmlNodePtr cIter = pcmk__xml_first_child(xml); cIter != NULL;
@@ -371,7 +351,7 @@ pcmk__xml_match(const xmlNode *haystack, const xmlNode *needle, bool exact)
const char *id = ID(needle);
const char *attr = (id == NULL)? NULL : XML_ATTR_ID;
- return pcmk__xe_match(haystack, crm_element_name(needle), attr, id);
+ return pcmk__xe_match(haystack, (const char *) needle->name, attr, id);
}
}
@@ -404,11 +384,7 @@ xmlNode *
find_xml_node(const xmlNode *root, const char *search_path, gboolean must_find)
{
xmlNode *a_child = NULL;
- const char *name = "NULL";
-
- if (root != NULL) {
- name = crm_element_name(root);
- }
+ const char *name = (root == NULL)? "<NULL>" : (const char *) root->name;
if (search_path == NULL) {
crm_warn("Will never find <NULL>");
@@ -418,7 +394,6 @@ find_xml_node(const xmlNode *root, const char *search_path, gboolean must_find)
for (a_child = pcmk__xml_first_child(root); a_child != NULL;
a_child = pcmk__xml_next(a_child)) {
if (strcmp((const char *)a_child->name, search_path) == 0) {
-/* crm_trace("returning node (%s).", crm_element_name(a_child)); */
return a_child;
}
}
@@ -473,7 +448,7 @@ pcmk__xe_match(const xmlNode *parent, const char *node_name,
(attr_n? attr_n : ""),
(attr_n? "=" : ""),
(attr_n? attr_v : ""),
- crm_element_name(parent));
+ (const char *) parent->name);
return NULL;
}
@@ -643,31 +618,17 @@ pcmk__xe_remove_matching_attrs(xmlNode *element,
}
}
-xmlDoc *
-getDocPtr(xmlNode * node)
-{
- xmlDoc *doc = NULL;
-
- CRM_CHECK(node != NULL, return NULL);
-
- doc = node->doc;
- if (doc == NULL) {
- doc = xmlNewDoc((pcmkXmlStr) "1.0");
- xmlDocSetRootElement(doc, node);
- xmlSetTreeDoc(node, doc);
- }
- return doc;
-}
-
xmlNode *
add_node_copy(xmlNode * parent, xmlNode * src_node)
{
xmlNode *child = NULL;
- xmlDoc *doc = getDocPtr(parent);
- CRM_CHECK(src_node != NULL, return NULL);
+ CRM_CHECK((parent != NULL) && (src_node != NULL), return NULL);
- child = xmlDocCopyNode(src_node, doc, 1);
+ child = xmlDocCopyNode(src_node, parent->doc, 1);
+ if (child == NULL) {
+ return NULL;
+ }
xmlAddChild(parent, child);
pcmk__mark_xml_created(child);
return child;
@@ -686,13 +647,22 @@ create_xml_node(xmlNode * parent, const char *name)
if (parent == NULL) {
doc = xmlNewDoc((pcmkXmlStr) "1.0");
+ if (doc == NULL) {
+ return NULL;
+ }
+
node = xmlNewDocRawNode(doc, NULL, (pcmkXmlStr) name, NULL);
+ if (node == NULL) {
+ xmlFreeDoc(doc);
+ return NULL;
+ }
xmlDocSetRootElement(doc, node);
} else {
- doc = getDocPtr(parent);
- node = xmlNewDocRawNode(doc, NULL, (pcmkXmlStr) name, NULL);
- xmlAddChild(parent, node);
+ node = xmlNewChild(parent, NULL, (pcmkXmlStr) name, NULL);
+ if (node == NULL) {
+ return NULL;
+ }
}
pcmk__mark_xml_created(node);
return node;
@@ -823,7 +793,6 @@ copy_xml(xmlNode * src)
CRM_ASSERT(copy != NULL);
xmlDocSetRootElement(doc, copy);
- xmlSetTreeDoc(copy, doc);
return copy;
}
@@ -833,7 +802,7 @@ string2xml(const char *input)
xmlNode *xml = NULL;
xmlDocPtr output = NULL;
xmlParserCtxtPtr ctxt = NULL;
- xmlErrorPtr last_error = NULL;
+ const xmlError *last_error = NULL;
if (input == NULL) {
crm_err("Can't parse NULL input");
@@ -847,7 +816,17 @@ string2xml(const char *input)
xmlCtxtResetLastError(ctxt);
xmlSetGenericErrorFunc(ctxt, pcmk__log_xmllib_err);
output = xmlCtxtReadDoc(ctxt, (pcmkXmlStr) input, NULL, NULL,
- PCMK__XML_PARSE_OPTS);
+ PCMK__XML_PARSE_OPTS_WITHOUT_RECOVER);
+
+ if (output == NULL) {
+ output = xmlCtxtReadDoc(ctxt, (pcmkXmlStr) input, NULL, NULL,
+ PCMK__XML_PARSE_OPTS_WITH_RECOVER);
+ if (output) {
+ crm_warn("Successfully recovered from XML errors "
+ "(note: a future release will treat this as a fatal failure)");
+ }
+ }
+
if (output) {
xml = xmlDocGetRootElement(output);
}
@@ -933,9 +912,11 @@ decompress_file(const char *filename)
}
bz_file = BZ2_bzReadOpen(&rc, input, 0, 0, NULL, 0);
- if (rc != BZ_OK) {
+ rc = pcmk__bzlib2rc(rc);
+
+ if (rc != pcmk_rc_ok) {
crm_err("Could not prepare to read compressed %s: %s "
- CRM_XS " bzerror=%d", filename, bz2_strerror(rc), rc);
+ CRM_XS " rc=%d", filename, pcmk_rc_str(rc), rc);
BZ2_bzReadClose(&rc, bz_file);
fclose(input);
return NULL;
@@ -957,9 +938,11 @@ decompress_file(const char *filename)
buffer[length] = '\0';
- if (rc != BZ_STREAM_END) {
- crm_err("Could not read compressed %s: %s "
- CRM_XS " bzerror=%d", filename, bz2_strerror(rc), rc);
+ rc = pcmk__bzlib2rc(rc);
+
+ if (rc != pcmk_rc_ok) {
+ crm_err("Could not read compressed %s: %s " CRM_XS " rc=%d",
+ filename, pcmk_rc_str(rc), rc);
free(buffer);
buffer = NULL;
}
@@ -1010,7 +993,7 @@ filename2xml(const char *filename)
xmlDocPtr output = NULL;
bool uncompressed = true;
xmlParserCtxtPtr ctxt = NULL;
- xmlErrorPtr last_error = NULL;
+ const xmlError *last_error = NULL;
/* create a parser context */
ctxt = xmlNewParserCtxt();
@@ -1026,16 +1009,45 @@ filename2xml(const char *filename)
if (pcmk__str_eq(filename, "-", pcmk__str_null_matches)) {
/* STDIN_FILENO == fileno(stdin) */
output = xmlCtxtReadFd(ctxt, STDIN_FILENO, "unknown.xml", NULL,
- PCMK__XML_PARSE_OPTS);
+ PCMK__XML_PARSE_OPTS_WITHOUT_RECOVER);
+
+ if (output == NULL) {
+ output = xmlCtxtReadFd(ctxt, STDIN_FILENO, "unknown.xml", NULL,
+ PCMK__XML_PARSE_OPTS_WITH_RECOVER);
+ if (output) {
+ crm_warn("Successfully recovered from XML errors "
+ "(note: a future release will treat this as a fatal failure)");
+ }
+ }
} else if (uncompressed) {
- output = xmlCtxtReadFile(ctxt, filename, NULL, PCMK__XML_PARSE_OPTS);
+ output = xmlCtxtReadFile(ctxt, filename, NULL,
+ PCMK__XML_PARSE_OPTS_WITHOUT_RECOVER);
+
+ if (output == NULL) {
+ output = xmlCtxtReadFile(ctxt, filename, NULL,
+ PCMK__XML_PARSE_OPTS_WITH_RECOVER);
+ if (output) {
+ crm_warn("Successfully recovered from XML errors "
+ "(note: a future release will treat this as a fatal failure)");
+ }
+ }
} else {
char *input = decompress_file(filename);
output = xmlCtxtReadDoc(ctxt, (pcmkXmlStr) input, NULL, NULL,
- PCMK__XML_PARSE_OPTS);
+ PCMK__XML_PARSE_OPTS_WITHOUT_RECOVER);
+
+ if (output == NULL) {
+ output = xmlCtxtReadDoc(ctxt, (pcmkXmlStr) input, NULL, NULL,
+ PCMK__XML_PARSE_OPTS_WITH_RECOVER);
+ if (output) {
+ crm_warn("Successfully recovered from XML errors "
+ "(note: a future release will treat this as a fatal failure)");
+ }
+ }
+
free(input);
}
@@ -1134,7 +1146,7 @@ crm_xml_set_id(xmlNode *xml, const char *format, ...)
* \internal
* \brief Write XML to a file stream
*
- * \param[in] xml_node XML to write
+ * \param[in] xml XML to write
* \param[in] filename Name of file being written (for logging only)
* \param[in,out] stream Open file stream corresponding to filename
* \param[in] compress Whether to compress XML before writing
@@ -1143,18 +1155,18 @@ crm_xml_set_id(xmlNode *xml, const char *format, ...)
* \return Standard Pacemaker return code
*/
static int
-write_xml_stream(xmlNode *xml_node, const char *filename, FILE *stream,
+write_xml_stream(const xmlNode *xml, const char *filename, FILE *stream,
bool compress, unsigned int *nbytes)
{
int rc = pcmk_rc_ok;
char *buffer = NULL;
*nbytes = 0;
- crm_log_xml_trace(xml_node, "writing");
+ crm_log_xml_trace(xml, "writing");
- buffer = dump_xml_formatted(xml_node);
+ buffer = dump_xml_formatted(xml);
CRM_CHECK(buffer && strlen(buffer),
- crm_log_xml_warn(xml_node, "formatting failed");
+ crm_log_xml_warn(xml, "formatting failed");
rc = pcmk_rc_error;
goto bail);
@@ -1164,24 +1176,30 @@ write_xml_stream(xmlNode *xml_node, const char *filename, FILE *stream,
rc = BZ_OK;
bz_file = BZ2_bzWriteOpen(&rc, stream, 5, 0, 30);
- if (rc != BZ_OK) {
+ rc = pcmk__bzlib2rc(rc);
+
+ if (rc != pcmk_rc_ok) {
crm_warn("Not compressing %s: could not prepare file stream: %s "
- CRM_XS " bzerror=%d", filename, bz2_strerror(rc), rc);
+ CRM_XS " rc=%d", filename, pcmk_rc_str(rc), rc);
} else {
BZ2_bzWrite(&rc, bz_file, buffer, strlen(buffer));
- if (rc != BZ_OK) {
+ rc = pcmk__bzlib2rc(rc);
+
+ if (rc != pcmk_rc_ok) {
crm_warn("Not compressing %s: could not compress data: %s "
- CRM_XS " bzerror=%d errno=%d",
- filename, bz2_strerror(rc), rc, errno);
+ CRM_XS " rc=%d errno=%d",
+ filename, pcmk_rc_str(rc), rc, errno);
}
}
- if (rc == BZ_OK) {
+ if (rc == pcmk_rc_ok) {
BZ2_bzWriteClose(&rc, bz_file, 0, &in, nbytes);
- if (rc != BZ_OK) {
+ rc = pcmk__bzlib2rc(rc);
+
+ if (rc != pcmk_rc_ok) {
crm_warn("Not compressing %s: could not write compressed data: %s "
- CRM_XS " bzerror=%d errno=%d",
- filename, bz2_strerror(rc), rc, errno);
+ CRM_XS " rc=%d errno=%d",
+ filename, pcmk_rc_str(rc), rc, errno);
*nbytes = 0; // retry without compression
} else {
crm_trace("Compressed XML for %s from %u bytes to %u",
@@ -1226,7 +1244,7 @@ write_xml_stream(xmlNode *xml_node, const char *filename, FILE *stream,
/*!
* \brief Write XML to a file descriptor
*
- * \param[in] xml_node XML to write
+ * \param[in] xml XML to write
* \param[in] filename Name of file being written (for logging only)
* \param[in] fd Open file descriptor corresponding to filename
* \param[in] compress Whether to compress XML before writing
@@ -1234,18 +1252,19 @@ write_xml_stream(xmlNode *xml_node, const char *filename, FILE *stream,
* \return Number of bytes written on success, -errno otherwise
*/
int
-write_xml_fd(xmlNode * xml_node, const char *filename, int fd, gboolean compress)
+write_xml_fd(const xmlNode *xml, const char *filename, int fd,
+ gboolean compress)
{
FILE *stream = NULL;
unsigned int nbytes = 0;
int rc = pcmk_rc_ok;
- CRM_CHECK(xml_node && (fd > 0), return -EINVAL);
+ CRM_CHECK((xml != NULL) && (fd > 0), return -EINVAL);
stream = fdopen(fd, "w");
if (stream == NULL) {
return -errno;
}
- rc = write_xml_stream(xml_node, filename, stream, compress, &nbytes);
+ rc = write_xml_stream(xml, filename, stream, compress, &nbytes);
if (rc != pcmk_rc_ok) {
return pcmk_rc2legacy(rc);
}
@@ -1255,25 +1274,25 @@ write_xml_fd(xmlNode * xml_node, const char *filename, int fd, gboolean compress
/*!
* \brief Write XML to a file
*
- * \param[in] xml_node XML to write
+ * \param[in] xml XML to write
* \param[in] filename Name of file to write
* \param[in] compress Whether to compress XML before writing
*
* \return Number of bytes written on success, -errno otherwise
*/
int
-write_xml_file(xmlNode * xml_node, const char *filename, gboolean compress)
+write_xml_file(const xmlNode *xml, const char *filename, gboolean compress)
{
FILE *stream = NULL;
unsigned int nbytes = 0;
int rc = pcmk_rc_ok;
- CRM_CHECK(xml_node && filename, return -EINVAL);
+ CRM_CHECK((xml != NULL) && (filename != NULL), return -EINVAL);
stream = fopen(filename, "w");
if (stream == NULL) {
return -errno;
}
- rc = write_xml_stream(xml_node, filename, stream, compress, &nbytes);
+ rc = write_xml_stream(xml, filename, stream, compress, &nbytes);
if (rc != pcmk_rc_ok) {
return pcmk_rc2legacy(rc);
}
@@ -1382,37 +1401,6 @@ crm_xml_escape(const char *text)
/*!
* \internal
- * \brief Append an XML attribute to a buffer
- *
- * \param[in] attr Attribute to append
- * \param[in,out] buffer Where to append the content (must not be \p NULL)
- */
-static void
-dump_xml_attr(const xmlAttr *attr, GString *buffer)
-{
- char *p_value = NULL;
- const char *p_name = NULL;
- xml_node_private_t *nodepriv = NULL;
-
- if (attr == NULL || attr->children == NULL) {
- return;
- }
-
- nodepriv = attr->_private;
- if (nodepriv && pcmk_is_set(nodepriv->flags, pcmk__xf_deleted)) {
- return;
- }
-
- p_name = (const char *) attr->name;
- p_value = crm_xml_escape((const char *)attr->children->content);
- pcmk__g_strcat(buffer, " ", p_name, "=\"", pcmk__s(p_value, "<null>"), "\"",
- NULL);
-
- free(p_value);
-}
-
-/*!
- * \internal
* \brief Append a string representation of an XML element to a buffer
*
* \param[in] data XML whose representation to append
@@ -1424,24 +1412,21 @@ static void
dump_xml_element(const xmlNode *data, uint32_t options, GString *buffer,
int depth)
{
- const char *name = crm_element_name(data);
bool pretty = pcmk_is_set(options, pcmk__xml_fmt_pretty);
bool filtered = pcmk_is_set(options, pcmk__xml_fmt_filtered);
int spaces = pretty? (2 * depth) : 0;
- CRM_ASSERT(name != NULL);
-
for (int lpc = 0; lpc < spaces; lpc++) {
g_string_append_c(buffer, ' ');
}
- pcmk__g_strcat(buffer, "<", name, NULL);
+ pcmk__g_strcat(buffer, "<", data->name, NULL);
for (const xmlAttr *attr = pcmk__xe_first_attr(data); attr != NULL;
attr = attr->next) {
if (!filtered || !pcmk__xa_filterable((const char *) (attr->name))) {
- dump_xml_attr(attr, buffer);
+ pcmk__dump_xml_attr(attr, buffer);
}
}
@@ -1457,16 +1442,16 @@ dump_xml_element(const xmlNode *data, uint32_t options, GString *buffer,
}
if (data->children) {
- xmlNode *xChild = NULL;
- for(xChild = data->children; xChild != NULL; xChild = xChild->next) {
- pcmk__xml2text(xChild, options, buffer, depth + 1);
+ for (const xmlNode *child = data->children; child != NULL;
+ child = child->next) {
+ pcmk__xml2text(child, options, buffer, depth + 1);
}
for (int lpc = 0; lpc < spaces; lpc++) {
g_string_append_c(buffer, ' ');
}
- pcmk__g_strcat(buffer, "</", name, ">", NULL);
+ pcmk__g_strcat(buffer, "</", data->name, ">", NULL);
if (pretty) {
g_string_append_c(buffer, '\n');
@@ -1559,7 +1544,45 @@ dump_xml_comment(const xmlNode *data, uint32_t options, GString *buffer,
}
}
-#define PCMK__XMLDUMP_STATS 0
+/*!
+ * \internal
+ * \brief Get a string representation of an XML element type
+ *
+ * \param[in] type XML element type
+ *
+ * \return String representation of \p type
+ */
+static const char *
+xml_element_type2str(xmlElementType type)
+{
+ static const char *const element_type_names[] = {
+ [XML_ELEMENT_NODE] = "element",
+ [XML_ATTRIBUTE_NODE] = "attribute",
+ [XML_TEXT_NODE] = "text",
+ [XML_CDATA_SECTION_NODE] = "CDATA section",
+ [XML_ENTITY_REF_NODE] = "entity reference",
+ [XML_ENTITY_NODE] = "entity",
+ [XML_PI_NODE] = "PI",
+ [XML_COMMENT_NODE] = "comment",
+ [XML_DOCUMENT_NODE] = "document",
+ [XML_DOCUMENT_TYPE_NODE] = "document type",
+ [XML_DOCUMENT_FRAG_NODE] = "document fragment",
+ [XML_NOTATION_NODE] = "notation",
+ [XML_HTML_DOCUMENT_NODE] = "HTML document",
+ [XML_DTD_NODE] = "DTD",
+ [XML_ELEMENT_DECL] = "element declaration",
+ [XML_ATTRIBUTE_DECL] = "attribute declaration",
+ [XML_ENTITY_DECL] = "entity declaration",
+ [XML_NAMESPACE_DECL] = "namespace declaration",
+ [XML_XINCLUDE_START] = "XInclude start",
+ [XML_XINCLUDE_END] = "XInclude end",
+ };
+
+ if ((type < 0) || (type >= PCMK__NELEM(element_type_names))) {
+ return "unrecognized type";
+ }
+ return element_type_names[type];
+}
/*!
* \internal
@@ -1571,7 +1594,8 @@ dump_xml_comment(const xmlNode *data, uint32_t options, GString *buffer,
* \param[in] depth Current indentation level
*/
void
-pcmk__xml2text(xmlNodePtr data, uint32_t options, GString *buffer, int depth)
+pcmk__xml2text(const xmlNode *data, uint32_t options, GString *buffer,
+ int depth)
{
if (data == NULL) {
crm_trace("Nothing to dump");
@@ -1581,60 +1605,6 @@ pcmk__xml2text(xmlNodePtr data, uint32_t options, GString *buffer, int depth)
CRM_ASSERT(buffer != NULL);
CRM_CHECK(depth >= 0, depth = 0);
- if (pcmk_is_set(options, pcmk__xml_fmt_full)) {
- /* libxml's serialization reuse is a good idea, sadly we cannot
- apply it for the filtered cases (preceding filtering pass
- would preclude further reuse of such in-situ modified XML
- in generic context and is likely not a win performance-wise),
- and there's also a historically unstable throughput argument
- (likely stemming from memory allocation overhead, eventhough
- that shall be minimized with defaults preset in crm_xml_init) */
-#if (PCMK__XMLDUMP_STATS - 0)
- time_t next, new = time(NULL);
-#endif
- xmlDoc *doc;
- xmlOutputBuffer *xml_buffer;
-
- doc = getDocPtr(data);
- /* doc will only be NULL if data is */
- CRM_CHECK(doc != NULL, return);
-
- xml_buffer = xmlAllocOutputBuffer(NULL);
- CRM_ASSERT(xml_buffer != NULL);
-
- /* XXX we could setup custom allocation scheme for the particular
- buffer, but it's subsumed with crm_xml_init that needs to
- be invoked prior to entering this function as such, since
- its other branch vitally depends on it -- what can be done
- about this all is to have a facade parsing functions that
- would 100% mark entering libxml code for us, since we don't
- do anything as crazy as swapping out the binary form of the
- parsed tree (but those would need to be strictly used as
- opposed to libxml's raw functions) */
-
- xmlNodeDumpOutput(xml_buffer, doc, data, 0,
- pcmk_is_set(options, pcmk__xml_fmt_pretty), NULL);
- /* attempt adding final NL - failing shouldn't be fatal here */
- (void) xmlOutputBufferWrite(xml_buffer, sizeof("\n") - 1, "\n");
- if (xml_buffer->buffer != NULL) {
- g_string_append(buffer,
- (const gchar *) xmlBufContent(xml_buffer->buffer));
- }
-
-#if (PCMK__XMLDUMP_STATS - 0)
- next = time(NULL);
- if ((now + 1) < next) {
- crm_log_xml_trace(data, "Long time");
- crm_err("xmlNodeDumpOutput() -> %lld bytes took %ds",
- (long long) buffer->len, next - now);
- }
-#endif
-
- /* asserted allocation before so there should be something to remove */
- (void) xmlOutputBufferClose(xml_buffer);
- return;
- }
-
switch(data->type) {
case XML_ELEMENT_NODE:
/* Handle below */
@@ -1642,11 +1612,6 @@ pcmk__xml2text(xmlNodePtr data, uint32_t options, GString *buffer, int depth)
break;
case XML_TEXT_NODE:
if (pcmk_is_set(options, pcmk__xml_fmt_text)) {
- /* @COMPAT: Remove when log_data_element() is removed. There are
- * no other internal code paths that set pcmk__xml_fmt_text.
- * Keep an empty case handler so that we don't log an unhandled
- * type warning.
- */
dump_xml_text(data, options, buffer, depth);
}
break;
@@ -1657,39 +1622,23 @@ pcmk__xml2text(xmlNodePtr data, uint32_t options, GString *buffer, int depth)
dump_xml_cdata(data, options, buffer, depth);
break;
default:
- crm_warn("Unhandled type: %d", data->type);
+ crm_warn("Cannot convert XML %s node to text " CRM_XS " type=%d",
+ xml_element_type2str(data->type), data->type);
break;
-
- /*
- XML_ATTRIBUTE_NODE = 2
- XML_ENTITY_REF_NODE = 5
- XML_ENTITY_NODE = 6
- XML_PI_NODE = 7
- XML_DOCUMENT_NODE = 9
- XML_DOCUMENT_TYPE_NODE = 10
- XML_DOCUMENT_FRAG_NODE = 11
- XML_NOTATION_NODE = 12
- XML_HTML_DOCUMENT_NODE = 13
- XML_DTD_NODE = 14
- XML_ELEMENT_DECL = 15
- XML_ATTRIBUTE_DECL = 16
- XML_ENTITY_DECL = 17
- XML_NAMESPACE_DECL = 18
- XML_XINCLUDE_START = 19
- XML_XINCLUDE_END = 20
- XML_DOCB_DOCUMENT_NODE = 21
- */
}
}
char *
-dump_xml_formatted_with_text(xmlNode * an_xml_node)
+dump_xml_formatted_with_text(const xmlNode *xml)
{
+ /* libxml's xmlNodeDumpOutput() would work here since we're not specifically
+ * filtering out any nodes. However, use pcmk__xml2text() for consistency,
+ * to escape attribute values, and to allow a const argument.
+ */
char *buffer = NULL;
GString *g_buffer = g_string_sized_new(1024);
- pcmk__xml2text(an_xml_node, pcmk__xml_fmt_pretty|pcmk__xml_fmt_full,
- g_buffer, 0);
+ pcmk__xml2text(xml, pcmk__xml_fmt_pretty|pcmk__xml_fmt_text, g_buffer, 0);
pcmk__str_update(&buffer, g_buffer->str);
g_string_free(g_buffer, TRUE);
@@ -1697,12 +1646,12 @@ dump_xml_formatted_with_text(xmlNode * an_xml_node)
}
char *
-dump_xml_formatted(xmlNode * an_xml_node)
+dump_xml_formatted(const xmlNode *xml)
{
char *buffer = NULL;
GString *g_buffer = g_string_sized_new(1024);
- pcmk__xml2text(an_xml_node, pcmk__xml_fmt_pretty, g_buffer, 0);
+ pcmk__xml2text(xml, pcmk__xml_fmt_pretty, g_buffer, 0);
pcmk__str_update(&buffer, g_buffer->str);
g_string_free(g_buffer, TRUE);
@@ -1710,30 +1659,46 @@ dump_xml_formatted(xmlNode * an_xml_node)
}
char *
-dump_xml_unformatted(xmlNode * an_xml_node)
+dump_xml_unformatted(const xmlNode *xml)
{
char *buffer = NULL;
GString *g_buffer = g_string_sized_new(1024);
- pcmk__xml2text(an_xml_node, 0, g_buffer, 0);
+ pcmk__xml2text(xml, 0, g_buffer, 0);
pcmk__str_update(&buffer, g_buffer->str);
g_string_free(g_buffer, TRUE);
return buffer;
}
-gboolean
-xml_has_children(const xmlNode * xml_root)
+int
+pcmk__xml2fd(int fd, xmlNode *cur)
{
- if (xml_root != NULL && xml_root->children != NULL) {
- return TRUE;
+ bool success;
+
+ xmlOutputBuffer *fd_out = xmlOutputBufferCreateFd(fd, NULL);
+ CRM_ASSERT(fd_out != NULL);
+ xmlNodeDumpOutput(fd_out, cur->doc, cur, 0, pcmk__xml_fmt_pretty, NULL);
+
+ success = xmlOutputBufferWrite(fd_out, sizeof("\n") - 1, "\n") != -1;
+
+ success = xmlOutputBufferClose(fd_out) != -1 && success;
+
+ if (!success) {
+ return EIO;
}
- return FALSE;
+
+ fsync(fd);
+ return pcmk_rc_ok;
}
void
xml_remove_prop(xmlNode * obj, const char *name)
{
+ if (crm_element_value(obj, name) == NULL) {
+ return;
+ }
+
if (pcmk__check_acl(obj, NULL, pcmk__xf_acl_write) == FALSE) {
crm_trace("Cannot remove %s from %s", name, obj->name);
@@ -1750,7 +1715,7 @@ xml_remove_prop(xmlNode * obj, const char *name)
}
void
-save_xml_to_file(xmlNode * xml, const char *desc, const char *filename)
+save_xml_to_file(const xmlNode *xml, const char *desc, const char *filename)
{
char *f = NULL;
@@ -1864,7 +1829,7 @@ mark_attr_moved(xmlNode *new_xml, const char *element, xmlAttr *old_attr,
old_attr->name, p_old, p_new, element);
// Mark document, element, and all element's parents as changed
- mark_xml_node_dirty(new_xml);
+ pcmk__mark_xml_node_dirty(new_xml);
// Mark attribute as changed
pcmk__set_xml_flags(nodepriv, pcmk__xf_dirty|pcmk__xf_moved);
@@ -1886,10 +1851,10 @@ xml_diff_old_attrs(xmlNode *old_xml, xmlNode *new_xml)
xmlAttr *attr_iter = pcmk__xe_first_attr(old_xml);
while (attr_iter != NULL) {
+ const char *name = (const char *) attr_iter->name;
xmlAttr *old_attr = attr_iter;
xmlAttr *new_attr = xmlHasProp(new_xml, attr_iter->name);
- const char *name = (const char *) attr_iter->name;
- const char *old_value = crm_element_value(old_xml, name);
+ const char *old_value = pcmk__xml_attr_value(attr_iter);
attr_iter = attr_iter->next;
if (new_attr == NULL) {
@@ -1943,7 +1908,7 @@ mark_created_attrs(xmlNode *new_xml)
const char *attr_name = (const char *) new_attr->name;
crm_trace("Created new attribute %s=%s in %s",
- attr_name, crm_element_value(new_xml, attr_name),
+ attr_name, pcmk__xml_attr_value(new_attr),
new_xml->name);
/* Check ACLs (we can't use the remove-then-create trick because it
@@ -2017,7 +1982,7 @@ mark_child_moved(xmlNode *old_child, xmlNode *new_parent, xmlNode *new_child,
crm_trace("Child element %s with id='%s' moved from position %d to %d under %s",
new_child->name, (ID(new_child)? ID(new_child) : "<no id>"),
p_old, p_new, new_parent->name);
- mark_xml_node_dirty(new_parent);
+ pcmk__mark_xml_node_dirty(new_parent);
pcmk__set_xml_flags(nodepriv, pcmk__xf_moved);
if (p_old > p_new) {
@@ -2102,9 +2067,10 @@ xml_calculate_significant_changes(xmlNode *old_xml, xmlNode *new_xml)
void
xml_calculate_changes(xmlNode *old_xml, xmlNode *new_xml)
{
- CRM_CHECK(pcmk__str_eq(crm_element_name(old_xml), crm_element_name(new_xml), pcmk__str_casei),
+ CRM_CHECK((old_xml != NULL) && (new_xml != NULL)
+ && pcmk__xe_is(old_xml, (const char *) new_xml->name)
+ && pcmk__str_eq(ID(old_xml), ID(new_xml), pcmk__str_none),
return);
- CRM_CHECK(pcmk__str_eq(ID(old_xml), ID(new_xml), pcmk__str_casei), return);
if(xml_tracking_changes(new_xml) == FALSE) {
xml_track_changes(new_xml, NULL, NULL, FALSE);
@@ -2118,10 +2084,13 @@ can_prune_leaf(xmlNode * xml_node)
{
xmlNode *cIter = NULL;
gboolean can_prune = TRUE;
- const char *name = crm_element_name(xml_node);
- if (pcmk__strcase_any_of(name, XML_TAG_RESOURCE_REF, XML_CIB_TAG_OBJ_REF,
- XML_ACL_TAG_ROLE_REF, XML_ACL_TAG_ROLE_REFv1, NULL)) {
+ CRM_CHECK(xml_node != NULL, return FALSE);
+
+ if (pcmk__strcase_any_of((const char *) xml_node->name,
+ XML_TAG_RESOURCE_REF, XML_CIB_TAG_OBJ_REF,
+ XML_ACL_TAG_ROLE_REF, XML_ACL_TAG_ROLE_REFv1,
+ NULL)) {
return FALSE;
}
@@ -2257,7 +2226,7 @@ pcmk__xml_update(xmlNode *parent, xmlNode *target, xmlNode *update,
return;
}
- object_name = crm_element_name(update);
+ object_name = (const char *) update->name;
object_href_val = ID(update);
if (object_href_val != NULL) {
object_href = XML_ATTR_ID;
@@ -2294,9 +2263,7 @@ pcmk__xml_update(xmlNode *parent, xmlNode *target, xmlNode *update,
#endif
}
- CRM_CHECK(pcmk__str_eq(crm_element_name(target), crm_element_name(update),
- pcmk__str_casei),
- return);
+ CRM_CHECK(pcmk__xe_is(target, (const char *) update->name), return);
if (as_diff == FALSE) {
/* So that expand_plus_plus() gets called */
@@ -2345,7 +2312,7 @@ update_xml_child(xmlNode * child, xmlNode * to_update)
CRM_CHECK(child != NULL, return FALSE);
CRM_CHECK(to_update != NULL, return FALSE);
- if (!pcmk__str_eq(crm_element_name(to_update), crm_element_name(child), pcmk__str_none)) {
+ if (!pcmk__xe_is(to_update, (const char *) child->name)) {
can_update = FALSE;
} else if (!pcmk__str_eq(ID(to_update), ID(child), pcmk__str_none)) {
@@ -2379,7 +2346,7 @@ find_xml_children(xmlNode ** children, xmlNode * root,
CRM_CHECK(root != NULL, return FALSE);
CRM_CHECK(children != NULL, return FALSE);
- if (tag != NULL && !pcmk__str_eq(tag, crm_element_name(root), pcmk__str_casei)) {
+ if ((tag != NULL) && !pcmk__xe_is(root, tag)) {
} else if (value != NULL && !pcmk__str_eq(value, crm_element_value(root, field), pcmk__str_casei)) {
@@ -2422,7 +2389,7 @@ replace_xml_child(xmlNode * parent, xmlNode * child, xmlNode * update, gboolean
if (up_id == NULL || (child_id && strcmp(child_id, up_id) == 0)) {
can_delete = TRUE;
}
- if (!pcmk__str_eq(crm_element_name(update), crm_element_name(child), pcmk__str_casei)) {
+ if (!pcmk__xe_is(update, (const char *) child->name)) {
can_delete = FALSE;
}
if (can_delete && delete_only) {
@@ -2444,23 +2411,23 @@ replace_xml_child(xmlNode * parent, xmlNode * child, xmlNode * update, gboolean
free_xml(child);
} else {
- xmlNode *tmp = copy_xml(update);
- xmlDoc *doc = tmp->doc;
- xmlNode *old = NULL;
+ xmlNode *old = child;
+ xmlNode *new = xmlCopyNode(update, 1);
- xml_accept_changes(tmp);
- old = xmlReplaceNode(child, tmp);
+ CRM_ASSERT(new != NULL);
- if(xml_tracking_changes(tmp)) {
- /* Replaced sections may have included relevant ACLs */
- pcmk__apply_acl(tmp);
- }
+ // May be unnecessary but avoids slight changes to some test outputs
+ reset_xml_node_flags(new);
- xml_calculate_changes(old, tmp);
- xmlDocSetRootElement(doc, old);
- free_xml(old);
+ old = xmlReplaceNode(old, new);
+
+ if (xml_tracking_changes(new)) {
+ // Replaced sections may have included relevant ACLs
+ pcmk__apply_acl(new);
+ }
+ xml_calculate_changes(old, new);
+ xmlFreeNode(old);
}
- child = NULL;
return TRUE;
} else if (can_delete) {
@@ -2491,14 +2458,10 @@ sorted_xml(xmlNode *input, xmlNode *parent, gboolean recursive)
xmlNode *child = NULL;
GSList *nvpairs = NULL;
xmlNode *result = NULL;
- const char *name = NULL;
CRM_CHECK(input != NULL, return NULL);
- name = crm_element_name(input);
- CRM_CHECK(name != NULL, return NULL);
-
- result = create_xml_node(parent, name);
+ result = create_xml_node(parent, (const char *) input->name);
nvpairs = pcmk_xml_attrs2nvpairs(input);
nvpairs = pcmk_sort_nvpairs(nvpairs);
pcmk_nvpairs2xml_attrs(nvpairs, result);
@@ -2547,10 +2510,9 @@ xmlNode *
crm_next_same_xml(const xmlNode *sibling)
{
xmlNode *match = pcmk__xe_next(sibling);
- const char *name = crm_element_name(sibling);
while (match != NULL) {
- if (!strcmp(crm_element_name(match), name)) {
+ if (pcmk__xe_is(match, (const char *) sibling->name)) {
return match;
}
match = pcmk__xe_next(match);
@@ -2592,7 +2554,6 @@ crm_xml_cleanup(void)
xmlNode *
expand_idref(xmlNode * input, xmlNode * top)
{
- const char *tag = NULL;
const char *ref = NULL;
xmlNode *result = input;
@@ -2603,12 +2564,10 @@ expand_idref(xmlNode * input, xmlNode * top)
top = input;
}
- tag = crm_element_name(result);
ref = crm_element_value(result, XML_ATTR_IDREF);
-
if (ref != NULL) {
char *xpath_string = crm_strdup_printf("//%s[@" XML_ATTR_ID "='%s']",
- tag, ref);
+ result->name, ref);
result = get_xpath_object(xpath_string, top, LOG_ERR);
if (result == NULL) {
@@ -2630,7 +2589,7 @@ pcmk__xml_artefact_root(enum pcmk__xml_artefact_ns ns)
char *ret = NULL;
if (base == NULL) {
- base = getenv("PCMK_schema_directory");
+ base = pcmk__env_option(PCMK__ENV_SCHEMA_DIRECTORY);
}
if (pcmk__str_empty(base)) {
base = CRM_SCHEMA_DIRECTORY;
@@ -2741,6 +2700,21 @@ crm_destroy_xml(gpointer data)
free_xml(data);
}
+xmlDoc *
+getDocPtr(xmlNode *node)
+{
+ xmlDoc *doc = NULL;
+
+ CRM_CHECK(node != NULL, return NULL);
+
+ doc = node->doc;
+ if (doc == NULL) {
+ doc = xmlNewDoc((pcmkXmlStr) "1.0");
+ xmlDocSetRootElement(doc, node);
+ }
+ return doc;
+}
+
int
add_node_nocopy(xmlNode *parent, const char *name, xmlNode *child)
{
@@ -2749,5 +2723,14 @@ add_node_nocopy(xmlNode *parent, const char *name, xmlNode *child)
return 1;
}
+gboolean
+xml_has_children(const xmlNode * xml_root)
+{
+ if (xml_root != NULL && xml_root->children != NULL) {
+ return TRUE;
+ }
+ return FALSE;
+}
+
// LCOV_EXCL_STOP
// End deprecated API
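
A condensed sketch of the parse-retry pattern the string2xml() and filename2xml() hunks above introduce (illustrative; "input" and "ctxt" stand in for the caller's data and parser context):

    xmlDoc *doc = xmlCtxtReadDoc(ctxt, (pcmkXmlStr) input, NULL, NULL,
                                 PCMK__XML_PARSE_OPTS_WITHOUT_RECOVER);
    if (doc == NULL) {
        // Strict parse failed; retry permissively and warn, since a
        // future release will treat recoverable errors as fatal
        doc = xmlCtxtReadDoc(ctxt, (pcmkXmlStr) input, NULL, NULL,
                             PCMK__XML_PARSE_OPTS_WITH_RECOVER);
        if (doc != NULL) {
            crm_warn("Successfully recovered from XML errors");
        }
    }
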
diff --git a/lib/common/xml_attr.c b/lib/common/xml_attr.c
new file mode 100644
index 0000000..427d267
--- /dev/null
+++ b/lib/common/xml_attr.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <stdio.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <time.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <bzlib.h>
+
+#include <libxml/parser.h>
+#include <libxml/tree.h>
+#include <libxml/xmlIO.h> /* xmlAllocOutputBuffer */
+
+#include <crm/crm.h>
+#include <crm/msg_xml.h>
+#include <crm/common/xml.h>
+#include <crm/common/xml_internal.h> // PCMK__XML_LOG_BASE, etc.
+#include "crmcommon_private.h"
+
+void
+pcmk__mark_xml_attr_dirty(xmlAttr *a)
+{
+ xmlNode *parent = a->parent;
+ xml_node_private_t *nodepriv = a->_private;
+
+ pcmk__set_xml_flags(nodepriv, pcmk__xf_dirty|pcmk__xf_modified);
+ pcmk__clear_xml_flags(nodepriv, pcmk__xf_deleted);
+ pcmk__mark_xml_node_dirty(parent);
+}
+
+// This also clears attribute's flags if not marked as deleted
+bool
+pcmk__marked_as_deleted(xmlAttrPtr a, void *user_data)
+{
+ xml_node_private_t *nodepriv = a->_private;
+
+ if (pcmk_is_set(nodepriv->flags, pcmk__xf_deleted)) {
+ return true;
+ }
+ nodepriv->flags = pcmk__xf_none;
+ return false;
+}
+
+/*!
+ * \internal
+ * \brief Append an XML attribute to a buffer
+ *
+ * \param[in] attr Attribute to append
+ * \param[in,out] buffer Where to append the content (must not be \p NULL)
+ */
+void
+pcmk__dump_xml_attr(const xmlAttr *attr, GString *buffer)
+{
+ char *p_value = NULL;
+ const char *p_name = NULL;
+ xml_node_private_t *nodepriv = NULL;
+
+ if (attr == NULL || attr->children == NULL) {
+ return;
+ }
+
+ nodepriv = attr->_private;
+ if (nodepriv && pcmk_is_set(nodepriv->flags, pcmk__xf_deleted)) {
+ return;
+ }
+
+ p_name = (const char *) attr->name;
+ p_value = crm_xml_escape((const char *)attr->children->content);
+ pcmk__g_strcat(buffer, " ", p_name, "=\"", pcmk__s(p_value, "<null>"), "\"",
+ NULL);
+
+ free(p_value);
+} \ No newline at end of file
diff --git a/lib/common/xml_display.c b/lib/common/xml_display.c
index e2d46ce..18cd3b9 100644
--- a/lib/common/xml_display.c
+++ b/lib/common/xml_display.c
@@ -92,7 +92,6 @@ static int
show_xml_element(pcmk__output_t *out, GString *buffer, const char *prefix,
const xmlNode *data, int depth, uint32_t options)
{
- const char *name = crm_element_name(data);
int spaces = pcmk_is_set(options, pcmk__xml_fmt_pretty)? (2 * depth) : 0;
int rc = pcmk_rc_no_output;
@@ -104,7 +103,7 @@ show_xml_element(pcmk__output_t *out, GString *buffer, const char *prefix,
for (int lpc = 0; lpc < spaces; lpc++) {
g_string_append_c(buffer, ' ');
}
- pcmk__g_strcat(buffer, "<", name, NULL);
+ pcmk__g_strcat(buffer, "<", data->name, NULL);
for (const xmlAttr *attr = pcmk__xe_first_attr(data); attr != NULL;
attr = attr->next) {
@@ -138,7 +137,7 @@ show_xml_element(pcmk__output_t *out, GString *buffer, const char *prefix,
free(p_copy);
}
- if (xml_has_children(data)
+ if ((data->children != NULL)
&& pcmk_is_set(options, pcmk__xml_fmt_children)) {
g_string_append_c(buffer, '>');
@@ -151,7 +150,7 @@ show_xml_element(pcmk__output_t *out, GString *buffer, const char *prefix,
buffer->str);
}
- if (!xml_has_children(data)) {
+ if (data->children == NULL) {
return rc;
}
@@ -171,7 +170,7 @@ show_xml_element(pcmk__output_t *out, GString *buffer, const char *prefix,
int temp_rc = out->info(out, "%s%s%*s</%s>",
pcmk__s(prefix, ""),
pcmk__str_empty(prefix)? "" : " ",
- spaces, "", name);
+ spaces, "", data->name);
rc = pcmk__output_select_rc(rc, temp_rc);
}
@@ -304,14 +303,14 @@ show_xml_changes_recursive(pcmk__output_t *out, const xmlNode *data, int depth,
nodepriv = attr->_private;
if (pcmk_is_set(nodepriv->flags, pcmk__xf_deleted)) {
- const char *value = crm_element_value(data, name);
+ const char *value = pcmk__xml_attr_value(attr);
temp_rc = out->info(out, "%s %*s @%s=%s",
PCMK__XML_PREFIX_DELETED, spaces, "", name,
value);
} else if (pcmk_is_set(nodepriv->flags, pcmk__xf_dirty)) {
- const char *value = crm_element_value(data, name);
+ const char *value = pcmk__xml_attr_value(attr);
if (pcmk_is_set(nodepriv->flags, pcmk__xf_created)) {
prefix = PCMK__XML_PREFIX_CREATED;
@@ -447,9 +446,6 @@ log_data_element(int log_level, const char *file, const char *function,
if (pcmk_is_set(legacy_options, xml_log_option_formatted)) {
options |= pcmk__xml_fmt_pretty;
}
- if (pcmk_is_set(legacy_options, xml_log_option_full_fledged)) {
- options |= pcmk__xml_fmt_full;
- }
if (pcmk_is_set(legacy_options, xml_log_option_open)) {
options |= pcmk__xml_fmt_open;
}
@@ -480,7 +476,7 @@ log_data_element(int log_level, const char *file, const char *function,
}
if (pcmk_is_set(options, pcmk__xml_fmt_pretty)
- && (!xml_has_children(data)
+ && ((data->children == NULL)
|| (crm_element_value(data, XML_DIFF_MARKER) != NULL))) {
if (pcmk_is_set(options, pcmk__xml_fmt_diff_plus)) {
diff --git a/lib/common/xpath.c b/lib/common/xpath.c
index 1f5c0a8..d90f1c5 100644
--- a/lib/common/xpath.c
+++ b/lib/common/xpath.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -136,9 +136,8 @@ dedupXpathResults(xmlXPathObjectPtr xpathObj)
/* the caller needs to check if the result contains a xmlDocPtr or xmlNodePtr */
xmlXPathObjectPtr
-xpath_search(xmlNode * xml_top, const char *path)
+xpath_search(const xmlNode *xml_top, const char *path)
{
- xmlDocPtr doc = NULL;
xmlXPathObjectPtr xpathObj = NULL;
xmlXPathContextPtr xpathCtx = NULL;
const xmlChar *xpathExpr = (pcmkXmlStr) path;
@@ -147,9 +146,7 @@ xpath_search(xmlNode * xml_top, const char *path)
CRM_CHECK(xml_top != NULL, return NULL);
CRM_CHECK(strlen(path) > 0, return NULL);
- doc = getDocPtr(xml_top);
-
- xpathCtx = xmlXPathNewContext(doc);
+ xpathCtx = xmlXPathNewContext(xml_top->doc);
CRM_ASSERT(xpathCtx != NULL);
xpathObj = xmlXPathEvalExpression(xpathExpr, xpathCtx);
@@ -298,9 +295,9 @@ pcmk__element_xpath(const xmlNode *xml)
if (parent == NULL) {
g_string_append_c(xpath, '/');
} else if (parent->parent == NULL) {
- g_string_append(xpath, TYPE(xml));
+ g_string_append(xpath, (const gchar *) xml->name);
} else {
- pcmk__g_strcat(xpath, "/", TYPE(xml), NULL);
+ pcmk__g_strcat(xpath, "/", (const char *) xml->name, NULL);
}
id = ID(xml);
diff --git a/lib/fencing/Makefile.am b/lib/fencing/Makefile.am
index a72b7d6..5302035 100644
--- a/lib/fencing/Makefile.am
+++ b/lib/fencing/Makefile.am
@@ -14,15 +14,19 @@ noinst_HEADERS = fencing_private.h
lib_LTLIBRARIES = libstonithd.la
-libstonithd_la_LDFLAGS = -version-info 34:3:8
+libstonithd_la_LDFLAGS = -version-info 34:4:8
libstonithd_la_CFLAGS = $(CFLAGS_HARDENED_LIB)
libstonithd_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB)
-libstonithd_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la
-libstonithd_la_LIBADD += $(top_builddir)/lib/services/libcrmservice.la
+libstonithd_la_LIBADD = $(top_builddir)/lib/services/libcrmservice.la
+libstonithd_la_LIBADD += $(top_builddir)/lib/common/libcrmcommon.la
-libstonithd_la_SOURCES = st_actions.c st_client.c st_output.c st_rhcs.c
+## Library sources (*must* use += format for bumplibs)
+libstonithd_la_SOURCES = st_actions.c
+libstonithd_la_SOURCES += st_client.c
if BUILD_LHA_SUPPORT
libstonithd_la_SOURCES += st_lha.c
endif
+libstonithd_la_SOURCES += st_output.c
+libstonithd_la_SOURCES += st_rhcs.c
diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c
index e2783d5..1d32cc1 100644
--- a/lib/fencing/st_client.c
+++ b/lib/fencing/st_client.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -515,7 +515,7 @@ stonith_api_device_metadata(stonith_t *stonith, int call_options,
enum stonith_namespace ns = stonith_get_namespace(agent, namespace);
if (timeout_sec <= 0) {
- timeout_sec = CRMD_METADATA_CALL_TIMEOUT;
+ timeout_sec = PCMK_DEFAULT_METADATA_TIMEOUT_MS;
}
crm_trace("Looking up metadata for %s agent %s",
@@ -553,7 +553,7 @@ stonith_api_query(stonith_t * stonith, int call_options, const char *target,
data = create_xml_node(NULL, F_STONITH_DEVICE);
crm_xml_add(data, F_STONITH_ORIGIN, __func__);
crm_xml_add(data, F_STONITH_TARGET, target);
- crm_xml_add(data, F_STONITH_ACTION, "off");
+ crm_xml_add(data, F_STONITH_ACTION, PCMK_ACTION_OFF);
rc = stonith_send_command(stonith, STONITH_OP_QUERY, data, &output, call_options, timeout);
if (rc < 0) {
@@ -625,7 +625,8 @@ stonith_api_list(stonith_t * stonith, int call_options, const char *id, char **l
int rc;
xmlNode *output = NULL;
- rc = stonith_api_call(stonith, call_options, id, "list", NULL, timeout, &output);
+ rc = stonith_api_call(stonith, call_options, id, PCMK_ACTION_LIST, NULL,
+ timeout, &output);
if (output && list_info) {
const char *list_str;
@@ -647,14 +648,16 @@ stonith_api_list(stonith_t * stonith, int call_options, const char *id, char **l
static int
stonith_api_monitor(stonith_t * stonith, int call_options, const char *id, int timeout)
{
- return stonith_api_call(stonith, call_options, id, "monitor", NULL, timeout, NULL);
+ return stonith_api_call(stonith, call_options, id, PCMK_ACTION_MONITOR,
+ NULL, timeout, NULL);
}
static int
stonith_api_status(stonith_t * stonith, int call_options, const char *id, const char *port,
int timeout)
{
- return stonith_api_call(stonith, call_options, id, "status", port, timeout, NULL);
+ return stonith_api_call(stonith, call_options, id, PCMK_ACTION_STATUS, port,
+ timeout, NULL);
}
static int
@@ -689,7 +692,8 @@ static int
stonith_api_confirm(stonith_t * stonith, int call_options, const char *target)
{
stonith__set_call_options(call_options, target, st_opt_manual_ack);
- return stonith_api_fence(stonith, call_options, target, "off", 0, 0);
+ return stonith_api_fence(stonith, call_options, target, PCMK_ACTION_OFF, 0,
+ 0);
}
static int
@@ -1105,13 +1109,20 @@ stonith_api_signon(stonith_t * stonith, const char *name, int *stonith_fd)
if (stonith_fd) {
/* No mainloop */
native->ipc = crm_ipc_new("stonith-ng", 0);
-
- if (native->ipc && crm_ipc_connect(native->ipc)) {
- *stonith_fd = crm_ipc_get_fd(native->ipc);
- } else if (native->ipc) {
- crm_ipc_close(native->ipc);
- crm_ipc_destroy(native->ipc);
- native->ipc = NULL;
+ if (native->ipc != NULL) {
+ rc = pcmk__connect_generic_ipc(native->ipc);
+ if (rc == pcmk_rc_ok) {
+ rc = pcmk__ipc_fd(native->ipc, stonith_fd);
+ if (rc != pcmk_rc_ok) {
+ crm_debug("Couldn't get file descriptor for IPC: %s",
+ pcmk_rc_str(rc));
+ }
+ }
+ if (rc != pcmk_rc_ok) {
+ crm_ipc_close(native->ipc);
+ crm_ipc_destroy(native->ipc);
+ native->ipc = NULL;
+ }
}
} else {
@@ -1765,7 +1776,7 @@ stonith_api_validate(stonith_t *st, int call_options, const char *rsc_id,
}
if (timeout_sec <= 0) {
- timeout_sec = CRMD_METADATA_CALL_TIMEOUT; // Questionable
+ timeout_sec = PCMK_DEFAULT_METADATA_TIMEOUT_MS; // Questionable
}
switch (stonith_get_namespace(agent, namespace_s)) {
@@ -1961,7 +1972,7 @@ stonith_api_kick(uint32_t nodeid, const char *uname, int timeout, bool off)
{
int rc = pcmk_ok;
stonith_t *st = stonith_api_new();
- const char *action = off? "off" : "reboot";
+ const char *action = off? PCMK_ACTION_OFF : PCMK_ACTION_REBOOT;
api_log_open();
if (st == NULL) {
@@ -2098,9 +2109,9 @@ stonith_action_str(const char *action)
{
if (action == NULL) {
return "fencing";
- } else if (!strcmp(action, "on")) {
+ } else if (strcmp(action, PCMK_ACTION_ON) == 0) {
return "unfencing";
- } else if (!strcmp(action, "off")) {
+ } else if (strcmp(action, PCMK_ACTION_OFF) == 0) {
return "turning off";
} else {
return action;
@@ -2160,7 +2171,8 @@ parse_list_line(const char *line, int len, GList **output)
line + entry_start, entry_start, i);
free(entry);
- } else if (pcmk__strcase_any_of(entry, "on", "off", NULL)) {
+ } else if (pcmk__strcase_any_of(entry, PCMK_ACTION_ON,
+ PCMK_ACTION_OFF, NULL)) {
/* Some agents print the target status in the list output,
* though none are known now (the separate list-status command
* is used for this, but it can also print "UNKNOWN"). To handle
diff --git a/lib/fencing/st_lha.c b/lib/fencing/st_lha.c
index d477ded..fd26217 100644
--- a/lib/fencing/st_lha.c
+++ b/lib/fencing/st_lha.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -41,10 +41,10 @@ static const char META_TEMPLATE[] =
" <shortdesc lang=\"en\">%s</shortdesc>\n"
"%s\n"
" <actions>\n"
- " <action name=\"start\" timeout=\"20\" />\n"
+ " <action name=\"start\" timeout=\"%s\" />\n"
" <action name=\"stop\" timeout=\"15\" />\n"
- " <action name=\"status\" timeout=\"20\" />\n"
- " <action name=\"monitor\" timeout=\"20\" interval=\"3600\"/>\n"
+ " <action name=\"status\" timeout=\"%s\" />\n"
+ " <action name=\"monitor\" timeout=\"%s\" interval=\"3600\"/>\n"
" <action name=\"meta-data\" timeout=\"15\" />\n"
" </actions>\n"
" <special tag=\"heartbeat\">\n"
@@ -200,6 +200,7 @@ stonith__lha_metadata(const char *agent, int timeout, char **output)
char *meta_param = NULL;
char *meta_longdesc = NULL;
char *meta_shortdesc = NULL;
+ const char *timeout_str = NULL;
stonith_obj = (*st_new_fn) (agent);
if (stonith_obj) {
@@ -236,8 +237,10 @@ stonith__lha_metadata(const char *agent, int timeout, char **output)
xml_meta_shortdesc =
(char *)xmlEncodeEntitiesReentrant(NULL, (const unsigned char *)meta_shortdesc);
+ timeout_str = pcmk__readable_interval(PCMK_DEFAULT_ACTION_TIMEOUT_MS);
buffer = crm_strdup_printf(META_TEMPLATE, agent, xml_meta_longdesc,
- xml_meta_shortdesc, meta_param);
+ xml_meta_shortdesc, meta_param,
+ timeout_str, timeout_str, timeout_str);
xmlFree(xml_meta_longdesc);
xmlFree(xml_meta_shortdesc);
diff --git a/lib/fencing/st_rhcs.c b/lib/fencing/st_rhcs.c
index ec80793..854d333 100644
--- a/lib/fencing/st_rhcs.c
+++ b/lib/fencing/st_rhcs.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -180,14 +180,17 @@ stonith__rhcs_get_metadata(const char *agent, int timeout_sec,
xpathObj = xpath_search(xml, "//action[@name='stop']");
if (numXpathResults(xpathObj) <= 0) {
xmlNode *tmp = NULL;
+ const char *timeout_str = NULL;
+
+ timeout_str = pcmk__readable_interval(PCMK_DEFAULT_ACTION_TIMEOUT_MS);
tmp = create_xml_node(actions, "action");
- crm_xml_add(tmp, "name", "stop");
- crm_xml_add(tmp, "timeout", CRM_DEFAULT_OP_TIMEOUT_S);
+ crm_xml_add(tmp, "name", PCMK_ACTION_STOP);
+ crm_xml_add(tmp, "timeout", timeout_str);
tmp = create_xml_node(actions, "action");
- crm_xml_add(tmp, "name", "start");
- crm_xml_add(tmp, "timeout", CRM_DEFAULT_OP_TIMEOUT_S);
+ crm_xml_add(tmp, "name", PCMK_ACTION_START);
+ crm_xml_add(tmp, "timeout", timeout_str);
}
freeXpathObject(xpathObj);
@@ -292,7 +295,7 @@ stonith__rhcs_validate(stonith_t *st, int call_options, const char *target,
host_arg = NULL;
}
- action = stonith__action_create(agent, "validate-all", target, 0,
+ action = stonith__action_create(agent, PCMK_ACTION_VALIDATE_ALL, target, 0,
remaining_timeout, params, NULL, host_arg);
rc = stonith__execute(action);
diff --git a/lib/lrmd/Makefile.am b/lib/lrmd/Makefile.am
index e9ac906..a9b9c67 100644
--- a/lib/lrmd/Makefile.am
+++ b/lib/lrmd/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2012-2020 the Pacemaker project contributors
+# Copyright 2012-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -10,12 +10,17 @@ include $(top_srcdir)/mk/common.mk
lib_LTLIBRARIES = liblrmd.la
-liblrmd_la_LDFLAGS = -version-info 29:6:1
+liblrmd_la_LDFLAGS = -version-info 30:0:2
liblrmd_la_CFLAGS = $(CFLAGS_HARDENED_LIB)
liblrmd_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB)
-liblrmd_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la \
- $(top_builddir)/lib/services/libcrmservice.la \
- $(top_builddir)/lib/fencing/libstonithd.la
-liblrmd_la_SOURCES = lrmd_client.c proxy_common.c lrmd_alerts.c lrmd_output.c
+liblrmd_la_LIBADD = $(top_builddir)/lib/fencing/libstonithd.la
+liblrmd_la_LIBADD += $(top_builddir)/lib/services/libcrmservice.la
+liblrmd_la_LIBADD += $(top_builddir)/lib/common/libcrmcommon.la
+
+## Library sources (*must* use += format for bumplibs)
+liblrmd_la_SOURCES = lrmd_alerts.c
+liblrmd_la_SOURCES += lrmd_client.c
+liblrmd_la_SOURCES += lrmd_output.c
+liblrmd_la_SOURCES += proxy_common.c
diff --git a/lib/lrmd/lrmd_alerts.c b/lib/lrmd/lrmd_alerts.c
index 588ff97..2a8c988 100644
--- a/lib/lrmd/lrmd_alerts.c
+++ b/lib/lrmd/lrmd_alerts.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2015-2022 the Pacemaker project contributors
+ * Copyright 2015-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -355,7 +355,7 @@ lrmd_send_resource_alert(lrmd_t *lrmd, const GList *alert_list,
target_rc = rsc_op_expected_rc(op);
if ((op->interval_ms == 0) && (target_rc == op->rc)
- && pcmk__str_eq(op->op_type, RSC_STATUS, pcmk__str_casei)) {
+ && pcmk__str_eq(op->op_type, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
/* Don't send alerts for probes with the expected result. Leave it up to
* the agent whether to alert for 'failed' probes. (Even if we find a
diff --git a/lib/lrmd/lrmd_client.c b/lib/lrmd/lrmd_client.c
index c565728..400d3b0 100644
--- a/lib/lrmd/lrmd_client.c
+++ b/lib/lrmd/lrmd_client.c
@@ -544,7 +544,20 @@ lrmd_ipc_connection_destroy(gpointer userdata)
lrmd_t *lrmd = userdata;
lrmd_private_t *native = lrmd->lrmd_private;
- crm_info("IPC connection destroyed");
+ switch (native->type) {
+ case pcmk__client_ipc:
+ crm_info("Disconnected from local executor");
+ break;
+#ifdef HAVE_GNUTLS_GNUTLS_H
+ case pcmk__client_tls:
+ crm_info("Disconnected from remote executor on %s",
+ native->remote_nodename);
+ break;
+#endif
+ default:
+ crm_err("Unsupported executor connection type %d (bug?)",
+ native->type);
+ }
/* Prevent these from being cleaned up in lrmd_api_disconnect() */
native->ipc = NULL;
@@ -588,7 +601,9 @@ lrmd_tls_connection_destroy(gpointer userdata)
}
free(native->remote->buffer);
+ free(native->remote->start_state);
native->remote->buffer = NULL;
+ native->remote->start_state = NULL;
native->source = 0;
native->sock = 0;
native->psk_cred_c = NULL;
@@ -980,6 +995,7 @@ lrmd_handshake(lrmd_t * lrmd, const char *name)
const char *version = crm_element_value(reply, F_LRMD_PROTOCOL_VERSION);
const char *msg_type = crm_element_value(reply, F_LRMD_OPERATION);
const char *tmp_ticket = crm_element_value(reply, F_LRMD_CLIENTID);
+ const char *start_state = crm_element_value(reply, PCMK__XA_NODE_START_STATE);
long long uptime = -1;
crm_element_value_int(reply, F_LRMD_RC, &rc);
@@ -992,6 +1008,10 @@ lrmd_handshake(lrmd_t * lrmd, const char *name)
crm_element_value_ll(reply, PCMK__XA_UPTIME, &uptime);
native->remote->uptime = uptime;
+ if (start_state) {
+ native->remote->start_state = strdup(start_state);
+ }
+
if (rc == -EPROTO) {
crm_err("Executor protocol version mismatch between client (%s) and server (%s)",
LRMD_PROTOCOL_VERSION, version);
@@ -1038,11 +1058,15 @@ lrmd_ipc_connect(lrmd_t * lrmd, int *fd)
if (fd) {
/* No mainloop */
native->ipc = crm_ipc_new(CRM_SYSTEM_LRMD, 0);
- if (native->ipc && crm_ipc_connect(native->ipc)) {
- *fd = crm_ipc_get_fd(native->ipc);
- } else if (native->ipc) {
- crm_perror(LOG_ERR, "Connection to executor failed");
- rc = -ENOTCONN;
+ if (native->ipc != NULL) {
+ rc = pcmk__connect_generic_ipc(native->ipc);
+ if (rc == pcmk_rc_ok) {
+ rc = pcmk__ipc_fd(native->ipc, fd);
+ }
+ if (rc != pcmk_rc_ok) {
+ crm_err("Connection to executor failed: %s", pcmk_rc_str(rc));
+ rc = -ENOTCONN;
+ }
}
} else {
native->source = mainloop_add_ipc_client(CRM_SYSTEM_LRMD, G_PRIORITY_HIGH, 0, lrmd, &lrmd_callbacks);
@@ -1238,7 +1262,7 @@ lrmd__init_remote_key(gnutls_datum_t *key)
bool env_is_fallback = false;
if (need_env) {
- env_location = getenv("PCMK_authkey_location");
+ env_location = pcmk__env_option(PCMK__ENV_AUTHKEY_LOCATION);
need_env = false;
}
@@ -1657,15 +1681,15 @@ lrmd_api_disconnect(lrmd_t * lrmd)
lrmd_private_t *native = lrmd->lrmd_private;
int rc = pcmk_ok;
- crm_info("Disconnecting %s %s executor connection",
- pcmk__client_type_str(native->type),
- (native->remote_nodename? native->remote_nodename : "local"));
switch (native->type) {
case pcmk__client_ipc:
+ crm_debug("Disconnecting from local executor");
lrmd_ipc_disconnect(lrmd);
break;
#ifdef HAVE_GNUTLS_GNUTLS_H
case pcmk__client_tls:
+ crm_debug("Disconnecting from remote executor on %s",
+ native->remote_nodename);
lrmd_tls_disconnect(lrmd);
break;
#endif
@@ -1964,8 +1988,8 @@ lrmd_api_get_metadata_params(lrmd_t *lrmd, const char *standard,
g_hash_table_insert(params_table, strdup(param->key), strdup(param->value));
}
action = services__create_resource_action(type, standard, provider, type,
- CRMD_ACTION_METADATA, 0,
- CRMD_METADATA_CALL_TIMEOUT,
+ PCMK_ACTION_META_DATA, 0,
+ PCMK_DEFAULT_METADATA_TIMEOUT_MS,
params_table, 0);
lrmd_key_value_freeall(params);
@@ -2421,14 +2445,15 @@ lrmd__metadata_async(const lrmd_rsc_info_t *rsc,
if (strcmp(rsc->standard, PCMK_RESOURCE_CLASS_STONITH) == 0) {
return stonith__metadata_async(rsc->type,
- CRMD_METADATA_CALL_TIMEOUT / 1000,
+ PCMK_DEFAULT_METADATA_TIMEOUT_MS / 1000,
callback, user_data);
}
action = services__create_resource_action(pcmk__s(rsc->id, rsc->type),
rsc->standard, rsc->provider,
- rsc->type, CRMD_ACTION_METADATA,
- 0, CRMD_METADATA_CALL_TIMEOUT,
+ rsc->type,
+ PCMK_ACTION_META_DATA, 0,
+ PCMK_DEFAULT_METADATA_TIMEOUT_MS,
NULL, 0);
if (action == NULL) {
pcmk__set_result(&result, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR,
@@ -2531,3 +2556,15 @@ lrmd__uptime(lrmd_t *lrmd)
return native->remote->uptime;
}
}
+
+const char *
+lrmd__node_start_state(lrmd_t *lrmd)
+{
+ lrmd_private_t *native = lrmd->lrmd_private;
+
+ if (native->remote == NULL) {
+ return NULL;
+ } else {
+ return native->remote->start_state;
+ }
+}
diff --git a/lib/pacemaker/Makefile.am b/lib/pacemaker/Makefile.am
index ebf3b6d..06f8dfb 100644
--- a/lib/pacemaker/Makefile.am
+++ b/lib/pacemaker/Makefile.am
@@ -16,24 +16,24 @@ noinst_HEADERS = libpacemaker_private.h
## libraries
lib_LTLIBRARIES = libpacemaker.la
-## SOURCES
-
-libpacemaker_la_LDFLAGS = -version-info 7:0:6
+libpacemaker_la_LDFLAGS = -version-info 8:0:7
libpacemaker_la_CFLAGS = $(CFLAGS_HARDENED_LIB)
libpacemaker_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB)
-libpacemaker_la_LIBADD = $(top_builddir)/lib/pengine/libpe_status.la \
- $(top_builddir)/lib/cib/libcib.la \
- $(top_builddir)/lib/lrmd/liblrmd.la \
- $(top_builddir)/lib/fencing/libstonithd.la \
- $(top_builddir)/lib/services/libcrmservice.la \
- $(top_builddir)/lib/common/libcrmcommon.la
+libpacemaker_la_LIBADD = $(top_builddir)/lib/pengine/libpe_status.la
+libpacemaker_la_LIBADD += $(top_builddir)/lib/cib/libcib.la
+libpacemaker_la_LIBADD += $(top_builddir)/lib/lrmd/liblrmd.la
+libpacemaker_la_LIBADD += $(top_builddir)/lib/fencing/libstonithd.la
+libpacemaker_la_LIBADD += $(top_builddir)/lib/services/libcrmservice.la
+libpacemaker_la_LIBADD += $(top_builddir)/lib/common/libcrmcommon.la
# -L$(top_builddir)/lib/pils -lpils -export-dynamic -module -avoid-version
-# Use += rather than backlashed continuation lines for parsing by bumplibs
+
+## Library sources (*must* use += format for bumplibs)
libpacemaker_la_SOURCES =
libpacemaker_la_SOURCES += pcmk_acl.c
+libpacemaker_la_SOURCES += pcmk_agents.c
libpacemaker_la_SOURCES += pcmk_cluster_queries.c
libpacemaker_la_SOURCES += pcmk_fence.c
libpacemaker_la_SOURCES += pcmk_graph_consumer.c
diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h
index 192d5a7..c4a0c90 100644
--- a/lib/pacemaker/libpacemaker_private.h
+++ b/lib/pacemaker/libpacemaker_private.h
@@ -14,7 +14,20 @@
* declared with G_GNUC_INTERNAL for efficiency.
*/
-#include <crm/pengine/pe_types.h> // pe_action_t, pe_node_t, pe_working_set_t
+#include <crm/lrmd_events.h> // lrmd_event_data_t
+#include <crm/common/scheduler.h> // pcmk_action_t, pcmk_node_t, etc.
+#include <crm/pengine/internal.h> // pe__location_t
+
+// Colocation flags
+enum pcmk__coloc_flags {
+ pcmk__coloc_none = 0U,
+
+ // Primary is affected even if already active
+ pcmk__coloc_influence = (1U << 0),
+
+ // Colocation was explicitly configured in CIB
+ pcmk__coloc_explicit = (1U << 1),
+};
// Flags to modify the behavior of add_colocated_node_scores()
enum pcmk__coloc_select {
@@ -52,18 +65,30 @@ enum pcmk__updated {
(flags_to_clear), #flags_to_clear); \
} while (0)
-// Resource allocation methods
+// Resource assignment methods
struct resource_alloc_functions_s {
/*!
* \internal
* \brief Assign a resource to a node
*
- * \param[in,out] rsc Resource to assign to a node
- * \param[in] prefer Node to prefer, if all else is equal
+ * \param[in,out] rsc Resource to assign to a node
+ * \param[in] prefer Node to prefer, if all else is equal
+ * \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a
+ * node, set next role to stopped and update
+ * existing actions (if \p rsc is not a
+ * primitive, this applies to its primitive
+ * descendants instead)
*
* \return Node that \p rsc is assigned to, if assigned entirely to one node
+ *
+ * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource()
+ * can completely undo the assignment. A successful assignment can be
+ * either undone or left alone as final. A failed assignment has the
+ * same effect as calling pcmk__unassign_resource(); there are no side
+ * effects on roles or actions.
*/
- pe_node_t *(*assign)(pe_resource_t *rsc, const pe_node_t *prefer);
+ pcmk_node_t *(*assign)(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
+ bool stop_if_fail);
/*!
* \internal
@@ -71,7 +96,7 @@ struct resource_alloc_functions_s {
*
* \param[in,out] rsc Resource to create actions for
*/
- void (*create_actions)(pe_resource_t *rsc);
+ void (*create_actions)(pcmk_resource_t *rsc);
/*!
* \internal
@@ -82,7 +107,7 @@ struct resource_alloc_functions_s {
*
* \return true if any probe was created, otherwise false
*/
- bool (*create_probe)(pe_resource_t *rsc, pe_node_t *node);
+ bool (*create_probe)(pcmk_resource_t *rsc, pcmk_node_t *node);
/*!
* \internal
@@ -90,14 +115,14 @@ struct resource_alloc_functions_s {
*
* \param[in,out] rsc Resource to create implicit constraints for
*/
- void (*internal_constraints)(pe_resource_t *rsc);
+ void (*internal_constraints)(pcmk_resource_t *rsc);
/*!
* \internal
- * \brief Apply a colocation's score to node weights or resource priority
+ * \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint, apply its score to the dependent's
- * allowed node weights (if we are still placing resources) or priority (if
+ * allowed node scores (if we are still placing resources) or priority (if
* we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
@@ -105,17 +130,17 @@ struct resource_alloc_functions_s {
* \param[in] colocation Colocation constraint to apply
* \param[in] for_dependent true if called on behalf of dependent
*/
- void (*apply_coloc_score) (pe_resource_t *dependent,
- const pe_resource_t *primary,
- const pcmk__colocation_t *colocation,
- bool for_dependent);
+ void (*apply_coloc_score)(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
+ const pcmk__colocation_t *colocation,
+ bool for_dependent);
/*!
* \internal
* \brief Create list of all resources in colocations with a given resource
*
* Given a resource, create a list of all resources involved in mandatory
- * colocations with it, whether directly or indirectly via chained colocations.
+ * colocations with it, whether directly or via chained colocations.
*
* \param[in] rsc Resource to add to colocated list
* \param[in] orig_rsc Resource originally requested
@@ -127,8 +152,8 @@ struct resource_alloc_functions_s {
* \p colocated_rscs and \p orig_rsc, and the desired resource as
* \p rsc. The recursive calls will use other values.
*/
- GList *(*colocated_resources)(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc,
+ GList *(*colocated_resources)(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
GList *colocated_rscs);
/*!
@@ -148,8 +173,9 @@ struct resource_alloc_functions_s {
* \note The pcmk__with_this_colocations() wrapper should usually be used
* instead of using this method directly.
*/
- void (*with_this_colocations)(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list);
+ void (*with_this_colocations)(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList **list);
/*!
* \internal
@@ -169,8 +195,9 @@ struct resource_alloc_functions_s {
* \note The pcmk__this_with_colocations() wrapper should usually be used
* instead of using this method directly.
*/
- void (*this_with_colocations)(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list);
+ void (*this_with_colocations)(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList **list);
/*!
* \internal
@@ -180,17 +207,31 @@ struct resource_alloc_functions_s {
* scores of the best nodes matching the attribute used for each of the
* resource's relevant colocations.
*
- * \param[in,out] rsc Resource to check colocations for
- * \param[in] log_id Resource ID to use in logs (if NULL, use \p rsc ID)
- * \param[in,out] nodes Nodes to update
- * \param[in] attr Colocation attribute (NULL to use default)
- * \param[in] factor Incorporate scores multiplied by this factor
- * \param[in] flags Bitmask of enum pcmk__coloc_select values
+ * \param[in,out] source_rsc Resource whose node scores to add
+ * \param[in] target_rsc Resource on whose behalf to update \p *nodes
+ * \param[in] log_id Resource ID for logs (if \c NULL, use
+ * \p source_rsc ID)
+ * \param[in,out] nodes Nodes to update (set initial contents to
+ * \c NULL to copy allowed nodes from
+ * \p source_rsc)
+ * \param[in] colocation Original colocation constraint (used to get
+ * configured primary resource's stickiness, and
+ * to get colocation node attribute; if \c NULL,
+ * <tt>source_rsc</tt>'s own matching node scores
+ * will not be added, and \p *nodes must be
+ * \c NULL as well)
+ * \param[in] factor Incorporate scores multiplied by this factor
+ * \param[in] flags Bitmask of enum pcmk__coloc_select values
*
+ * \note \c NULL \p target_rsc, \c NULL \p *nodes, \c NULL \p colocation,
+ * and the \c pcmk__coloc_select_this_with flag are used together (and
+ * only by \c cmp_resources()).
* \note The caller remains responsible for freeing \p *nodes.
*/
- void (*add_colocated_node_scores)(pe_resource_t *rsc, const char *log_id,
- GHashTable **nodes, const char *attr,
+ void (*add_colocated_node_scores)(pcmk_resource_t *source_rsc,
+ const pcmk_resource_t *target_rsc,
+ const char *log_id, GHashTable **nodes,
+ const pcmk__colocation_t *colocation,
float factor, uint32_t flags);
/*!
@@ -200,7 +241,7 @@ struct resource_alloc_functions_s {
* \param[in,out] rsc Resource to apply constraint to
* \param[in,out] location Location constraint to apply
*/
- void (*apply_location)(pe_resource_t *rsc, pe__location_t *location);
+ void (*apply_location)(pcmk_resource_t *rsc, pe__location_t *location);
/*!
* \internal
@@ -214,8 +255,7 @@ struct resource_alloc_functions_s {
* of node. For collective resources, the flags can differ due to
* multiple instances possibly being involved.
*/
- enum pe_action_flags (*action_flags)(pe_action_t *action,
- const pe_node_t *node);
+ uint32_t (*action_flags)(pcmk_action_t *action, const pcmk_node_t *node);
/*!
* \internal
@@ -226,26 +266,33 @@ struct resource_alloc_functions_s {
* ordering. Effects may cascade to other orderings involving the actions as
* well.
*
- * \param[in,out] first 'First' action in an ordering
- * \param[in,out] then 'Then' action in an ordering
- * \param[in] node If not NULL, limit scope of ordering to this
- * node (only used when interleaving instances)
- * \param[in] flags Action flags for \p first for ordering purposes
- * \param[in] filter Action flags to limit scope of certain updates
- * (may include pe_action_optional to affect only
- * mandatory actions, and pe_action_runnable to
- * affect only runnable actions)
- * \param[in] type Group of enum pe_ordering flags to apply
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] first 'First' action in an ordering
+ * \param[in,out] then 'Then' action in an ordering
+ * \param[in] node If not NULL, limit scope of ordering to this
+ * node (only used when interleaving instances)
+ * \param[in] flags Action flags for \p first for ordering purposes
+ * \param[in] filter Action flags to limit scope of certain updates
+ * (may include pcmk_action_optional to affect
+ * only mandatory actions and pcmk_action_runnable
+ * to affect only runnable actions)
+ * \param[in] type Group of enum pcmk__action_relation_flags
+ * \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
- uint32_t (*update_ordered_actions)(pe_action_t *first, pe_action_t *then,
- const pe_node_t *node, uint32_t flags,
+ uint32_t (*update_ordered_actions)(pcmk_action_t *first,
+ pcmk_action_t *then,
+ const pcmk_node_t *node, uint32_t flags,
uint32_t filter, uint32_t type,
- pe_working_set_t *data_set);
+ pcmk_scheduler_t *scheduler);
- void (*output_actions)(pe_resource_t *rsc);
+ /*!
+ * \internal
+ * \brief Output a summary of scheduled actions for a resource
+ *
+ * \param[in,out] rsc Resource to output actions for
+ */
+ void (*output_actions)(pcmk_resource_t *rsc);
/*!
* \internal
@@ -253,7 +300,7 @@ struct resource_alloc_functions_s {
*
* \param[in,out] rsc Resource whose actions should be added
*/
- void (*add_actions_to_graph)(pe_resource_t *rsc);
+ void (*add_actions_to_graph)(pcmk_resource_t *rsc);
/*!
* \internal
@@ -265,7 +312,7 @@ struct resource_alloc_functions_s {
* \param[in] rsc Resource whose meta-attributes should be added
* \param[in,out] xml Transition graph action attributes XML to add to
*/
- void (*add_graph_meta)(const pe_resource_t *rsc, xmlNode *xml);
+ void (*add_graph_meta)(const pcmk_resource_t *rsc, xmlNode *xml);
/*!
* \internal
@@ -275,15 +322,15 @@ struct resource_alloc_functions_s {
* resources colocated with it, to determine whether a node has sufficient
* capacity. Given a resource and a table of utilization values, it will add
* the resource's utilization to the existing values, if the resource has
- * not yet been allocated to a node.
+ * not yet been assigned to a node.
*
* \param[in] rsc Resource with utilization to add
- * \param[in] orig_rsc Resource being allocated (for logging only)
+ * \param[in] orig_rsc Resource being assigned (for logging only)
* \param[in] all_rscs List of all resources that will be summed
* \param[in,out] utilization Table of utilization values to add to
*/
- void (*add_utilization)(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList *all_rscs,
+ void (*add_utilization)(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList *all_rscs,
GHashTable *utilization);
/*!
@@ -292,95 +339,98 @@ struct resource_alloc_functions_s {
*
* \param[in,out] rsc Resource to check for shutdown lock
*/
- void (*shutdown_lock)(pe_resource_t *rsc);
+ void (*shutdown_lock)(pcmk_resource_t *rsc);
};
// Actions (pcmk_sched_actions.c)
G_GNUC_INTERNAL
-void pcmk__update_action_for_orderings(pe_action_t *action,
- pe_working_set_t *data_set);
+void pcmk__update_action_for_orderings(pcmk_action_t *action,
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-uint32_t pcmk__update_ordered_actions(pe_action_t *first, pe_action_t *then,
- const pe_node_t *node, uint32_t flags,
+uint32_t pcmk__update_ordered_actions(pcmk_action_t *first, pcmk_action_t *then,
+ const pcmk_node_t *node, uint32_t flags,
uint32_t filter, uint32_t type,
- pe_working_set_t *data_set);
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-void pcmk__log_action(const char *pre_text, const pe_action_t *action,
+void pcmk__log_action(const char *pre_text, const pcmk_action_t *action,
bool details);
G_GNUC_INTERNAL
-pe_action_t *pcmk__new_cancel_action(pe_resource_t *rsc, const char *name,
- guint interval_ms, const pe_node_t *node);
+pcmk_action_t *pcmk__new_cancel_action(pcmk_resource_t *rsc, const char *name,
+ guint interval_ms,
+ const pcmk_node_t *node);
G_GNUC_INTERNAL
-pe_action_t *pcmk__new_shutdown_action(pe_node_t *node);
+pcmk_action_t *pcmk__new_shutdown_action(pcmk_node_t *node);
G_GNUC_INTERNAL
-bool pcmk__action_locks_rsc_to_node(const pe_action_t *action);
+bool pcmk__action_locks_rsc_to_node(const pcmk_action_t *action);
G_GNUC_INTERNAL
-void pcmk__deduplicate_action_inputs(pe_action_t *action);
+void pcmk__deduplicate_action_inputs(pcmk_action_t *action);
G_GNUC_INTERNAL
-void pcmk__output_actions(pe_working_set_t *data_set);
+void pcmk__output_actions(pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-bool pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node,
+bool pcmk__check_action_config(pcmk_resource_t *rsc, pcmk_node_t *node,
const xmlNode *xml_op);
G_GNUC_INTERNAL
-void pcmk__handle_rsc_config_changes(pe_working_set_t *data_set);
+void pcmk__handle_rsc_config_changes(pcmk_scheduler_t *scheduler);
// Recurring actions (pcmk_sched_recurring.c)
G_GNUC_INTERNAL
-void pcmk__create_recurring_actions(pe_resource_t *rsc);
+void pcmk__create_recurring_actions(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__schedule_cancel(pe_resource_t *rsc, const char *call_id,
+void pcmk__schedule_cancel(pcmk_resource_t *rsc, const char *call_id,
const char *task, guint interval_ms,
- const pe_node_t *node, const char *reason);
+ const pcmk_node_t *node, const char *reason);
G_GNUC_INTERNAL
-void pcmk__reschedule_recurring(pe_resource_t *rsc, const char *task,
- guint interval_ms, pe_node_t *node);
+void pcmk__reschedule_recurring(pcmk_resource_t *rsc, const char *task,
+ guint interval_ms, pcmk_node_t *node);
G_GNUC_INTERNAL
-bool pcmk__action_is_recurring(const pe_action_t *action);
+bool pcmk__action_is_recurring(const pcmk_action_t *action);
// Producing transition graphs (pcmk_graph_producer.c)
G_GNUC_INTERNAL
-bool pcmk__graph_has_loop(const pe_action_t *init_action,
- const pe_action_t *action,
- pe_action_wrapper_t *input);
+bool pcmk__graph_has_loop(const pcmk_action_t *init_action,
+ const pcmk_action_t *action,
+ pcmk__related_action_t *input);
G_GNUC_INTERNAL
-void pcmk__add_rsc_actions_to_graph(pe_resource_t *rsc);
+void pcmk__add_rsc_actions_to_graph(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__create_graph(pe_working_set_t *data_set);
+void pcmk__create_graph(pcmk_scheduler_t *scheduler);
// Fencing (pcmk_sched_fencing.c)
G_GNUC_INTERNAL
-void pcmk__order_vs_fence(pe_action_t *stonith_op, pe_working_set_t *data_set);
+void pcmk__order_vs_fence(pcmk_action_t *stonith_op,
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-void pcmk__order_vs_unfence(const pe_resource_t *rsc, pe_node_t *node,
- pe_action_t *action, enum pe_ordering order);
+void pcmk__order_vs_unfence(const pcmk_resource_t *rsc, pcmk_node_t *node,
+ pcmk_action_t *action,
+ enum pcmk__action_relation_flags order);
G_GNUC_INTERNAL
-void pcmk__fence_guest(pe_node_t *node);
+void pcmk__fence_guest(pcmk_node_t *node);
G_GNUC_INTERNAL
-bool pcmk__node_unfenced(const pe_node_t *node);
+bool pcmk__node_unfenced(const pcmk_node_t *node);
G_GNUC_INTERNAL
void pcmk__order_restart_vs_unfence(gpointer data, gpointer user_data);
@@ -388,48 +438,48 @@ void pcmk__order_restart_vs_unfence(gpointer data, gpointer user_data);
// Injected scheduler inputs (pcmk_sched_injections.c)
-void pcmk__inject_scheduler_input(pe_working_set_t *data_set, cib_t *cib,
+void pcmk__inject_scheduler_input(pcmk_scheduler_t *scheduler, cib_t *cib,
const pcmk_injections_t *injections);
// Constraints of any type (pcmk_sched_constraints.c)
G_GNUC_INTERNAL
-pe_resource_t *pcmk__find_constraint_resource(GList *rsc_list, const char *id);
+pcmk_resource_t *pcmk__find_constraint_resource(GList *rsc_list,
+ const char *id);
G_GNUC_INTERNAL
xmlNode *pcmk__expand_tags_in_sets(xmlNode *xml_obj,
- const pe_working_set_t *data_set);
+ const pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-bool pcmk__valid_resource_or_tag(const pe_working_set_t *data_set,
- const char *id, pe_resource_t **rsc,
- pe_tag_t **tag);
+bool pcmk__valid_resource_or_tag(const pcmk_scheduler_t *scheduler,
+ const char *id, pcmk_resource_t **rsc,
+ pcmk_tag_t **tag);
G_GNUC_INTERNAL
bool pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr,
- bool convert_rsc, const pe_working_set_t *data_set);
+ bool convert_rsc, const pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-void pcmk__create_internal_constraints(pe_working_set_t *data_set);
+void pcmk__create_internal_constraints(pcmk_scheduler_t *scheduler);
// Location constraints
G_GNUC_INTERNAL
-void pcmk__unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set);
+void pcmk__unpack_location(xmlNode *xml_obj, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-pe__location_t *pcmk__new_location(const char *id, pe_resource_t *rsc,
- int node_weight, const char *discover_mode,
- pe_node_t *foo_node,
- pe_working_set_t *data_set);
+pe__location_t *pcmk__new_location(const char *id, pcmk_resource_t *rsc,
+ int node_score, const char *discover_mode,
+ pcmk_node_t *foo_node);
G_GNUC_INTERNAL
-void pcmk__apply_locations(pe_working_set_t *data_set);
+void pcmk__apply_locations(pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-void pcmk__apply_location(pe_resource_t *rsc, pe__location_t *constraint);
+void pcmk__apply_location(pcmk_resource_t *rsc, pe__location_t *constraint);
// Colocation constraints (pcmk_sched_colocation.c)
@@ -440,54 +490,104 @@ enum pcmk__coloc_affects {
pcmk__coloc_affects_role,
};
+/*!
+ * \internal
+ * \brief Get the value of a colocation's node attribute
+ *
+ * When looking up a colocation node attribute on a bundle node for a bundle
+ * primitive, we should always look on the bundle node's assigned host,
+ * regardless of the value of XML_RSC_ATTR_TARGET. At most one resource (the
+ * bundle primitive, if any) can run on a bundle node, so any colocation must
+ * necessarily be evaluated with respect to the bundle node (the container).
+ *
+ * \param[in] node Node on which to look up the attribute
+ * \param[in] attr Name of attribute to look up
+ * \param[in] rsc Resource on whose behalf to look up the attribute
+ *
+ * \return Value of \p attr on \p node or on the host of \p node, as appropriate
+ */
+static inline const char *
+pcmk__colocation_node_attr(const pcmk_node_t *node, const char *attr,
+ const pcmk_resource_t *rsc)
+{
+ const pcmk_resource_t *top = pe__const_top_resource(rsc, false);
+ const bool force_host = pe__is_bundle_node(node)
+ && pe_rsc_is_bundled(rsc)
+ && (top == pe__bundled_resource(rsc));
+
+ return pe__node_attribute_calculated(node, attr, rsc,
+ pcmk__rsc_node_assigned, force_host);
+}
+
G_GNUC_INTERNAL
-enum pcmk__coloc_affects pcmk__colocation_affects(const pe_resource_t *dependent,
- const pe_resource_t *primary,
- const pcmk__colocation_t *colocation,
+enum pcmk__coloc_affects pcmk__colocation_affects(const pcmk_resource_t
+ *dependent,
+ const pcmk_resource_t
+ *primary,
+ const pcmk__colocation_t
+ *colocation,
bool preview);
G_GNUC_INTERNAL
-void pcmk__apply_coloc_to_weights(pe_resource_t *dependent,
- const pe_resource_t *primary,
- const pcmk__colocation_t *colocation);
+void pcmk__apply_coloc_to_scores(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
+ const pcmk__colocation_t *colocation);
G_GNUC_INTERNAL
-void pcmk__apply_coloc_to_priority(pe_resource_t *dependent,
- const pe_resource_t *primary,
+void pcmk__apply_coloc_to_priority(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation);
G_GNUC_INTERNAL
-void pcmk__add_colocated_node_scores(pe_resource_t *rsc, const char *log_id,
- GHashTable **nodes, const char *attr,
+void pcmk__add_colocated_node_scores(pcmk_resource_t *source_rsc,
+ const pcmk_resource_t *target_rsc,
+ const char *log_id, GHashTable **nodes,
+ const pcmk__colocation_t *colocation,
float factor, uint32_t flags);
G_GNUC_INTERNAL
void pcmk__add_dependent_scores(gpointer data, gpointer user_data);
G_GNUC_INTERNAL
-void pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set);
+void pcmk__colocation_intersect_nodes(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
+ const pcmk__colocation_t *colocation,
+ const GList *primary_nodes,
+ bool merge_scores);
G_GNUC_INTERNAL
-void pcmk__add_this_with(GList **list, const pcmk__colocation_t *colocation);
+void pcmk__unpack_colocation(xmlNode *xml_obj, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-void pcmk__add_this_with_list(GList **list, GList *addition);
+void pcmk__add_this_with(GList **list, const pcmk__colocation_t *colocation,
+ const pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__add_with_this(GList **list, const pcmk__colocation_t *colocation);
+void pcmk__add_this_with_list(GList **list, GList *addition,
+ const pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__add_with_this_list(GList **list, GList *addition);
+void pcmk__add_with_this(GList **list, const pcmk__colocation_t *colocation,
+ const pcmk_resource_t *rsc);
+
+G_GNUC_INTERNAL
+void pcmk__add_with_this_list(GList **list, GList *addition,
+ const pcmk_resource_t *rsc);
+
+G_GNUC_INTERNAL
+GList *pcmk__with_this_colocations(const pcmk_resource_t *rsc);
+
+G_GNUC_INTERNAL
+GList *pcmk__this_with_colocations(const pcmk_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__new_colocation(const char *id, const char *node_attr, int score,
- pe_resource_t *dependent, pe_resource_t *primary,
+ pcmk_resource_t *dependent, pcmk_resource_t *primary,
const char *dependent_role, const char *primary_role,
- bool influence, pe_working_set_t *data_set);
+ uint32_t flags);
G_GNUC_INTERNAL
-void pcmk__block_colocation_dependents(pe_action_t *action,
- pe_working_set_t *data_set);
+void pcmk__block_colocation_dependents(pcmk_action_t *action);
/*!
* \internal
@@ -503,7 +603,7 @@ void pcmk__block_colocation_dependents(pe_action_t *action,
*/
static inline bool
pcmk__colocation_has_influence(const pcmk__colocation_t *colocation,
- const pe_resource_t *rsc)
+ const pcmk_resource_t *rsc)
{
if (rsc == NULL) {
rsc = colocation->primary;
@@ -521,8 +621,9 @@ pcmk__colocation_has_influence(const pcmk__colocation_t *colocation,
* This also avoids problematic scenarios where two containers want to
* perpetually swap places.
*/
- if (pcmk_is_set(colocation->dependent->flags, pe_rsc_allow_remote_remotes)
- && !pcmk_is_set(rsc->flags, pe_rsc_failed)
+ if (pcmk_is_set(colocation->dependent->flags,
+ pcmk_rsc_remote_nesting_allowed)
+ && !pcmk_is_set(rsc->flags, pcmk_rsc_failed)
&& pcmk__list_of_1(rsc->running_on)) {
return false;
}
@@ -530,33 +631,34 @@ pcmk__colocation_has_influence(const pcmk__colocation_t *colocation,
/* The dependent in a colocation influences the primary's location
* if the influence option is true or the primary is not yet active.
*/
- return colocation->influence || (rsc->running_on == NULL);
+ return pcmk_is_set(colocation->flags, pcmk__coloc_influence)
+ || (rsc->running_on == NULL);
}
// Ordering constraints (pcmk_sched_ordering.c)
G_GNUC_INTERNAL
-void pcmk__new_ordering(pe_resource_t *first_rsc, char *first_task,
- pe_action_t *first_action, pe_resource_t *then_rsc,
- char *then_task, pe_action_t *then_action,
- uint32_t flags, pe_working_set_t *data_set);
+void pcmk__new_ordering(pcmk_resource_t *first_rsc, char *first_task,
+ pcmk_action_t *first_action, pcmk_resource_t *then_rsc,
+ char *then_task, pcmk_action_t *then_action,
+ uint32_t flags, pcmk_scheduler_t *sched);
G_GNUC_INTERNAL
-void pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set);
+void pcmk__unpack_ordering(xmlNode *xml_obj, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-void pcmk__disable_invalid_orderings(pe_working_set_t *data_set);
+void pcmk__disable_invalid_orderings(pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-void pcmk__order_stops_before_shutdown(pe_node_t *node,
- pe_action_t *shutdown_op);
+void pcmk__order_stops_before_shutdown(pcmk_node_t *node,
+ pcmk_action_t *shutdown_op);
G_GNUC_INTERNAL
-void pcmk__apply_orderings(pe_working_set_t *data_set);
+void pcmk__apply_orderings(pcmk_scheduler_t *sched);
G_GNUC_INTERNAL
-void pcmk__order_after_each(pe_action_t *after, GList *list);
+void pcmk__order_after_each(pcmk_action_t *after, GList *list);
/*!
@@ -567,7 +669,7 @@ void pcmk__order_after_each(pe_action_t *after, GList *list);
* \param[in,out] first_task Action key for 'first' action
* \param[in] then_rsc Resource for 'then' action
* \param[in,out] then_task Action key for 'then' action
- * \param[in] flags Bitmask of enum pe_ordering flags
+ * \param[in] flags Group of enum pcmk__action_relation_flags
*/
#define pcmk__order_resource_actions(first_rsc, first_task, \
then_rsc, then_task, flags) \
@@ -579,260 +681,329 @@ void pcmk__order_after_each(pe_action_t *after, GList *list);
NULL, (flags), (first_rsc)->cluster)
#define pcmk__order_starts(rsc1, rsc2, flags) \
- pcmk__order_resource_actions((rsc1), CRMD_ACTION_START, \
- (rsc2), CRMD_ACTION_START, (flags))
+ pcmk__order_resource_actions((rsc1), PCMK_ACTION_START, \
+ (rsc2), PCMK_ACTION_START, (flags))
#define pcmk__order_stops(rsc1, rsc2, flags) \
- pcmk__order_resource_actions((rsc1), CRMD_ACTION_STOP, \
- (rsc2), CRMD_ACTION_STOP, (flags))
+ pcmk__order_resource_actions((rsc1), PCMK_ACTION_STOP, \
+ (rsc2), PCMK_ACTION_STOP, (flags))
// Ticket constraints (pcmk_sched_tickets.c)
G_GNUC_INTERNAL
-void pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set);
+void pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pcmk_scheduler_t *scheduler);
// Promotable clone resources (pcmk_sched_promotable.c)
G_GNUC_INTERNAL
-void pcmk__add_promotion_scores(pe_resource_t *rsc);
+void pcmk__add_promotion_scores(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__require_promotion_tickets(pe_resource_t *rsc);
+void pcmk__require_promotion_tickets(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__set_instance_roles(pe_resource_t *rsc);
+void pcmk__set_instance_roles(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__create_promotable_actions(pe_resource_t *clone);
+void pcmk__create_promotable_actions(pcmk_resource_t *clone);
G_GNUC_INTERNAL
-void pcmk__promotable_restart_ordering(pe_resource_t *rsc);
+void pcmk__promotable_restart_ordering(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__order_promotable_instances(pe_resource_t *clone);
+void pcmk__order_promotable_instances(pcmk_resource_t *clone);
G_GNUC_INTERNAL
-void pcmk__update_dependent_with_promotable(const pe_resource_t *primary,
- pe_resource_t *dependent,
- const pcmk__colocation_t *colocation);
+void pcmk__update_dependent_with_promotable(const pcmk_resource_t *primary,
+ pcmk_resource_t *dependent,
+ const pcmk__colocation_t
+ *colocation);
G_GNUC_INTERNAL
-void pcmk__update_promotable_dependent_priority(const pe_resource_t *primary,
- pe_resource_t *dependent,
- const pcmk__colocation_t *colocation);
+void pcmk__update_promotable_dependent_priority(const pcmk_resource_t *primary,
+ pcmk_resource_t *dependent,
+ const pcmk__colocation_t
+ *colocation);
// Pacemaker Remote nodes (pcmk_sched_remote.c)
G_GNUC_INTERNAL
-bool pcmk__is_failed_remote_node(const pe_node_t *node);
+bool pcmk__is_failed_remote_node(const pcmk_node_t *node);
G_GNUC_INTERNAL
-void pcmk__order_remote_connection_actions(pe_working_set_t *data_set);
+void pcmk__order_remote_connection_actions(pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-bool pcmk__rsc_corresponds_to_guest(const pe_resource_t *rsc,
- const pe_node_t *node);
+bool pcmk__rsc_corresponds_to_guest(const pcmk_resource_t *rsc,
+ const pcmk_node_t *node);
G_GNUC_INTERNAL
-pe_node_t *pcmk__connection_host_for_action(const pe_action_t *action);
+pcmk_node_t *pcmk__connection_host_for_action(const pcmk_action_t *action);
G_GNUC_INTERNAL
-void pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params);
+void pcmk__substitute_remote_addr(pcmk_resource_t *rsc, GHashTable *params);
G_GNUC_INTERNAL
-void pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, const pe_action_t *action);
+void pcmk__add_bundle_meta_to_xml(xmlNode *args_xml,
+ const pcmk_action_t *action);
// Primitives (pcmk_sched_primitive.c)
G_GNUC_INTERNAL
-pe_node_t *pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer);
+pcmk_node_t *pcmk__primitive_assign(pcmk_resource_t *rsc,
+ const pcmk_node_t *prefer,
+ bool stop_if_fail);
G_GNUC_INTERNAL
-void pcmk__primitive_create_actions(pe_resource_t *rsc);
+void pcmk__primitive_create_actions(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__primitive_internal_constraints(pe_resource_t *rsc);
+void pcmk__primitive_internal_constraints(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-enum pe_action_flags pcmk__primitive_action_flags(pe_action_t *action,
- const pe_node_t *node);
+uint32_t pcmk__primitive_action_flags(pcmk_action_t *action,
+ const pcmk_node_t *node);
G_GNUC_INTERNAL
-void pcmk__primitive_apply_coloc_score(pe_resource_t *dependent,
- const pe_resource_t *primary,
+void pcmk__primitive_apply_coloc_score(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent);
G_GNUC_INTERNAL
-void pcmk__with_primitive_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc,
+void pcmk__with_primitive_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
GList **list);
G_GNUC_INTERNAL
-void pcmk__primitive_with_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc,
+void pcmk__primitive_with_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
GList **list);
G_GNUC_INTERNAL
-void pcmk__schedule_cleanup(pe_resource_t *rsc, const pe_node_t *node,
+void pcmk__schedule_cleanup(pcmk_resource_t *rsc, const pcmk_node_t *node,
bool optional);
G_GNUC_INTERNAL
-void pcmk__primitive_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml);
+void pcmk__primitive_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml);
G_GNUC_INTERNAL
-void pcmk__primitive_add_utilization(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc,
+void pcmk__primitive_add_utilization(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
GList *all_rscs, GHashTable *utilization);
G_GNUC_INTERNAL
-void pcmk__primitive_shutdown_lock(pe_resource_t *rsc);
+void pcmk__primitive_shutdown_lock(pcmk_resource_t *rsc);
// Groups (pcmk_sched_group.c)
G_GNUC_INTERNAL
-pe_node_t *pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer);
+pcmk_node_t *pcmk__group_assign(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
+ bool stop_if_fail);
G_GNUC_INTERNAL
-void pcmk__group_create_actions(pe_resource_t *rsc);
+void pcmk__group_create_actions(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__group_internal_constraints(pe_resource_t *rsc);
+void pcmk__group_internal_constraints(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__group_apply_coloc_score(pe_resource_t *dependent,
- const pe_resource_t *primary,
+void pcmk__group_apply_coloc_score(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent);
G_GNUC_INTERNAL
-void pcmk__with_group_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list);
+void pcmk__with_group_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList **list);
G_GNUC_INTERNAL
-void pcmk__group_with_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list);
+void pcmk__group_with_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList **list);
G_GNUC_INTERNAL
-void pcmk__group_add_colocated_node_scores(pe_resource_t *rsc,
+void pcmk__group_add_colocated_node_scores(pcmk_resource_t *source_rsc,
+ const pcmk_resource_t *target_rsc,
const char *log_id,
- GHashTable **nodes, const char *attr,
+ GHashTable **nodes,
+ const pcmk__colocation_t *colocation,
float factor, uint32_t flags);
G_GNUC_INTERNAL
-void pcmk__group_apply_location(pe_resource_t *rsc, pe__location_t *location);
+void pcmk__group_apply_location(pcmk_resource_t *rsc, pe__location_t *location);
G_GNUC_INTERNAL
-enum pe_action_flags pcmk__group_action_flags(pe_action_t *action,
- const pe_node_t *node);
+uint32_t pcmk__group_action_flags(pcmk_action_t *action,
+ const pcmk_node_t *node);
G_GNUC_INTERNAL
-uint32_t pcmk__group_update_ordered_actions(pe_action_t *first,
- pe_action_t *then,
- const pe_node_t *node,
+uint32_t pcmk__group_update_ordered_actions(pcmk_action_t *first,
+ pcmk_action_t *then,
+ const pcmk_node_t *node,
uint32_t flags, uint32_t filter,
uint32_t type,
- pe_working_set_t *data_set);
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-GList *pcmk__group_colocated_resources(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc,
+GList *pcmk__group_colocated_resources(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
GList *colocated_rscs);
G_GNUC_INTERNAL
-void pcmk__group_add_utilization(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList *all_rscs,
- GHashTable *utilization);
+void pcmk__group_add_utilization(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList *all_rscs, GHashTable *utilization);
G_GNUC_INTERNAL
-void pcmk__group_shutdown_lock(pe_resource_t *rsc);
+void pcmk__group_shutdown_lock(pcmk_resource_t *rsc);
// Clones (pcmk_sched_clone.c)
G_GNUC_INTERNAL
-pe_node_t *pcmk__clone_assign(pe_resource_t *rsc, const pe_node_t *prefer);
+pcmk_node_t *pcmk__clone_assign(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
+ bool stop_if_fail);
G_GNUC_INTERNAL
-void pcmk__clone_apply_coloc_score(pe_resource_t *dependent,
- const pe_resource_t *primary,
+void pcmk__clone_create_actions(pcmk_resource_t *rsc);
+
+G_GNUC_INTERNAL
+bool pcmk__clone_create_probe(pcmk_resource_t *rsc, pcmk_node_t *node);
+
+G_GNUC_INTERNAL
+void pcmk__clone_internal_constraints(pcmk_resource_t *rsc);
+
+G_GNUC_INTERNAL
+void pcmk__clone_apply_coloc_score(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent);
G_GNUC_INTERNAL
-void pcmk__with_clone_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list);
+void pcmk__with_clone_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList **list);
+
+G_GNUC_INTERNAL
+void pcmk__clone_with_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList **list);
G_GNUC_INTERNAL
-void pcmk__clone_with_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list);
+void pcmk__clone_apply_location(pcmk_resource_t *rsc,
+ pe__location_t *constraint);
+
+G_GNUC_INTERNAL
+uint32_t pcmk__clone_action_flags(pcmk_action_t *action,
+ const pcmk_node_t *node);
+
+G_GNUC_INTERNAL
+void pcmk__clone_add_actions_to_graph(pcmk_resource_t *rsc);
+
+G_GNUC_INTERNAL
+void pcmk__clone_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml);
+
+G_GNUC_INTERNAL
+void pcmk__clone_add_utilization(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList *all_rscs, GHashTable *utilization);
+
+G_GNUC_INTERNAL
+void pcmk__clone_shutdown_lock(pcmk_resource_t *rsc);
// Bundles (pcmk_sched_bundle.c)
G_GNUC_INTERNAL
-const pe_resource_t *pcmk__get_rsc_in_container(const pe_resource_t *instance);
+pcmk_node_t *pcmk__bundle_assign(pcmk_resource_t *rsc,
+ const pcmk_node_t *prefer, bool stop_if_fail);
G_GNUC_INTERNAL
-void pcmk__bundle_apply_coloc_score(pe_resource_t *dependent,
- const pe_resource_t *primary,
+void pcmk__bundle_create_actions(pcmk_resource_t *rsc);
+
+G_GNUC_INTERNAL
+bool pcmk__bundle_create_probe(pcmk_resource_t *rsc, pcmk_node_t *node);
+
+G_GNUC_INTERNAL
+void pcmk__bundle_internal_constraints(pcmk_resource_t *rsc);
+
+G_GNUC_INTERNAL
+void pcmk__bundle_apply_coloc_score(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent);
G_GNUC_INTERNAL
-void pcmk__with_bundle_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list);
+void pcmk__with_bundle_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList **list);
G_GNUC_INTERNAL
-void pcmk__bundle_with_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list);
+void pcmk__bundle_with_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList **list);
G_GNUC_INTERNAL
-void pcmk__output_bundle_actions(pe_resource_t *rsc);
+void pcmk__bundle_apply_location(pcmk_resource_t *rsc,
+ pe__location_t *constraint);
+
+G_GNUC_INTERNAL
+uint32_t pcmk__bundle_action_flags(pcmk_action_t *action,
+ const pcmk_node_t *node);
+
+G_GNUC_INTERNAL
+void pcmk__output_bundle_actions(pcmk_resource_t *rsc);
+
+G_GNUC_INTERNAL
+void pcmk__bundle_add_actions_to_graph(pcmk_resource_t *rsc);
+
+G_GNUC_INTERNAL
+void pcmk__bundle_add_utilization(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList *all_rscs, GHashTable *utilization);
+
+G_GNUC_INTERNAL
+void pcmk__bundle_shutdown_lock(pcmk_resource_t *rsc);
// Clone instances or bundle replica containers (pcmk_sched_instances.c)
G_GNUC_INTERNAL
-void pcmk__assign_instances(pe_resource_t *collective, GList *instances,
+void pcmk__assign_instances(pcmk_resource_t *collective, GList *instances,
int max_total, int max_per_node);
G_GNUC_INTERNAL
-void pcmk__create_instance_actions(pe_resource_t *rsc, GList *instances);
+void pcmk__create_instance_actions(pcmk_resource_t *rsc, GList *instances);
G_GNUC_INTERNAL
-bool pcmk__instance_matches(const pe_resource_t *instance,
- const pe_node_t *node, enum rsc_role_e role,
+bool pcmk__instance_matches(const pcmk_resource_t *instance,
+ const pcmk_node_t *node, enum rsc_role_e role,
bool current);
G_GNUC_INTERNAL
-pe_resource_t *pcmk__find_compatible_instance(const pe_resource_t *match_rsc,
- const pe_resource_t *rsc,
- enum rsc_role_e role,
- bool current);
+pcmk_resource_t *pcmk__find_compatible_instance(const pcmk_resource_t *match_rsc,
+ const pcmk_resource_t *rsc,
+ enum rsc_role_e role,
+ bool current);
G_GNUC_INTERNAL
-uint32_t pcmk__instance_update_ordered_actions(pe_action_t *first,
- pe_action_t *then,
- const pe_node_t *node,
+uint32_t pcmk__instance_update_ordered_actions(pcmk_action_t *first,
+ pcmk_action_t *then,
+ const pcmk_node_t *node,
uint32_t flags, uint32_t filter,
uint32_t type,
- pe_working_set_t *data_set);
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-enum pe_action_flags pcmk__collective_action_flags(pe_action_t *action,
- const GList *instances,
- const pe_node_t *node);
-
-G_GNUC_INTERNAL
-void pcmk__add_collective_constraints(GList **list,
- const pe_resource_t *instance,
- const pe_resource_t *collective,
- bool with_this);
+uint32_t pcmk__collective_action_flags(pcmk_action_t *action,
+ const GList *instances,
+ const pcmk_node_t *node);
// Injections (pcmk_injections.c)
@@ -865,7 +1036,7 @@ xmlNode *pcmk__inject_action_result(xmlNode *cib_resource,
// Nodes (pcmk_sched_nodes.c)
G_GNUC_INTERNAL
-bool pcmk__node_available(const pe_node_t *node, bool consider_score,
+bool pcmk__node_available(const pcmk_node_t *node, bool consider_score,
bool consider_guest);
G_GNUC_INTERNAL
@@ -875,55 +1046,59 @@ G_GNUC_INTERNAL
GHashTable *pcmk__copy_node_table(GHashTable *nodes);
G_GNUC_INTERNAL
-GList *pcmk__sort_nodes(GList *nodes, pe_node_t *active_node);
+void pcmk__copy_node_tables(const pcmk_resource_t *rsc, GHashTable **copy);
+
+G_GNUC_INTERNAL
+void pcmk__restore_node_tables(pcmk_resource_t *rsc, GHashTable *backup);
G_GNUC_INTERNAL
-void pcmk__apply_node_health(pe_working_set_t *data_set);
+GList *pcmk__sort_nodes(GList *nodes, pcmk_node_t *active_node);
G_GNUC_INTERNAL
-pe_node_t *pcmk__top_allowed_node(const pe_resource_t *rsc,
- const pe_node_t *node);
+void pcmk__apply_node_health(pcmk_scheduler_t *scheduler);
+
+G_GNUC_INTERNAL
+pcmk_node_t *pcmk__top_allowed_node(const pcmk_resource_t *rsc,
+ const pcmk_node_t *node);
// Functions applying to more than one variant (pcmk_sched_resource.c)
G_GNUC_INTERNAL
-void pcmk__set_allocation_methods(pe_working_set_t *data_set);
+void pcmk__set_assignment_methods(pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-bool pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node,
+bool pcmk__rsc_agent_changed(pcmk_resource_t *rsc, pcmk_node_t *node,
const xmlNode *rsc_entry, bool active_on_node);
G_GNUC_INTERNAL
-GList *pcmk__rscs_matching_id(const char *id, const pe_working_set_t *data_set);
+GList *pcmk__rscs_matching_id(const char *id,
+ const pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-GList *pcmk__colocated_resources(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc,
+GList *pcmk__colocated_resources(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
GList *colocated_rscs);
G_GNUC_INTERNAL
-void pcmk__noop_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml);
-
-G_GNUC_INTERNAL
-void pcmk__output_resource_actions(pe_resource_t *rsc);
+void pcmk__noop_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml);
G_GNUC_INTERNAL
-bool pcmk__finalize_assignment(pe_resource_t *rsc, pe_node_t *chosen,
- bool force);
+void pcmk__output_resource_actions(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-bool pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force);
+bool pcmk__assign_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool force,
+ bool stop_if_fail);
G_GNUC_INTERNAL
-void pcmk__unassign_resource(pe_resource_t *rsc);
+void pcmk__unassign_resource(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-bool pcmk__threshold_reached(pe_resource_t *rsc, const pe_node_t *node,
- pe_resource_t **failed);
+bool pcmk__threshold_reached(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ pcmk_resource_t **failed);
G_GNUC_INTERNAL
-void pcmk__sort_resources(pe_working_set_t *data_set);
+void pcmk__sort_resources(pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
gint pcmk__cmp_instance(gconstpointer a, gconstpointer b);
@@ -935,26 +1110,27 @@ gint pcmk__cmp_instance_number(gconstpointer a, gconstpointer b);
// Functions related to probes (pcmk_sched_probes.c)
G_GNUC_INTERNAL
-bool pcmk__probe_rsc_on_node(pe_resource_t *rsc, pe_node_t *node);
+bool pcmk__probe_rsc_on_node(pcmk_resource_t *rsc, pcmk_node_t *node);
G_GNUC_INTERNAL
-void pcmk__order_probes(pe_working_set_t *data_set);
+void pcmk__order_probes(pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-bool pcmk__probe_resource_list(GList *rscs, pe_node_t *node);
+bool pcmk__probe_resource_list(GList *rscs, pcmk_node_t *node);
G_GNUC_INTERNAL
-void pcmk__schedule_probes(pe_working_set_t *data_set);
+void pcmk__schedule_probes(pcmk_scheduler_t *scheduler);
// Functions related to live migration (pcmk_sched_migration.c)
-void pcmk__create_migration_actions(pe_resource_t *rsc,
- const pe_node_t *current);
+void pcmk__create_migration_actions(pcmk_resource_t *rsc,
+ const pcmk_node_t *current);
void pcmk__abort_dangling_migration(void *data, void *user_data);
-bool pcmk__rsc_can_migrate(const pe_resource_t *rsc, const pe_node_t *current);
+bool pcmk__rsc_can_migrate(const pcmk_resource_t *rsc,
+ const pcmk_node_t *current);
void pcmk__order_migration_equivalents(pe__ordering_t *order);
@@ -962,25 +1138,25 @@ void pcmk__order_migration_equivalents(pe__ordering_t *order);
// Functions related to node utilization (pcmk_sched_utilization.c)
G_GNUC_INTERNAL
-int pcmk__compare_node_capacities(const pe_node_t *node1,
- const pe_node_t *node2);
+int pcmk__compare_node_capacities(const pcmk_node_t *node1,
+ const pcmk_node_t *node2);
G_GNUC_INTERNAL
void pcmk__consume_node_capacity(GHashTable *current_utilization,
- const pe_resource_t *rsc);
+ const pcmk_resource_t *rsc);
G_GNUC_INTERNAL
void pcmk__release_node_capacity(GHashTable *current_utilization,
- const pe_resource_t *rsc);
+ const pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-const pe_node_t *pcmk__ban_insufficient_capacity(pe_resource_t *rsc);
+const pcmk_node_t *pcmk__ban_insufficient_capacity(pcmk_resource_t *rsc);
G_GNUC_INTERNAL
-void pcmk__create_utilization_constraints(pe_resource_t *rsc,
+void pcmk__create_utilization_constraints(pcmk_resource_t *rsc,
const GList *allowed_nodes);
G_GNUC_INTERNAL
-void pcmk__show_node_capacities(const char *desc, pe_working_set_t *data_set);
+void pcmk__show_node_capacities(const char *desc, pcmk_scheduler_t *scheduler);
#endif // PCMK__LIBPACEMAKER_PRIVATE__H
diff --git a/lib/pacemaker/pcmk_acl.c b/lib/pacemaker/pcmk_acl.c
index c2072dc..85c461e 100644
--- a/lib/pacemaker/pcmk_acl.c
+++ b/lib/pacemaker/pcmk_acl.c
@@ -53,7 +53,10 @@ static const xmlChar *NS_DENIED = (const xmlChar *) ACL_NS_PREFIX "denied";
* \param[in,out] ns_recycle_denied
*/
static void
-pcmk__acl_mark_node_with_namespace(xmlNode *i_node, const xmlChar *ns, int *ret, xmlNs **ns_recycle_writable, xmlNs **ns_recycle_readable, xmlNs **ns_recycle_denied)
+pcmk__acl_mark_node_with_namespace(xmlNode *i_node, const xmlChar *ns, int *ret,
+ xmlNs **ns_recycle_writable,
+ xmlNs **ns_recycle_readable,
+ xmlNs **ns_recycle_denied)
{
if (ns == NS_WRITABLE)
{
@@ -88,10 +91,10 @@ pcmk__acl_mark_node_with_namespace(xmlNode *i_node, const xmlChar *ns, int *ret,
}
/*!
- * \brief This function takes some XML, and annotates it with XML
- * namespaces to indicate the ACL permissions.
+ * \brief Annotate a given XML element or property and its siblings with
+ * XML namespaces to indicate ACL permissions
*
- * \param[in,out] xml_modify
+ * \param[in,out] xml_modify XML to annotate
*
* \return A standard Pacemaker return code
* Namely:
@@ -104,7 +107,7 @@ pcmk__acl_mark_node_with_namespace(xmlNode *i_node, const xmlChar *ns, int *ret,
* \note This function is recursive
*/
static int
-pcmk__acl_annotate_permissions_recursive(xmlNode *xml_modify)
+annotate_with_siblings(xmlNode *xml_modify)
{
static xmlNs *ns_recycle_writable = NULL,
@@ -123,61 +126,74 @@ pcmk__acl_annotate_permissions_recursive(xmlNode *xml_modify)
for (i_node = xml_modify; i_node != NULL; i_node = i_node->next) {
switch (i_node->type) {
- case XML_ELEMENT_NODE:
- pcmk__set_xml_doc_flag(i_node, pcmk__xf_tracking);
-
- if (!pcmk__check_acl(i_node, NULL, pcmk__xf_acl_read)) {
- ns = NS_DENIED;
- } else if (!pcmk__check_acl(i_node, NULL, pcmk__xf_acl_write)) {
- ns = NS_READABLE;
- } else {
- ns = NS_WRITABLE;
- }
- pcmk__acl_mark_node_with_namespace(i_node, ns, &ret, &ns_recycle_writable, &ns_recycle_readable, &ns_recycle_denied);
- /* XXX recursion can be turned into plain iteration to save stack */
- if (i_node->properties != NULL) {
- /* this is not entirely clear, but relies on the very same
- class-hierarchy emulation that libxml2 has firmly baked in
- its API/ABI */
- ret |= pcmk__acl_annotate_permissions_recursive((xmlNodePtr) i_node->properties);
- }
- if (i_node->children != NULL) {
- ret |= pcmk__acl_annotate_permissions_recursive(i_node->children);
- }
- break;
- case XML_ATTRIBUTE_NODE:
- /* we can utilize that parent has already been assigned the ns */
- if (!pcmk__check_acl(i_node->parent,
- (const char *) i_node->name,
- pcmk__xf_acl_read)) {
- ns = NS_DENIED;
- } else if (!pcmk__check_acl(i_node,
- (const char *) i_node->name,
- pcmk__xf_acl_write)) {
- ns = NS_READABLE;
- } else {
- ns = NS_WRITABLE;
- }
- pcmk__acl_mark_node_with_namespace(i_node, ns, &ret, &ns_recycle_writable, &ns_recycle_readable, &ns_recycle_denied);
- break;
- case XML_COMMENT_NODE:
- /* we can utilize that parent has already been assigned the ns */
- if (!pcmk__check_acl(i_node->parent, (const char *) i_node->name, pcmk__xf_acl_read))
- {
- ns = NS_DENIED;
- }
- else if (!pcmk__check_acl(i_node->parent, (const char *) i_node->name, pcmk__xf_acl_write))
- {
- ns = NS_READABLE;
- }
- else
- {
- ns = NS_WRITABLE;
- }
- pcmk__acl_mark_node_with_namespace(i_node, ns, &ret, &ns_recycle_writable, &ns_recycle_readable, &ns_recycle_denied);
- break;
- default:
- break;
+ case XML_ELEMENT_NODE:
+ pcmk__set_xml_doc_flag(i_node, pcmk__xf_tracking);
+
+ if (!pcmk__check_acl(i_node, NULL, pcmk__xf_acl_read)) {
+ ns = NS_DENIED;
+ } else if (!pcmk__check_acl(i_node, NULL, pcmk__xf_acl_write)) {
+ ns = NS_READABLE;
+ } else {
+ ns = NS_WRITABLE;
+ }
+ pcmk__acl_mark_node_with_namespace(i_node, ns, &ret,
+ &ns_recycle_writable,
+ &ns_recycle_readable,
+ &ns_recycle_denied);
+ // @TODO Could replace recursion with iteration to save stack
+ if (i_node->properties != NULL) {
+ /* This is not entirely clear, but relies on the very same
+ * class-hierarchy emulation that libxml2 has firmly baked
+ * in its API/ABI
+ */
+ ret |= annotate_with_siblings((xmlNodePtr)
+ i_node->properties);
+ }
+ if (i_node->children != NULL) {
+ ret |= annotate_with_siblings(i_node->children);
+ }
+ break;
+
+ case XML_ATTRIBUTE_NODE:
+ // We can utilize that parent has already been assigned the ns
+ if (!pcmk__check_acl(i_node->parent,
+ (const char *) i_node->name,
+ pcmk__xf_acl_read)) {
+ ns = NS_DENIED;
+ } else if (!pcmk__check_acl(i_node,
+ (const char *) i_node->name,
+ pcmk__xf_acl_write)) {
+ ns = NS_READABLE;
+ } else {
+ ns = NS_WRITABLE;
+ }
+ pcmk__acl_mark_node_with_namespace(i_node, ns, &ret,
+ &ns_recycle_writable,
+ &ns_recycle_readable,
+ &ns_recycle_denied);
+ break;
+
+ case XML_COMMENT_NODE:
+ // We can utilize that parent has already been assigned the ns
+ if (!pcmk__check_acl(i_node->parent,
+ (const char *) i_node->name,
+ pcmk__xf_acl_read)) {
+ ns = NS_DENIED;
+ } else if (!pcmk__check_acl(i_node->parent,
+ (const char *) i_node->name,
+ pcmk__xf_acl_write)) {
+ ns = NS_READABLE;
+ } else {
+ ns = NS_WRITABLE;
+ }
+ pcmk__acl_mark_node_with_namespace(i_node, ns, &ret,
+ &ns_recycle_writable,
+ &ns_recycle_readable,
+ &ns_recycle_denied);
+ break;
+
+ default:
+ break;
}
}
@@ -222,10 +238,12 @@ pcmk__acl_annotate_permissions(const char *cred, const xmlDoc *cib_doc,
pcmk__enable_acl(target, target, cred);
- ret = pcmk__acl_annotate_permissions_recursive(target);
+ ret = annotate_with_siblings(target);
if (ret == pcmk_rc_ok) {
- char* credentials = crm_strdup_printf("ACLs as evaluated for user %s", cred);
+ char *credentials = crm_strdup_printf("ACLs as evaluated for user %s",
+ cred);
+
comment = xmlNewDocComment(target->doc, (pcmkXmlStr) credentials);
free(credentials);
if (comment == NULL) {
diff --git a/lib/pacemaker/pcmk_agents.c b/lib/pacemaker/pcmk_agents.c
new file mode 100644
index 0000000..6fec140
--- /dev/null
+++ b/lib/pacemaker/pcmk_agents.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#include <crm_internal.h>
+
+#include <crm/lrmd_internal.h>
+#include <pacemaker.h>
+#include <pacemaker-internal.h>
+
+int
+pcmk__list_alternatives(pcmk__output_t *out, const char *agent_spec)
+{
+ int rc = pcmk_rc_ok;
+ lrmd_t *lrmd_conn = NULL;
+ lrmd_list_t *list = NULL;
+
+ CRM_ASSERT(out != NULL && agent_spec != NULL);
+
+ rc = lrmd__new(&lrmd_conn, NULL, NULL, 0);
+ if (rc != pcmk_rc_ok) {
+ goto error;
+ }
+
+ rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list);
+
+ if (rc > 0) {
+ rc = out->message(out, "alternatives-list", list, agent_spec);
+ } else {
+ rc = pcmk_rc_error;
+ }
+
+error:
+ if (rc != pcmk_rc_ok) {
+ out->err(out, _("No %s found for %s"), "OCF providers", agent_spec);
+ rc = ENXIO;
+ }
+
+ lrmd_api_delete(lrmd_conn);
+ return rc;
+}
+
+// Documented in pacemaker.h
+int
+pcmk_list_alternatives(xmlNodePtr *xml, const char *agent_spec)
+{
+ pcmk__output_t *out = NULL;
+ int rc = pcmk_rc_ok;
+
+ rc = pcmk__xml_output_new(&out, xml);
+ if (rc != pcmk_rc_ok) {
+ return rc;
+ }
+
+ lrmd__register_messages(out);
+
+ rc = pcmk__list_alternatives(out, agent_spec);
+ pcmk__xml_output_finish(out, xml);
+ return rc;
+}
+
+/*!
+ * \internal
+ * \brief List all agents available for the named standard and/or provider
+ *
+ * \param[in,out] out Output object
+ * \param[in] agent_spec STD[:PROV]
+ *
+ * \return Standard Pacemaker return code
+ */
+int
+pcmk__list_agents(pcmk__output_t *out, char *agent_spec)
+{
+ int rc = pcmk_rc_ok;
+ char *provider = NULL;
+ lrmd_t *lrmd_conn = NULL;
+ lrmd_list_t *list = NULL;
+
+ CRM_ASSERT(out != NULL && agent_spec != NULL);
+
+ rc = lrmd__new(&lrmd_conn, NULL, NULL, 0);
+ if (rc != pcmk_rc_ok) {
+ goto error;
+ }
+
+ provider = strchr(agent_spec, ':');
+
+ if (provider) {
+ *provider++ = 0;
+ }
+
+ rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, agent_spec, provider);
+
+ if (rc > 0) {
+ rc = out->message(out, "agents-list", list, agent_spec, provider);
+ } else {
+ rc = pcmk_rc_error;
+ }
+
+error:
+ if (rc != pcmk_rc_ok) {
+ if (provider == NULL) {
+ out->err(out, _("No agents found for standard '%s'"), agent_spec);
+ } else {
+ out->err(out, _("No agents found for standard '%s' and provider '%s'"),
+ agent_spec, provider);
+ }
+ }
+
+ lrmd_api_delete(lrmd_conn);
+ return rc;
+}
+
+// Documented in pacemaker.h
+int
+pcmk_list_agents(xmlNodePtr *xml, char *agent_spec)
+{
+ pcmk__output_t *out = NULL;
+ int rc = pcmk_rc_ok;
+
+ rc = pcmk__xml_output_new(&out, xml);
+ if (rc != pcmk_rc_ok) {
+ return rc;
+ }
+
+ lrmd__register_messages(out);
+
+ rc = pcmk__list_agents(out, agent_spec);
+ pcmk__xml_output_finish(out, xml);
+ return rc;
+}
+
+int
+pcmk__list_providers(pcmk__output_t *out, const char *agent_spec)
+{
+ int rc = pcmk_rc_ok;
+ lrmd_t *lrmd_conn = NULL;
+ lrmd_list_t *list = NULL;
+
+ CRM_ASSERT(out != NULL);
+
+ rc = lrmd__new(&lrmd_conn, NULL, NULL, 0);
+ if (rc != pcmk_rc_ok) {
+ goto error;
+ }
+
+ rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list);
+
+ if (rc > 0) {
+ rc = out->message(out, "providers-list", list, agent_spec);
+ } else {
+ rc = pcmk_rc_error;
+ }
+
+error:
+ if (rc != pcmk_rc_ok) {
+ if (agent_spec == NULL) {
+ out->err(out, _("No %s found"), "OCF providers");
+ } else {
+ out->err(out, _("No %s found for %s"), "OCF providers", agent_spec);
+ }
+
+ rc = ENXIO;
+ }
+
+ lrmd_api_delete(lrmd_conn);
+ return rc;
+}
+
+// Documented in pacemaker.h
+int
+pcmk_list_providers(xmlNodePtr *xml, const char *agent_spec)
+{
+ pcmk__output_t *out = NULL;
+ int rc = pcmk_rc_ok;
+
+ rc = pcmk__xml_output_new(&out, xml);
+ if (rc != pcmk_rc_ok) {
+ return rc;
+ }
+
+ lrmd__register_messages(out);
+
+ rc = pcmk__list_providers(out, agent_spec);
+ pcmk__xml_output_finish(out, xml);
+ return rc;
+}
+
+int
+pcmk__list_standards(pcmk__output_t *out)
+{
+ int rc = pcmk_rc_ok;
+ lrmd_t *lrmd_conn = NULL;
+ lrmd_list_t *list = NULL;
+
+ CRM_ASSERT(out != NULL);
+
+ rc = lrmd__new(&lrmd_conn, NULL, NULL, 0);
+ if (rc != pcmk_rc_ok) {
+ goto error;
+ }
+
+ rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list);
+
+ if (rc > 0) {
+ rc = out->message(out, "standards-list", list);
+ } else {
+ rc = pcmk_rc_error;
+ }
+
+error:
+ if (rc != pcmk_rc_ok) {
+ out->err(out, _("No %s found"), "standards");
+ rc = ENXIO;
+ }
+
+ lrmd_api_delete(lrmd_conn);
+ return rc;
+}
+
+// Documented in pacemaker.h
+int
+pcmk_list_standards(xmlNodePtr *xml)
+{
+ pcmk__output_t *out = NULL;
+ int rc = pcmk_rc_ok;
+
+ rc = pcmk__xml_output_new(&out, xml);
+ if (rc != pcmk_rc_ok) {
+ return rc;
+ }
+
+ lrmd__register_messages(out);
+
+ rc = pcmk__list_standards(out);
+ pcmk__xml_output_finish(out, xml);
+ return rc;
+}
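The new pcmk_agents.c above adds public wrappers (pcmk_list_standards(), pcmk_list_agents(), pcmk_list_providers(), pcmk_list_alternatives()) around the internal lrmd-based listing helpers. The following is a minimal illustrative sketch, not part of the upstream patch, of how a client linked against libpacemaker and libxml2 might call two of them; the exact public headers and the assumption that the caller owns and frees the returned XML (here with free_xml()) follow existing libpacemaker conventions but are assumptions, not taken from this diff.

    #include <stdio.h>
    #include <libxml/tree.h>

    #include <crm/common/results.h>   // pcmk_rc_ok, pcmk_rc_str()
    #include <crm/common/xml.h>       // free_xml()
    #include <pacemaker.h>            // pcmk_list_standards(), pcmk_list_agents()

    int
    main(void)
    {
        xmlNodePtr out = NULL;

        /* List all resource agent standards known to the local executor */
        int rc = pcmk_list_standards(&out);

        if (rc != pcmk_rc_ok) {
            fprintf(stderr, "Could not list standards: %s\n", pcmk_rc_str(rc));
        } else if (out != NULL) {
            xmlElemDump(stdout, NULL, out);   // dump the XML result to stdout
            printf("\n");
        }
        free_xml(out);
        out = NULL;

        /* List agents for one standard:provider pair. Note that
         * pcmk__list_agents() splits the spec in place (*provider++ = 0),
         * so a writable buffer is passed rather than a string literal.
         */
        char spec[] = "ocf:heartbeat";

        rc = pcmk_list_agents(&out, spec);
        if (rc != pcmk_rc_ok) {
            fprintf(stderr, "Could not list agents: %s\n", pcmk_rc_str(rc));
        } else if (out != NULL) {
            xmlElemDump(stdout, NULL, out);
            printf("\n");
        }
        free_xml(out);
        return (rc == pcmk_rc_ok)? 0 : 1;
    }
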
diff --git a/lib/pacemaker/pcmk_cluster_queries.c b/lib/pacemaker/pcmk_cluster_queries.c
index 6002cd4..6a12c45 100644
--- a/lib/pacemaker/pcmk_cluster_queries.c
+++ b/lib/pacemaker/pcmk_cluster_queries.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-2022 the Pacemaker project contributors
+ * Copyright 2020-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -9,7 +9,6 @@
#include <crm_internal.h>
-#include <glib.h> // gboolean, GMainLoop, etc.
#include <libxml/tree.h> // xmlNode
#include <pacemaker.h>
@@ -362,8 +361,7 @@ ipc_connect(data_t *data, enum pcmk_ipc_server server, pcmk_ipc_callback_t cb,
pcmk_register_ipc_callback(api, cb, data);
}
- rc = pcmk_connect_ipc(api, dispatch_type);
-
+ rc = pcmk__connect_ipc(api, dispatch_type, 5);
if (rc != pcmk_rc_ok) {
if (rc == EREMOTEIO) {
data->pcmkd_state = pcmk_pacemakerd_state_remote;
@@ -371,6 +369,9 @@ ipc_connect(data_t *data, enum pcmk_ipc_server server, pcmk_ipc_callback_t cb,
/* EREMOTEIO may be expected and acceptable for some callers
* on a Pacemaker Remote node
*/
+ crm_debug("Ignoring %s connection failure: No "
+ "Pacemaker Remote connection",
+ pcmk_ipc_name(api, true));
rc = pcmk_rc_ok;
} else {
out->err(out, "error: Could not connect to %s: %s",
@@ -402,7 +403,7 @@ poll_until_reply(data_t *data, pcmk_ipc_api_t *api, const char *on_node)
pcmk__output_t *out = data->out;
uint64_t start_nsec = qb_util_nano_current_get();
- uint64_t end_nsec = start_nsec;
+ uint64_t end_nsec = 0;
uint64_t elapsed_ms = 0;
uint64_t remaining_ms = data->message_timeout_ms;
@@ -806,7 +807,7 @@ struct node_data {
int found;
const char *field; /* XML attribute to check for node name */
const char *type;
- gboolean bash_export;
+ bool bash_export;
};
static void
@@ -819,16 +820,13 @@ remote_node_print_helper(xmlNode *result, void *user_data)
// node name and node id are the same for remote/guest nodes
out->message(out, "crmadmin-node", data->type,
- name ? name : id,
- id,
- data->bash_export);
+ pcmk__s(name, id), id, data->bash_export);
data->found++;
}
// \return Standard Pacemaker return code
int
-pcmk__list_nodes(pcmk__output_t *out, const char *node_types,
- gboolean bash_export)
+pcmk__list_nodes(pcmk__output_t *out, const char *node_types, bool bash_export)
{
xmlNode *xml_node = NULL;
int rc;
@@ -862,7 +860,8 @@ pcmk__list_nodes(pcmk__output_t *out, const char *node_types,
remote_node_print_helper, &data);
}
- if (pcmk__str_empty(node_types) || !pcmk__strcmp(node_types, ",|^remote", pcmk__str_regex)) {
+ if (pcmk__str_empty(node_types)
+ || pcmk__str_eq(node_types, ",|^remote", pcmk__str_regex)) {
data.field = "id";
data.type = "remote";
crm_foreach_xpath_result(xml_node, PCMK__XP_REMOTE_NODE_CONFIG,
diff --git a/lib/pacemaker/pcmk_fence.c b/lib/pacemaker/pcmk_fence.c
index 7a0490f..9f86e46 100644
--- a/lib/pacemaker/pcmk_fence.c
+++ b/lib/pacemaker/pcmk_fence.c
@@ -95,11 +95,12 @@ reduce_fence_history(stonith_history_t *history)
for (np = new; ; np = np->next) {
if ((hp->state == st_done) || (hp->state == st_failed)) {
/* action not in progress */
- if (pcmk__str_eq(hp->target, np->target, pcmk__str_casei) &&
- pcmk__str_eq(hp->action, np->action, pcmk__str_none) &&
- (hp->state == np->state) &&
- ((hp->state == st_done) ||
- pcmk__str_eq(hp->delegate, np->delegate, pcmk__str_casei))) {
+ if (pcmk__str_eq(hp->target, np->target, pcmk__str_casei)
+ && pcmk__str_eq(hp->action, np->action, pcmk__str_none)
+ && (hp->state == np->state)
+ && ((hp->state == st_done)
+ || pcmk__str_eq(hp->delegate, np->delegate,
+ pcmk__str_casei))) {
/* purge older hp */
stonith_history_free(hp);
break;
@@ -146,6 +147,7 @@ async_fence_helper(gpointer user_data)
stonith_t *st = async_fence_data.st;
int call_id = 0;
int rc = stonith_api_connect_retry(st, async_fence_data.name, 10);
+ int timeout = 0;
if (rc != pcmk_ok) {
g_main_loop_quit(mainloop);
@@ -154,7 +156,8 @@ async_fence_helper(gpointer user_data)
return TRUE;
}
- st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE, notify_callback);
+ st->cmds->register_notification(st, T_STONITH_NOTIFY_FENCE,
+ notify_callback);
call_id = st->cmds->fence_with_delay(st,
st_opt_allow_suicide,
@@ -171,12 +174,12 @@ async_fence_helper(gpointer user_data)
return TRUE;
}
- st->cmds->register_callback(st,
- call_id,
- (async_fence_data.timeout/1000
- + (async_fence_data.delay > 0 ? async_fence_data.delay : 0)),
- st_opt_timeout_updates, NULL, "callback", fence_callback);
-
+ timeout = async_fence_data.timeout / 1000;
+ if (async_fence_data.delay > 0) {
+ timeout += async_fence_data.delay;
+ }
+ st->cmds->register_callback(st, call_id, timeout, st_opt_timeout_updates,
+ NULL, "callback", fence_callback);
return TRUE;
}
@@ -251,9 +254,10 @@ pcmk__fence_history(pcmk__output_t *out, stonith_t *st, const char *target,
if (broadcast) {
stonith__set_call_options(opts, target, st_opt_broadcast);
}
- rc = st->cmds->history(st, opts,
- pcmk__str_eq(target, "*", pcmk__str_none)? NULL : target,
- &history, timeout/1000);
+ if (pcmk__str_eq(target, "*", pcmk__str_none)) {
+ target = NULL;
+ }
+ rc = st->cmds->history(st, opts, target, &history, (timeout / 1000));
if (cleanup) {
// Cleanup doesn't return a history list
@@ -314,7 +318,8 @@ pcmk_fence_history(xmlNodePtr *xml, stonith_t *st, const char *target,
out->quiet = quiet;
- rc = pcmk__fence_history(out, st, target, timeout, verbose, broadcast, cleanup);
+ rc = pcmk__fence_history(out, st, target, timeout, verbose, broadcast,
+ cleanup);
pcmk__xml_output_finish(out, xml);
return rc;
}
@@ -326,15 +331,17 @@ pcmk__fence_installed(pcmk__output_t *out, stonith_t *st, unsigned int timeout)
stonith_key_value_t *devices = NULL;
int rc = pcmk_rc_ok;
- rc = st->cmds->list_agents(st, st_opt_sync_call, NULL, &devices, timeout/1000);
- /* list_agents returns a negative error code or a positive number of agents. */
+ rc = st->cmds->list_agents(st, st_opt_sync_call, NULL, &devices,
+ (timeout / 1000));
+ // rc is a negative error code or a positive number of agents
if (rc < 0) {
return pcmk_legacy2rc(rc);
}
- out->begin_list(out, "fence device", "fence devices", "Installed fence devices");
- for (stonith_key_value_t *dIter = devices; dIter; dIter = dIter->next) {
- out->list_item(out, "device", "%s", dIter->value);
+ out->begin_list(out, "fence device", "fence devices",
+ "Installed fence devices");
+ for (stonith_key_value_t *iter = devices; iter != NULL; iter = iter->next) {
+ out->list_item(out, "device", "%s", iter->value);
}
out->end_list(out);
@@ -498,9 +505,10 @@ pcmk__fence_registered(pcmk__output_t *out, stonith_t *st, const char *target,
return pcmk_legacy2rc(rc);
}
- out->begin_list(out, "fence device", "fence devices", "Registered fence devices");
- for (stonith_key_value_t *dIter = devices; dIter; dIter = dIter->next) {
- out->list_item(out, "device", "%s", dIter->value);
+ out->begin_list(out, "fence device", "fence devices",
+ "Registered fence devices");
+ for (stonith_key_value_t *iter = devices; iter != NULL; iter = iter->next) {
+ out->list_item(out, "device", "%s", iter->value);
}
out->end_list(out);
@@ -609,7 +617,8 @@ pcmk__get_fencing_history(stonith_t *st, stonith_history_t **stonith_history,
if ((st == NULL) || (st->state == stonith_disconnected)) {
rc = ENOTCONN;
} else if (fence_history != pcmk__fence_history_none) {
- rc = st->cmds->history(st, st_opt_sync_call, NULL, stonith_history, 120);
+ rc = st->cmds->history(st, st_opt_sync_call, NULL, stonith_history,
+ 120);
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
diff --git a/lib/pacemaker/pcmk_graph_consumer.c b/lib/pacemaker/pcmk_graph_consumer.c
index f2f172e..0daa00d 100644
--- a/lib/pacemaker/pcmk_graph_consumer.c
+++ b/lib/pacemaker/pcmk_graph_consumer.c
@@ -47,7 +47,10 @@ update_synapse_ready(pcmk__graph_synapse_t *synapse, int action_id)
if (pcmk_is_set(synapse->flags, pcmk__synapse_ready)) {
return; // All inputs have already been confirmed
}
- pcmk__set_synapse_flags(synapse, pcmk__synapse_ready); // Presume ready until proven otherwise
+
+ // Presume ready until proven otherwise
+ pcmk__set_synapse_flags(synapse, pcmk__synapse_ready);
+
for (GList *lpc = synapse->inputs; lpc != NULL; lpc = lpc->next) {
pcmk__graph_action_t *prereq = (pcmk__graph_action_t *) lpc->data;
@@ -56,7 +59,7 @@ update_synapse_ready(pcmk__graph_synapse_t *synapse, int action_id)
action_id, synapse->id);
pcmk__set_graph_action_flags(prereq, pcmk__graph_action_confirmed);
- } else if (!(pcmk_is_set(prereq->flags, pcmk__graph_action_confirmed))) {
+ } else if (!pcmk_is_set(prereq->flags, pcmk__graph_action_confirmed)) {
pcmk__clear_synapse_flags(synapse, pcmk__synapse_ready);
crm_trace("Synapse %d still not ready after action %d",
synapse->id, action_id);
@@ -87,14 +90,16 @@ update_synapse_confirmed(pcmk__graph_synapse_t *synapse, int action_id)
action_id, synapse->id);
pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed);
- } else if (all_confirmed && !(pcmk_is_set(action->flags, pcmk__graph_action_confirmed))) {
+ } else if (all_confirmed &&
+ !pcmk_is_set(action->flags, pcmk__graph_action_confirmed)) {
all_confirmed = false;
crm_trace("Synapse %d still not confirmed after action %d",
synapse->id, action_id);
}
}
- if (all_confirmed && !(pcmk_is_set(synapse->flags, pcmk__synapse_confirmed))) {
+ if (all_confirmed
+ && !pcmk_is_set(synapse->flags, pcmk__synapse_confirmed)) {
crm_trace("Confirmed synapse %d", synapse->id);
pcmk__set_synapse_flags(synapse, pcmk__synapse_confirmed);
}
@@ -113,13 +118,15 @@ pcmk__update_graph(pcmk__graph_t *graph, const pcmk__graph_action_t *action)
for (GList *lpc = graph->synapses; lpc != NULL; lpc = lpc->next) {
pcmk__graph_synapse_t *synapse = (pcmk__graph_synapse_t *) lpc->data;
- if (pcmk_any_flags_set(synapse->flags, pcmk__synapse_confirmed|pcmk__synapse_failed)) {
+ if (pcmk_any_flags_set(synapse->flags,
+ pcmk__synapse_confirmed|pcmk__synapse_failed)) {
continue; // This synapse already completed
} else if (pcmk_is_set(synapse->flags, pcmk__synapse_executed)) {
update_synapse_confirmed(synapse, action->id);
- } else if (!(pcmk_is_set(action->flags, pcmk__graph_action_failed)) || (synapse->priority == INFINITY)) {
+ } else if (!pcmk_is_set(action->flags, pcmk__graph_action_failed)
+ || (synapse->priority == INFINITY)) {
update_synapse_ready(synapse, action->id);
}
}
@@ -179,7 +186,9 @@ should_fire_synapse(pcmk__graph_t *graph, pcmk__graph_synapse_t *synapse)
pcmk__clear_synapse_flags(synapse, pcmk__synapse_ready);
break;
- } else if (pcmk_is_set(prereq->flags, pcmk__graph_action_failed) && !(pcmk_is_set(prereq->flags, pcmk__graph_action_can_fail))) {
+ } else if (pcmk_is_set(prereq->flags, pcmk__graph_action_failed)
+ && !pcmk_is_set(prereq->flags,
+ pcmk__graph_action_can_fail)) {
crm_trace("Input %d for synapse %d confirmed but failed",
prereq->id, synapse->id);
pcmk__clear_synapse_flags(synapse, pcmk__synapse_ready);
@@ -244,7 +253,7 @@ initiate_action(pcmk__graph_t *graph, pcmk__graph_action_t *action)
case pcmk__cluster_graph_action:
if (pcmk__str_eq(crm_element_value(action->xml, XML_LRM_ATTR_TASK),
- CRM_OP_FENCE, pcmk__str_casei)) {
+ PCMK_ACTION_STONITH, pcmk__str_none)) {
crm_trace("Executing fencing action %d (%s)",
action->id, id);
return graph_fns->fence(graph, action);
@@ -255,7 +264,7 @@ initiate_action(pcmk__graph_t *graph, pcmk__graph_action_t *action)
default:
crm_err("Unsupported graph action type <%s " XML_ATTR_ID "='%s'> "
"(bug?)",
- crm_element_name(action->xml), id);
+ action->xml->name, id);
return EINVAL;
}
}
@@ -280,7 +289,7 @@ fire_synapse(pcmk__graph_t *graph, pcmk__graph_synapse_t *synapse)
if (rc != pcmk_rc_ok) {
crm_err("Failed initiating <%s " XML_ATTR_ID "=%d> in synapse %d: "
"%s",
- crm_element_name(action->xml), action->id, synapse->id,
+ action->xml->name, action->id, synapse->id,
pcmk_rc_str(rc));
pcmk__set_synapse_flags(synapse, pcmk__synapse_confirmed);
pcmk__set_graph_action_flags(action,
@@ -374,7 +383,8 @@ pcmk__execute_graph(pcmk__graph_t *graph)
if (pcmk_is_set(synapse->flags, pcmk__synapse_confirmed)) {
graph->completed++;
- } else if (!(pcmk_is_set(synapse->flags, pcmk__synapse_failed)) && pcmk_is_set(synapse->flags, pcmk__synapse_executed)) {
+ } else if (!pcmk_is_set(synapse->flags, pcmk__synapse_failed)
+ && pcmk_is_set(synapse->flags, pcmk__synapse_executed)) {
graph->pending++;
}
}
@@ -396,7 +406,9 @@ pcmk__execute_graph(pcmk__graph_t *graph)
graph->skipped++;
continue;
- } else if (pcmk_any_flags_set(synapse->flags, pcmk__synapse_confirmed|pcmk__synapse_executed)) {
+ } else if (pcmk_any_flags_set(synapse->flags,
+ pcmk__synapse_confirmed
+ |pcmk__synapse_executed)) {
continue; // Already handled
} else if (should_fire_synapse(graph, synapse)) {
@@ -470,7 +482,6 @@ unpack_action(pcmk__graph_synapse_t *parent, xmlNode *xml_action)
{
enum pcmk__graph_action_type action_type;
pcmk__graph_action_t *action = NULL;
- const char *element = TYPE(xml_action);
const char *value = ID(xml_action);
if (value == NULL) {
@@ -479,20 +490,18 @@ unpack_action(pcmk__graph_synapse_t *parent, xmlNode *xml_action)
return NULL;
}
- if (pcmk__str_eq(element, XML_GRAPH_TAG_RSC_OP, pcmk__str_casei)) {
+ if (pcmk__xe_is(xml_action, XML_GRAPH_TAG_RSC_OP)) {
action_type = pcmk__rsc_graph_action;
- } else if (pcmk__str_eq(element, XML_GRAPH_TAG_PSEUDO_EVENT,
- pcmk__str_casei)) {
+ } else if (pcmk__xe_is(xml_action, XML_GRAPH_TAG_PSEUDO_EVENT)) {
action_type = pcmk__pseudo_graph_action;
- } else if (pcmk__str_eq(element, XML_GRAPH_TAG_CRM_EVENT,
- pcmk__str_casei)) {
+ } else if (pcmk__xe_is(xml_action, XML_GRAPH_TAG_CRM_EVENT)) {
action_type = pcmk__cluster_graph_action;
} else {
crm_err("Ignoring transition graph action of unknown type '%s' (bug?)",
- element);
+ xml_action->name);
crm_log_xml_trace(xml_action, "invalid");
return NULL;
}
@@ -531,10 +540,9 @@ unpack_action(pcmk__graph_synapse_t *parent, xmlNode *xml_action)
value = g_hash_table_lookup(action->params, "CRM_meta_can_fail");
if (value != NULL) {
+ int can_fail = 0;
- gboolean can_fail = FALSE;
- crm_str_to_boolean(value, &can_fail);
- if (can_fail) {
+ if ((crm_str_to_boolean(value, &can_fail) > 0) && (can_fail > 0)) {
pcmk__set_graph_action_flags(action, pcmk__graph_action_can_fail);
} else {
pcmk__clear_graph_action_flags(action, pcmk__graph_action_can_fail);
diff --git a/lib/pacemaker/pcmk_graph_logging.c b/lib/pacemaker/pcmk_graph_logging.c
index b922a3e..f6fc179 100644
--- a/lib/pacemaker/pcmk_graph_logging.c
+++ b/lib/pacemaker/pcmk_graph_logging.c
@@ -68,18 +68,15 @@ find_graph_action_by_id(const pcmk__graph_t *graph, int id)
return NULL;
}
- for (const GList *sIter = graph->synapses; sIter != NULL;
- sIter = sIter->next) {
+ for (const GList *synapse_iter = graph->synapses;
+ synapse_iter != NULL; synapse_iter = synapse_iter->next) {
- const pcmk__graph_synapse_t *synapse = NULL;
+ const pcmk__graph_synapse_t *synapse = synapse_iter->data;
- synapse = (const pcmk__graph_synapse_t *) sIter->data;
- for (const GList *aIter = synapse->actions; aIter != NULL;
- aIter = aIter->next) {
+ for (const GList *action_iter = synapse->actions;
+ action_iter != NULL; action_iter = action_iter->next) {
- const pcmk__graph_action_t *action = NULL;
-
- action = (const pcmk__graph_action_t *) aIter->data;
+ const pcmk__graph_action_t *action = action_iter->data;
if (action->id == id) {
return action;
}
diff --git a/lib/pacemaker/pcmk_graph_producer.c b/lib/pacemaker/pcmk_graph_producer.c
index 5484e8b..59b6176 100644
--- a/lib/pacemaker/pcmk_graph_producer.c
+++ b/lib/pacemaker/pcmk_graph_producer.c
@@ -24,13 +24,13 @@
// Convenience macros for logging action properties
#define action_type_str(flags) \
- (pcmk_is_set((flags), pe_action_pseudo)? "pseudo-action" : "action")
+ (pcmk_is_set((flags), pcmk_action_pseudo)? "pseudo-action" : "action")
#define action_optional_str(flags) \
- (pcmk_is_set((flags), pe_action_optional)? "optional" : "required")
+ (pcmk_is_set((flags), pcmk_action_optional)? "optional" : "required")
#define action_runnable_str(flags) \
- (pcmk_is_set((flags), pe_action_runnable)? "runnable" : "unrunnable")
+ (pcmk_is_set((flags), pcmk_action_runnable)? "runnable" : "unrunnable")
#define action_node_str(a) \
(((a)->node == NULL)? "no node" : (a)->node->details->uname)
@@ -61,46 +61,48 @@ add_node_to_xml_by_id(const char *id, xmlNode *xml)
* \param[in,out] xml XML to add node to
*/
static void
-add_node_to_xml(const pe_node_t *node, void *xml)
+add_node_to_xml(const pcmk_node_t *node, void *xml)
{
add_node_to_xml_by_id(node->details->id, (xmlNode *) xml);
}
/*!
* \internal
- * \brief Add XML with nodes that need an update of their maintenance state
+ * \brief Count (optionally add to XML) nodes needing maintenance state update
*
- * \param[in,out] xml Parent XML tag to add to
- * \param[in] data_set Working set for cluster
+ * \param[in,out] xml Parent XML tag to add to, if any
+ * \param[in] scheduler Scheduler data
+ *
+ * \return Count of nodes added
+ * \note Only Pacemaker Remote nodes are considered currently
*/
static int
-add_maintenance_nodes(xmlNode *xml, const pe_working_set_t *data_set)
+add_maintenance_nodes(xmlNode *xml, const pcmk_scheduler_t *scheduler)
{
- GList *gIter = NULL;
- xmlNode *maintenance =
- xml?create_xml_node(xml, XML_GRAPH_TAG_MAINTENANCE):NULL;
+ xmlNode *maintenance = NULL;
int count = 0;
- for (gIter = data_set->nodes; gIter != NULL;
- gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
- struct pe_node_shared_s *details = node->details;
+ if (xml != NULL) {
+ maintenance = create_xml_node(xml, XML_GRAPH_TAG_MAINTENANCE);
+ }
+ for (const GList *iter = scheduler->nodes;
+ iter != NULL; iter = iter->next) {
+ const pcmk_node_t *node = iter->data;
- if (!pe__is_guest_or_remote_node(node)) {
- continue; /* just remote nodes need to know atm */
- }
+ if (pe__is_guest_or_remote_node(node) &&
+ (node->details->maintenance != node->details->remote_maintenance)) {
- if (details->maintenance != details->remote_maintenance) {
- if (maintenance) {
- crm_xml_add(
- add_node_to_xml_by_id(node->details->id, maintenance),
- XML_NODE_IS_MAINTENANCE, details->maintenance?"1":"0");
+ if (maintenance != NULL) {
+ crm_xml_add(add_node_to_xml_by_id(node->details->id,
+ maintenance),
+ XML_NODE_IS_MAINTENANCE,
+ (node->details->maintenance? "1" : "0"));
}
count++;
}
}
- crm_trace("%s %d nodes to adjust maintenance-mode "
- "to transition", maintenance?"Added":"Counted", count);
+ crm_trace("%s %d nodes in need of maintenance mode update in state",
+ ((maintenance == NULL)? "Counted" : "Added"), count);
return count;
}
@@ -108,17 +110,16 @@ add_maintenance_nodes(xmlNode *xml, const pe_working_set_t *data_set)
* \internal
* \brief Add pseudo action with nodes needing maintenance state update
*
- * \param[in,out] data_set Working set for cluster
+ * \param[in,out] scheduler Scheduler data
*/
static void
-add_maintenance_update(pe_working_set_t *data_set)
+add_maintenance_update(pcmk_scheduler_t *scheduler)
{
- pe_action_t *action = NULL;
+ pcmk_action_t *action = NULL;
- if (add_maintenance_nodes(NULL, data_set)) {
- crm_trace("adding maintenance state update pseudo action");
- action = get_pseudo_op(CRM_OP_MAINTENANCE_NODES, data_set);
- pe__set_action_flags(action, pe_action_print_always);
+ if (add_maintenance_nodes(NULL, scheduler) != 0) {
+ action = get_pseudo_op(PCMK_ACTION_MAINTENANCE_NODES, scheduler);
+ pe__set_action_flags(action, pcmk_action_always_in_graph);
}
}
@@ -132,21 +133,21 @@ add_maintenance_update(pe_working_set_t *data_set)
*
* \param[in,out] xml Parent XML tag to add to
* \param[in] action Action to check for downed nodes
- * \param[in] data_set Working set for cluster
*/
static void
-add_downed_nodes(xmlNode *xml, const pe_action_t *action,
- const pe_working_set_t *data_set)
+add_downed_nodes(xmlNode *xml, const pcmk_action_t *action)
{
- CRM_CHECK(xml && action && action->node && data_set, return);
+ CRM_CHECK((xml != NULL) && (action != NULL) && (action->node != NULL),
+ return);
- if (pcmk__str_eq(action->task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
+ if (pcmk__str_eq(action->task, PCMK_ACTION_DO_SHUTDOWN, pcmk__str_none)) {
/* Shutdown makes the action's node down */
xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED);
add_node_to_xml_by_id(action->node->details->id, downed);
- } else if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH,
+ pcmk__str_none)) {
/* Fencing makes the action's node and any hosted guest nodes down */
const char *fence = g_hash_table_lookup(action->meta, "stonith_action");
@@ -154,24 +155,28 @@ add_downed_nodes(xmlNode *xml, const pe_action_t *action,
if (pcmk__is_fencing_action(fence)) {
xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED);
add_node_to_xml_by_id(action->node->details->id, downed);
- pe_foreach_guest_node(data_set, action->node, add_node_to_xml, downed);
+ pe_foreach_guest_node(action->node->details->data_set,
+ action->node, add_node_to_xml, downed);
}
} else if (action->rsc && action->rsc->is_remote_node
- && pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) {
+ && pcmk__str_eq(action->task, PCMK_ACTION_STOP,
+ pcmk__str_none)) {
/* Stopping a remote connection resource makes connected node down,
* unless it's part of a migration
*/
GList *iter;
- pe_action_t *input;
- gboolean migrating = FALSE;
+ pcmk_action_t *input;
+ bool migrating = false;
for (iter = action->actions_before; iter != NULL; iter = iter->next) {
- input = ((pe_action_wrapper_t *) iter->data)->action;
- if (input->rsc && pcmk__str_eq(action->rsc->id, input->rsc->id, pcmk__str_casei)
- && pcmk__str_eq(input->task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
- migrating = TRUE;
+ input = ((pcmk__related_action_t *) iter->data)->action;
+ if ((input->rsc != NULL)
+ && pcmk__str_eq(action->rsc->id, input->rsc->id, pcmk__str_none)
+ && pcmk__str_eq(input->task, PCMK_ACTION_MIGRATE_FROM,
+ pcmk__str_none)) {
+ migrating = true;
break;
}
}
@@ -192,9 +197,9 @@ add_downed_nodes(xmlNode *xml, const pe_action_t *action,
* \return Newly allocated string with transition graph operation key
*/
static char *
-clone_op_key(const pe_action_t *action, guint interval_ms)
+clone_op_key(const pcmk_action_t *action, guint interval_ms)
{
- if (pcmk__str_eq(action->task, RSC_NOTIFY, pcmk__str_none)) {
+ if (pcmk__str_eq(action->task, PCMK_ACTION_NOTIFY, pcmk__str_none)) {
const char *n_type = g_hash_table_lookup(action->meta, "notify_type");
const char *n_task = g_hash_table_lookup(action->meta,
"notify_operation");
@@ -218,9 +223,9 @@ clone_op_key(const pe_action_t *action, guint interval_ms)
* \param[in,out] xml Transition graph action XML for \p action
*/
static void
-add_node_details(const pe_action_t *action, xmlNode *xml)
+add_node_details(const pcmk_action_t *action, xmlNode *xml)
{
- pe_node_t *router_node = pcmk__connection_host_for_action(action);
+ pcmk_node_t *router_node = pcmk__connection_host_for_action(action);
crm_xml_add(xml, XML_LRM_ATTR_TARGET, action->node->details->uname);
crm_xml_add(xml, XML_LRM_ATTR_TARGET_UUID, action->node->details->id);
@@ -237,7 +242,7 @@ add_node_details(const pe_action_t *action, xmlNode *xml)
* \param[in,out] action_xml Transition graph action XML for \p action
*/
static void
-add_resource_details(const pe_action_t *action, xmlNode *action_xml)
+add_resource_details(const pcmk_action_t *action, xmlNode *action_xml)
{
xmlNode *rsc_xml = NULL;
const char *attr_list[] = {
@@ -256,8 +261,9 @@ add_resource_details(const pe_action_t *action, xmlNode *action_xml)
// List affected resource
- rsc_xml = create_xml_node(action_xml, crm_element_name(action->rsc->xml));
- if (pcmk_is_set(action->rsc->flags, pe_rsc_orphan)
+ rsc_xml = create_xml_node(action_xml,
+ (const char *) action->rsc->xml->name);
+ if (pcmk_is_set(action->rsc->flags, pcmk_rsc_removed)
&& (action->rsc->clone_name != NULL)) {
/* Use the numbered instance name here, because if there is more
* than one instance on a node, we need to make sure the command
@@ -272,7 +278,7 @@ add_resource_details(const pe_action_t *action, xmlNode *action_xml)
crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->clone_name);
crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id);
- } else if (!pcmk_is_set(action->rsc->flags, pe_rsc_unique)) {
+ } else if (!pcmk_is_set(action->rsc->flags, pcmk_rsc_unique)) {
const char *xml_id = ID(action->rsc->xml);
crm_debug("Using anonymous clone name %s for %s (aka %s)",
@@ -319,7 +325,7 @@ add_resource_details(const pe_action_t *action, xmlNode *action_xml)
* \param[in,out] action_xml Transition graph action XML for \p action
*/
static void
-add_action_attributes(pe_action_t *action, xmlNode *action_xml)
+add_action_attributes(pcmk_action_t *action, xmlNode *action_xml)
{
xmlNode *args_xml = NULL;
@@ -341,7 +347,8 @@ add_action_attributes(pe_action_t *action, xmlNode *action_xml)
g_hash_table_foreach(params, hash2smartfield, args_xml);
- } else if ((action->rsc != NULL) && (action->rsc->variant <= pe_native)) {
+ } else if ((action->rsc != NULL)
+ && (action->rsc->variant <= pcmk_rsc_variant_primitive)) {
GHashTable *params = pe_rsc_params(action->rsc, NULL,
action->rsc->cluster);
@@ -350,7 +357,7 @@ add_action_attributes(pe_action_t *action, xmlNode *action_xml)
g_hash_table_foreach(action->meta, hash2metafield, args_xml);
if (action->rsc != NULL) {
- pe_resource_t *parent = action->rsc;
+ pcmk_resource_t *parent = action->rsc;
while (parent != NULL) {
parent->cmds->add_graph_meta(parent, args_xml);
@@ -359,7 +366,7 @@ add_action_attributes(pe_action_t *action, xmlNode *action_xml)
pcmk__add_bundle_meta_to_xml(args_xml, action);
- } else if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_none)
+ } else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH, pcmk__str_none)
&& (action->node != NULL)) {
/* Pass the node's attributes as meta-attributes.
*
@@ -367,7 +374,8 @@ add_action_attributes(pe_action_t *action, xmlNode *action_xml)
* added in 33d99707, probably for the libfence-based implementation in
* c9a90bd, which is no longer used.
*/
- g_hash_table_foreach(action->node->details->attrs, hash2metafield, args_xml);
+ g_hash_table_foreach(action->node->details->attrs, hash2metafield,
+ args_xml);
}
sorted_xml(args_xml, action_xml, FALSE);
@@ -381,41 +389,43 @@ add_action_attributes(pe_action_t *action, xmlNode *action_xml)
* \param[in,out] parent Parent XML element to add action to
* \param[in,out] action Scheduled action
* \param[in] skip_details If false, add action details as sub-elements
- * \param[in] data_set Cluster working set
+ * \param[in] scheduler Scheduler data
*/
static void
-create_graph_action(xmlNode *parent, pe_action_t *action, bool skip_details,
- const pe_working_set_t *data_set)
+create_graph_action(xmlNode *parent, pcmk_action_t *action, bool skip_details,
+ const pcmk_scheduler_t *scheduler)
{
bool needs_node_info = true;
bool needs_maintenance_info = false;
xmlNode *action_xml = NULL;
- if ((action == NULL) || (data_set == NULL)) {
+ if ((action == NULL) || (scheduler == NULL)) {
return;
}
// Create the top-level element based on task
- if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_casei)) {
+ if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH, pcmk__str_none)) {
/* All fences need node info; guest node fences are pseudo-events */
- action_xml = create_xml_node(parent,
- pcmk_is_set(action->flags, pe_action_pseudo)?
- XML_GRAPH_TAG_PSEUDO_EVENT :
- XML_GRAPH_TAG_CRM_EVENT);
+ if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
+ action_xml = create_xml_node(parent, XML_GRAPH_TAG_PSEUDO_EVENT);
+ } else {
+ action_xml = create_xml_node(parent, XML_GRAPH_TAG_CRM_EVENT);
+ }
} else if (pcmk__str_any_of(action->task,
- CRM_OP_SHUTDOWN,
- CRM_OP_CLEAR_FAILCOUNT, NULL)) {
+ PCMK_ACTION_DO_SHUTDOWN,
+ PCMK_ACTION_CLEAR_FAILCOUNT, NULL)) {
action_xml = create_xml_node(parent, XML_GRAPH_TAG_CRM_EVENT);
- } else if (pcmk__str_eq(action->task, CRM_OP_LRM_DELETE, pcmk__str_none)) {
+ } else if (pcmk__str_eq(action->task, PCMK_ACTION_LRM_DELETE,
+ pcmk__str_none)) {
// CIB-only clean-up for shutdown locks
action_xml = create_xml_node(parent, XML_GRAPH_TAG_CRM_EVENT);
crm_xml_add(action_xml, PCMK__XA_MODE, XML_TAG_CIB);
- } else if (pcmk_is_set(action->flags, pe_action_pseudo)) {
- if (pcmk__str_eq(action->task, CRM_OP_MAINTENANCE_NODES,
+ } else if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
+ if (pcmk__str_eq(action->task, PCMK_ACTION_MAINTENANCE_NODES,
pcmk__str_none)) {
needs_maintenance_info = true;
}
@@ -439,7 +449,8 @@ create_graph_action(xmlNode *parent, pe_action_t *action, bool skip_details,
}
clone_key = clone_op_key(action, interval_ms);
crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, clone_key);
- crm_xml_add(action_xml, "internal_" XML_LRM_ATTR_TASK_KEY, action->uuid);
+ crm_xml_add(action_xml, "internal_" XML_LRM_ATTR_TASK_KEY,
+ action->uuid);
free(clone_key);
} else {
crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, action->uuid);
@@ -458,7 +469,7 @@ create_graph_action(xmlNode *parent, pe_action_t *action, bool skip_details,
}
if ((action->rsc != NULL)
- && !pcmk_is_set(action->flags, pe_action_pseudo)) {
+ && !pcmk_is_set(action->flags, pcmk_action_pseudo)) {
// This is a real resource action, so add resource details
add_resource_details(action, action_xml);
@@ -469,11 +480,11 @@ create_graph_action(xmlNode *parent, pe_action_t *action, bool skip_details,
/* List any nodes this action is expected to make down */
if (needs_node_info && (action->node != NULL)) {
- add_downed_nodes(action_xml, action, data_set);
+ add_downed_nodes(action_xml, action);
}
if (needs_maintenance_info) {
- add_maintenance_nodes(action_xml, data_set);
+ add_maintenance_nodes(action_xml, scheduler);
}
}
@@ -486,16 +497,16 @@ create_graph_action(xmlNode *parent, pe_action_t *action, bool skip_details,
* \return true if action should be added to graph, otherwise false
*/
static bool
-should_add_action_to_graph(const pe_action_t *action)
+should_add_action_to_graph(const pcmk_action_t *action)
{
- if (!pcmk_is_set(action->flags, pe_action_runnable)) {
+ if (!pcmk_is_set(action->flags, pcmk_action_runnable)) {
crm_trace("Ignoring action %s (%d): unrunnable",
action->uuid, action->id);
return false;
}
- if (pcmk_is_set(action->flags, pe_action_optional)
- && !pcmk_is_set(action->flags, pe_action_print_always)) {
+ if (pcmk_is_set(action->flags, pcmk_action_optional)
+ && !pcmk_is_set(action->flags, pcmk_action_always_in_graph)) {
crm_trace("Ignoring action %s (%d): optional",
action->uuid, action->id);
return false;
@@ -505,8 +516,9 @@ should_add_action_to_graph(const pe_action_t *action)
* with the exception of monitors and cancellation of recurring monitors.
*/
if ((action->rsc != NULL)
- && !pcmk_is_set(action->rsc->flags, pe_rsc_managed)
- && !pcmk__str_eq(action->task, RSC_STATUS, pcmk__str_none)) {
+ && !pcmk_is_set(action->rsc->flags, pcmk_rsc_managed)
+ && !pcmk__str_eq(action->task, PCMK_ACTION_MONITOR, pcmk__str_none)) {
+
const char *interval_ms_s;
/* A cancellation of a recurring monitor will get here because the task
@@ -526,21 +538,21 @@ should_add_action_to_graph(const pe_action_t *action)
/* Always add pseudo-actions, fence actions, and shutdown actions (already
* determined to be required and runnable by this point)
*/
- if (pcmk_is_set(action->flags, pe_action_pseudo)
- || pcmk__strcase_any_of(action->task, CRM_OP_FENCE, CRM_OP_SHUTDOWN,
- NULL)) {
+ if (pcmk_is_set(action->flags, pcmk_action_pseudo)
+ || pcmk__strcase_any_of(action->task, PCMK_ACTION_STONITH,
+ PCMK_ACTION_DO_SHUTDOWN, NULL)) {
return true;
}
if (action->node == NULL) {
pe_err("Skipping action %s (%d) "
- "because it was not allocated to a node (bug?)",
+ "because it was not assigned to a node (bug?)",
action->uuid, action->id);
- pcmk__log_action("Unallocated", action, false);
+ pcmk__log_action("Unassigned", action, false);
return false;
}
- if (pcmk_is_set(action->flags, pe_action_dc)) {
+ if (pcmk_is_set(action->flags, pcmk_action_on_dc)) {
crm_trace("Action %s (%d) should be dumped: "
"can run on DC instead of %s",
action->uuid, action->id, pe__node_name(action->node));
@@ -577,11 +589,12 @@ should_add_action_to_graph(const pe_action_t *action)
* \return true if ordering has flags that can change an action, false otherwise
*/
static bool
-ordering_can_change_actions(const pe_action_wrapper_t *ordering)
+ordering_can_change_actions(const pcmk__related_action_t *ordering)
{
- return pcmk_any_flags_set(ordering->type, ~(pe_order_implies_first_printed
- |pe_order_implies_then_printed
- |pe_order_optional));
+ return pcmk_any_flags_set(ordering->type,
+ ~(pcmk__ar_then_implies_first_graphed
+ |pcmk__ar_first_implies_then_graphed
+ |pcmk__ar_ordered));
}
/*!
@@ -596,20 +609,21 @@ ordering_can_change_actions(const pe_action_wrapper_t *ordering)
* circumstances (load or anti-colocation orderings that are not needed).
*/
static bool
-should_add_input_to_graph(const pe_action_t *action, pe_action_wrapper_t *input)
+should_add_input_to_graph(const pcmk_action_t *action,
+ pcmk__related_action_t *input)
{
if (input->state == pe_link_dumped) {
return true;
}
- if (input->type == pe_order_none) {
+ if ((uint32_t) input->type == pcmk__ar_none) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"ordering disabled",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
- } else if (!pcmk_is_set(input->action->flags, pe_action_runnable)
+ } else if (!pcmk_is_set(input->action->flags, pcmk_action_runnable)
&& !ordering_can_change_actions(input)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"optional and input unrunnable",
@@ -617,32 +631,32 @@ should_add_input_to_graph(const pe_action_t *action, pe_action_wrapper_t *input)
input->action->uuid, input->action->id);
return false;
- } else if (!pcmk_is_set(input->action->flags, pe_action_runnable)
- && pcmk_is_set(input->type, pe_order_one_or_more)) {
+ } else if (!pcmk_is_set(input->action->flags, pcmk_action_runnable)
+ && pcmk_is_set(input->type, pcmk__ar_min_runnable)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
- "one-or-more and input unrunnable",
+ "minimum number of instances required but input unrunnable",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
- } else if (pcmk_is_set(input->type, pe_order_implies_first_migratable)
- && !pcmk_is_set(input->action->flags, pe_action_runnable)) {
+ } else if (pcmk_is_set(input->type, pcmk__ar_unmigratable_then_blocks)
+ && !pcmk_is_set(input->action->flags, pcmk_action_runnable)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
- "implies input migratable but input unrunnable",
+ "input blocked if 'then' unmigratable",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
- } else if (pcmk_is_set(input->type, pe_order_apply_first_non_migratable)
- && pcmk_is_set(input->action->flags, pe_action_migrate_runnable)) {
- crm_trace("Ignoring %s (%d) input %s (%d): "
- "only if input unmigratable but input unrunnable",
+ } else if (pcmk_is_set(input->type, pcmk__ar_if_first_unmigratable)
+ && pcmk_is_set(input->action->flags, pcmk_action_migratable)) {
+ crm_trace("Ignoring %s (%d) input %s (%d): ordering applies "
+ "only if input is unmigratable, but it is migratable",
action->uuid, action->id,
input->action->uuid, input->action->id);
return false;
- } else if ((input->type == pe_order_optional)
- && pcmk_is_set(input->action->flags, pe_action_migrate_runnable)
+ } else if (((uint32_t) input->type == pcmk__ar_ordered)
+ && pcmk_is_set(input->action->flags, pcmk_action_migratable)
&& pcmk__ends_with(input->action->uuid, "_stop_0")) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"optional but stop in migration",
@@ -650,74 +664,73 @@ should_add_input_to_graph(const pe_action_t *action, pe_action_wrapper_t *input)
input->action->uuid, input->action->id);
return false;
- } else if (input->type == pe_order_load) {
- pe_node_t *input_node = input->action->node;
+ } else if ((uint32_t) input->type == pcmk__ar_if_on_same_node_or_target) {
+ pcmk_node_t *input_node = input->action->node;
- // load orderings are relevant only if actions are for same node
+ if ((action->rsc != NULL)
+ && pcmk__str_eq(action->task, PCMK_ACTION_MIGRATE_TO,
+ pcmk__str_none)) {
- if (action->rsc && pcmk__str_eq(action->task, RSC_MIGRATE, pcmk__str_casei)) {
- pe_node_t *allocated = action->rsc->allocated_to;
+ pcmk_node_t *assigned = action->rsc->allocated_to;
- /* For load_stopped -> migrate_to orderings, we care about where it
- * has been allocated to, not where it will be executed.
+ /* For load_stopped -> migrate_to orderings, we care about where
+ * the resource has been assigned, not where migrate_to will be
+ * executed.
*/
- if ((input_node == NULL) || (allocated == NULL)
- || (input_node->details != allocated->details)) {
+ if (!pe__same_node(input_node, assigned)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
- "load ordering node mismatch %s vs %s",
+ "migration target %s is not same as input node %s",
action->uuid, action->id,
input->action->uuid, input->action->id,
- (allocated? allocated->details->uname : "<none>"),
+ (assigned? assigned->details->uname : "<none>"),
(input_node? input_node->details->uname : "<none>"));
- input->type = pe_order_none;
+ input->type = (enum pe_ordering) pcmk__ar_none;
return false;
}
- } else if ((input_node == NULL) || (action->node == NULL)
- || (input_node->details != action->node->details)) {
+ } else if (!pe__same_node(input_node, action->node)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
- "load ordering node mismatch %s vs %s",
+ "not on same node (%s vs %s)",
action->uuid, action->id,
input->action->uuid, input->action->id,
(action->node? action->node->details->uname : "<none>"),
(input_node? input_node->details->uname : "<none>"));
- input->type = pe_order_none;
+ input->type = (enum pe_ordering) pcmk__ar_none;
return false;
- } else if (pcmk_is_set(input->action->flags, pe_action_optional)) {
+ } else if (pcmk_is_set(input->action->flags, pcmk_action_optional)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
- "load ordering input optional",
+ "ordering optional",
action->uuid, action->id,
input->action->uuid, input->action->id);
- input->type = pe_order_none;
+ input->type = (enum pe_ordering) pcmk__ar_none;
return false;
}
- } else if (input->type == pe_order_anti_colocation) {
+ } else if ((uint32_t) input->type == pcmk__ar_if_required_on_same_node) {
if (input->action->node && action->node
- && (input->action->node->details != action->node->details)) {
+ && !pe__same_node(input->action->node, action->node)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
- "anti-colocation node mismatch %s vs %s",
+ "not on same node (%s vs %s)",
action->uuid, action->id,
input->action->uuid, input->action->id,
pe__node_name(action->node),
pe__node_name(input->action->node));
- input->type = pe_order_none;
+ input->type = (enum pe_ordering) pcmk__ar_none;
return false;
- } else if (pcmk_is_set(input->action->flags, pe_action_optional)) {
- crm_trace("Ignoring %s (%d) input %s (%d): "
- "anti-colocation input optional",
+ } else if (pcmk_is_set(input->action->flags, pcmk_action_optional)) {
+ crm_trace("Ignoring %s (%d) input %s (%d): optional",
action->uuid, action->id,
input->action->uuid, input->action->id);
- input->type = pe_order_none;
+ input->type = (enum pe_ordering) pcmk__ar_none;
return false;
}
} else if (input->action->rsc
&& input->action->rsc != action->rsc
- && pcmk_is_set(input->action->rsc->flags, pe_rsc_failed)
- && !pcmk_is_set(input->action->rsc->flags, pe_rsc_managed)
+ && pcmk_is_set(input->action->rsc->flags, pcmk_rsc_failed)
+ && !pcmk_is_set(input->action->rsc->flags, pcmk_rsc_managed)
&& pcmk__ends_with(input->action->uuid, "_stop_0")
&& action->rsc && pe_rsc_is_clone(action->rsc)) {
crm_warn("Ignoring requirement that %s complete before %s:"
@@ -725,9 +738,10 @@ should_add_input_to_graph(const pe_action_t *action, pe_action_wrapper_t *input)
input->action->uuid, action->uuid);
return false;
- } else if (pcmk_is_set(input->action->flags, pe_action_optional)
+ } else if (pcmk_is_set(input->action->flags, pcmk_action_optional)
&& !pcmk_any_flags_set(input->action->flags,
- pe_action_print_always|pe_action_dumped)
+ pcmk_action_always_in_graph
+ |pcmk_action_added_to_graph)
&& !should_add_action_to_graph(input->action)) {
crm_trace("Ignoring %s (%d) input %s (%d): "
"input optional",
@@ -758,12 +772,12 @@ should_add_input_to_graph(const pe_action_t *action, pe_action_wrapper_t *input)
* \return true if the ordering creates a loop, otherwise false
*/
bool
-pcmk__graph_has_loop(const pe_action_t *init_action, const pe_action_t *action,
- pe_action_wrapper_t *input)
+pcmk__graph_has_loop(const pcmk_action_t *init_action,
+ const pcmk_action_t *action, pcmk__related_action_t *input)
{
bool has_loop = false;
- if (pcmk_is_set(input->action->flags, pe_action_tracking)) {
+ if (pcmk_is_set(input->action->flags, pcmk_action_detect_loop)) {
crm_trace("Breaking tracking loop: %s@%s -> %s@%s (%#.6x)",
input->action->uuid,
input->action->node? input->action->node->details->uname : "",
@@ -787,7 +801,7 @@ pcmk__graph_has_loop(const pe_action_t *init_action, const pe_action_t *action,
return true;
}
- pe__set_action_flags(input->action, pe_action_tracking);
+ pe__set_action_flags(input->action, pcmk_action_detect_loop);
crm_trace("Checking inputs of action %s@%s input %s@%s (%#.6x)"
"for graph loop with %s@%s ",
@@ -804,14 +818,14 @@ pcmk__graph_has_loop(const pe_action_t *init_action, const pe_action_t *action,
iter != NULL; iter = iter->next) {
if (pcmk__graph_has_loop(init_action, input->action,
- (pe_action_wrapper_t *) iter->data)) {
+ (pcmk__related_action_t *) iter->data)) {
// Recursive call already logged a debug message
has_loop = true;
break;
}
}
- pe__clear_action_flags(input->action, pe_action_tracking);
+ pe__clear_action_flags(input->action, pcmk_action_detect_loop);
if (!has_loop) {
crm_trace("No input loop found in %s@%s -> %s@%s (%#.6x)",
@@ -828,19 +842,19 @@ pcmk__graph_has_loop(const pe_action_t *init_action, const pe_action_t *action,
* \internal
* \brief Create a synapse XML element for a transition graph
*
- * \param[in] action Action that synapse is for
- * \param[in,out] data_set Cluster working set containing graph
+ * \param[in] action Action that synapse is for
+ * \param[in,out] scheduler Scheduler data containing graph
*
* \return Newly added XML element for new graph synapse
*/
static xmlNode *
-create_graph_synapse(const pe_action_t *action, pe_working_set_t *data_set)
+create_graph_synapse(const pcmk_action_t *action, pcmk_scheduler_t *scheduler)
{
int synapse_priority = 0;
- xmlNode *syn = create_xml_node(data_set->graph, "synapse");
+ xmlNode *syn = create_xml_node(scheduler->graph, "synapse");
- crm_xml_add_int(syn, XML_ATTR_ID, data_set->num_synapse);
- data_set->num_synapse++;
+ crm_xml_add_int(syn, XML_ATTR_ID, scheduler->num_synapse);
+ scheduler->num_synapse++;
if (action->rsc != NULL) {
synapse_priority = action->rsc->priority;
@@ -859,10 +873,10 @@ create_graph_synapse(const pe_action_t *action, pe_working_set_t *data_set)
* \brief Add an action to the transition graph XML if appropriate
*
* \param[in,out] data Action to possibly add
- * \param[in,out] user_data Cluster working set
+ * \param[in,out] user_data Scheduler data
*
* \note This will de-duplicate the action inputs, meaning that the
- * pe_action_wrapper_t:type flags can no longer be relied on to retain
+ * pcmk__related_action_t:type flags can no longer be relied on to retain
* their original settings. That means this MUST be called after
* pcmk__apply_orderings() is complete, and nothing after this should rely
* on those type flags. (For example, some code looks for type equal to
@@ -873,8 +887,8 @@ create_graph_synapse(const pe_action_t *action, pe_working_set_t *data_set)
static void
add_action_to_graph(gpointer data, gpointer user_data)
{
- pe_action_t *action = (pe_action_t *) data;
- pe_working_set_t *data_set = (pe_working_set_t *) user_data;
+ pcmk_action_t *action = (pcmk_action_t *) data;
+ pcmk_scheduler_t *scheduler = (pcmk_scheduler_t *) user_data;
xmlNode *syn = NULL;
xmlNode *set = NULL;
@@ -884,36 +898,36 @@ add_action_to_graph(gpointer data, gpointer user_data)
* the action to the graph, so that crm_simulate's dot graphs don't have
* duplicates).
*/
- if (!pcmk_is_set(action->flags, pe_action_dedup)) {
+ if (!pcmk_is_set(action->flags, pcmk_action_inputs_deduplicated)) {
pcmk__deduplicate_action_inputs(action);
- pe__set_action_flags(action, pe_action_dedup);
+ pe__set_action_flags(action, pcmk_action_inputs_deduplicated);
}
- if (pcmk_is_set(action->flags, pe_action_dumped) // Already added, or
- || !should_add_action_to_graph(action)) { // shouldn't be added
- return;
+ if (pcmk_is_set(action->flags, pcmk_action_added_to_graph)
+ || !should_add_action_to_graph(action)) {
+ return; // Already added, or shouldn't be
}
- pe__set_action_flags(action, pe_action_dumped);
+ pe__set_action_flags(action, pcmk_action_added_to_graph);
crm_trace("Adding action %d (%s%s%s) to graph",
action->id, action->uuid,
((action->node == NULL)? "" : " on "),
((action->node == NULL)? "" : action->node->details->uname));
- syn = create_graph_synapse(action, data_set);
+ syn = create_graph_synapse(action, scheduler);
set = create_xml_node(syn, "action_set");
in = create_xml_node(syn, "inputs");
- create_graph_action(set, action, false, data_set);
+ create_graph_action(set, action, false, scheduler);
for (GList *lpc = action->actions_before; lpc != NULL; lpc = lpc->next) {
- pe_action_wrapper_t *input = (pe_action_wrapper_t *) lpc->data;
+ pcmk__related_action_t *input = lpc->data;
if (should_add_input_to_graph(action, input)) {
xmlNode *input_xml = create_xml_node(in, "trigger");
input->state = pe_link_dumped;
- create_graph_action(input_xml, input->action, true, data_set);
+ create_graph_action(input_xml, input->action, true, scheduler);
}
}
}
@@ -960,7 +974,7 @@ pcmk__log_transition_summary(const char *filename)
* \param[in,out] rsc Resource whose actions should be added
*/
void
-pcmk__add_rsc_actions_to_graph(pe_resource_t *rsc)
+pcmk__add_rsc_actions_to_graph(pcmk_resource_t *rsc)
{
GList *iter = NULL;
@@ -972,7 +986,7 @@ pcmk__add_rsc_actions_to_graph(pe_resource_t *rsc)
// Then recursively add its children's actions (appropriate to variant)
for (iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) iter->data;
child_rsc->cmds->add_actions_to_graph(child_rsc);
}
@@ -982,10 +996,10 @@ pcmk__add_rsc_actions_to_graph(pe_resource_t *rsc)
* \internal
* \brief Create a transition graph with all cluster actions needed
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__create_graph(pe_working_set_t *data_set)
+pcmk__create_graph(pcmk_scheduler_t *scheduler)
{
GList *iter = NULL;
const char *value = NULL;
@@ -994,38 +1008,38 @@ pcmk__create_graph(pe_working_set_t *data_set)
transition_id++;
crm_trace("Creating transition graph %d", transition_id);
- data_set->graph = create_xml_node(NULL, XML_TAG_GRAPH);
+ scheduler->graph = create_xml_node(NULL, XML_TAG_GRAPH);
- value = pe_pref(data_set->config_hash, "cluster-delay");
- crm_xml_add(data_set->graph, "cluster-delay", value);
+ value = pe_pref(scheduler->config_hash, "cluster-delay");
+ crm_xml_add(scheduler->graph, "cluster-delay", value);
- value = pe_pref(data_set->config_hash, "stonith-timeout");
- crm_xml_add(data_set->graph, "stonith-timeout", value);
+ value = pe_pref(scheduler->config_hash, "stonith-timeout");
+ crm_xml_add(scheduler->graph, "stonith-timeout", value);
- crm_xml_add(data_set->graph, "failed-stop-offset", "INFINITY");
+ crm_xml_add(scheduler->graph, "failed-stop-offset", "INFINITY");
- if (pcmk_is_set(data_set->flags, pe_flag_start_failure_fatal)) {
- crm_xml_add(data_set->graph, "failed-start-offset", "INFINITY");
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_start_failure_fatal)) {
+ crm_xml_add(scheduler->graph, "failed-start-offset", "INFINITY");
} else {
- crm_xml_add(data_set->graph, "failed-start-offset", "1");
+ crm_xml_add(scheduler->graph, "failed-start-offset", "1");
}
- value = pe_pref(data_set->config_hash, "batch-limit");
- crm_xml_add(data_set->graph, "batch-limit", value);
+ value = pe_pref(scheduler->config_hash, "batch-limit");
+ crm_xml_add(scheduler->graph, "batch-limit", value);
- crm_xml_add_int(data_set->graph, "transition_id", transition_id);
+ crm_xml_add_int(scheduler->graph, "transition_id", transition_id);
- value = pe_pref(data_set->config_hash, "migration-limit");
+ value = pe_pref(scheduler->config_hash, "migration-limit");
if ((pcmk__scan_ll(value, &limit, 0LL) == pcmk_rc_ok) && (limit > 0)) {
- crm_xml_add(data_set->graph, "migration-limit", value);
+ crm_xml_add(scheduler->graph, "migration-limit", value);
}
- if (data_set->recheck_by > 0) {
+ if (scheduler->recheck_by > 0) {
char *recheck_epoch = NULL;
recheck_epoch = crm_strdup_printf("%llu",
- (long long) data_set->recheck_by);
- crm_xml_add(data_set->graph, "recheck-by", recheck_epoch);
+ (long long) scheduler->recheck_by);
+ crm_xml_add(scheduler->graph, "recheck-by", recheck_epoch);
free(recheck_epoch);
}
@@ -1035,44 +1049,48 @@ pcmk__create_graph(pe_working_set_t *data_set)
*/
// Add resource actions to graph
- for (iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
pe_rsc_trace(rsc, "Processing actions for %s", rsc->id);
rsc->cmds->add_actions_to_graph(rsc);
}
// Add pseudo-action for list of nodes with maintenance state update
- add_maintenance_update(data_set);
+ add_maintenance_update(scheduler);
// Add non-resource (node) actions
- for (iter = data_set->actions; iter != NULL; iter = iter->next) {
- pe_action_t *action = (pe_action_t *) iter->data;
+ for (iter = scheduler->actions; iter != NULL; iter = iter->next) {
+ pcmk_action_t *action = (pcmk_action_t *) iter->data;
if ((action->rsc != NULL)
&& (action->node != NULL)
&& action->node->details->shutdown
- && !pcmk_is_set(action->rsc->flags, pe_rsc_maintenance)
+ && !pcmk_is_set(action->rsc->flags, pcmk_rsc_maintenance)
&& !pcmk_any_flags_set(action->flags,
- pe_action_optional|pe_action_runnable)
- && pcmk__str_eq(action->task, RSC_STOP, pcmk__str_none)) {
+ pcmk_action_optional|pcmk_action_runnable)
+ && pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_none)) {
/* Eventually we should just ignore the 'fence' case, but for now
* it's the best way to detect (in CTS) when CIB resource updates
* are being lost.
*/
- if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)
- || (data_set->no_quorum_policy == no_quorum_ignore)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_quorate)
+ || (scheduler->no_quorum_policy == pcmk_no_quorum_ignore)) {
+ const bool managed = pcmk_is_set(action->rsc->flags,
+ pcmk_rsc_managed);
+ const bool failed = pcmk_is_set(action->rsc->flags,
+ pcmk_rsc_failed);
+
crm_crit("Cannot %s %s because of %s:%s%s (%s)",
action->node->details->unclean? "fence" : "shut down",
pe__node_name(action->node), action->rsc->id,
- pcmk_is_set(action->rsc->flags, pe_rsc_managed)? " blocked" : " unmanaged",
- pcmk_is_set(action->rsc->flags, pe_rsc_failed)? " failed" : "",
- action->uuid);
+ (managed? " blocked" : " unmanaged"),
+ (failed? " failed" : ""), action->uuid);
}
}
- add_action_to_graph((gpointer) action, (gpointer) data_set);
+ add_action_to_graph((gpointer) action, (gpointer) scheduler);
}
- crm_log_xml_trace(data_set->graph, "graph");
+ crm_log_xml_trace(scheduler->graph, "graph");
}
diff --git a/lib/pacemaker/pcmk_injections.c b/lib/pacemaker/pcmk_injections.c
index ea8fc17..f6b36e8 100644
--- a/lib/pacemaker/pcmk_injections.c
+++ b/lib/pacemaker/pcmk_injections.c
@@ -19,12 +19,12 @@
#include <dirent.h>
#include <crm/crm.h>
-#include <crm/lrmd.h> // lrmd_event_data_t, lrmd_free_event()
#include <crm/cib.h>
#include <crm/cib/internal.h>
#include <crm/common/util.h>
#include <crm/common/iso8601.h>
#include <crm/common/xml_internal.h>
+#include <crm/lrmd_events.h> // lrmd_event_data_t, etc.
#include <crm/lrmd_internal.h>
#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
@@ -35,6 +35,7 @@ bool pcmk__simulate_node_config = false;
#define XPATH_NODE_CONFIG "//" XML_CIB_TAG_NODE "[@" XML_ATTR_UNAME "='%s']"
#define XPATH_NODE_STATE "//" XML_CIB_TAG_STATE "[@" XML_ATTR_UNAME "='%s']"
+#define XPATH_NODE_STATE_BY_ID "//" XML_CIB_TAG_STATE "[@" XML_ATTR_ID "='%s']"
#define XPATH_RSC_HISTORY XPATH_NODE_STATE \
"//" XML_LRM_TAG_RESOURCE "[@" XML_ATTR_ID "='%s']"
@@ -249,7 +250,7 @@ pcmk__inject_node(cib_t *cib_conn, const char *node, const char *uuid)
}
if (found_uuid) {
- char *xpath_by_uuid = crm_strdup_printf("//" XML_CIB_TAG_STATE "[@" XML_ATTR_ID "='%s']",
+ char *xpath_by_uuid = crm_strdup_printf(XPATH_NODE_STATE_BY_ID,
found_uuid);
// It's possible that a node_state entry doesn't have an uname yet.
@@ -257,8 +258,8 @@ pcmk__inject_node(cib_t *cib_conn, const char *node, const char *uuid)
cib_xpath|cib_sync_call|cib_scope_local);
if ((cib_object != NULL) && (ID(cib_object) == NULL)) {
- crm_err("Detected multiple node_state entries for xpath=%s, bailing",
- xpath_by_uuid);
+ crm_err("Can't inject node state for %s because multiple "
+ "state entries found for ID %s", node, found_uuid);
duplicate = true;
free(xpath_by_uuid);
goto done;
@@ -266,7 +267,8 @@ pcmk__inject_node(cib_t *cib_conn, const char *node, const char *uuid)
} else if (cib_object != NULL) {
crm_xml_add(cib_object, XML_ATTR_UNAME, node);
- rc = cib_conn->cmds->modify(cib_conn, XML_CIB_TAG_STATUS, cib_object,
+ rc = cib_conn->cmds->modify(cib_conn, XML_CIB_TAG_STATUS,
+ cib_object,
cib_sync_call|cib_scope_local);
}
@@ -318,17 +320,17 @@ pcmk__inject_node_state_change(cib_t *cib_conn, const char *node, bool up)
if (up) {
pcmk__xe_set_props(cib_node,
- XML_NODE_IN_CLUSTER, XML_BOOLEAN_YES,
- XML_NODE_IS_PEER, ONLINESTATUS,
- XML_NODE_JOIN_STATE, CRMD_JOINSTATE_MEMBER,
- XML_NODE_EXPECTED, CRMD_JOINSTATE_MEMBER,
+ PCMK__XA_IN_CCM, XML_BOOLEAN_YES,
+ PCMK__XA_CRMD, ONLINESTATUS,
+ PCMK__XA_JOIN, CRMD_JOINSTATE_MEMBER,
+ PCMK__XA_EXPECTED, CRMD_JOINSTATE_MEMBER,
NULL);
} else {
pcmk__xe_set_props(cib_node,
- XML_NODE_IN_CLUSTER, XML_BOOLEAN_NO,
- XML_NODE_IS_PEER, OFFLINESTATUS,
- XML_NODE_JOIN_STATE, CRMD_JOINSTATE_DOWN,
- XML_NODE_EXPECTED, CRMD_JOINSTATE_DOWN,
+ PCMK__XA_IN_CCM, XML_BOOLEAN_NO,
+ PCMK__XA_CRMD, OFFLINESTATUS,
+ PCMK__XA_JOIN, CRMD_JOINSTATE_DOWN,
+ PCMK__XA_EXPECTED, CRMD_JOINSTATE_DOWN,
NULL);
}
crm_xml_add(cib_node, XML_ATTR_ORIGIN, crm_system_name);
@@ -400,8 +402,10 @@ pcmk__inject_resource_history(pcmk__output_t *out, xmlNode *cib_node,
if ((rclass == NULL) || (rtype == NULL)) {
// @TODO query configuration for class, provider, type
- out->err(out, "Resource %s not found in the status section of %s."
- " Please supply the class and type to continue", resource, ID(cib_node));
+ out->err(out,
+ "Resource %s not found in the status section of %s "
+ "(supply class and type to continue)",
+ resource, ID(cib_node));
return NULL;
} else if (!pcmk__strcase_any_of(rclass,
@@ -479,7 +483,7 @@ find_ticket_state(pcmk__output_t *out, cib_t *the_cib, const char *ticket_id,
}
crm_log_xml_debug(xml_search, "Match");
- if (xml_has_children(xml_search) && (ticket_id != NULL)) {
+ if ((xml_search->children != NULL) && (ticket_id != NULL)) {
out->err(out, "Multiple ticket_states match ticket_id=%s", ticket_id);
}
*ticket_state_xml = xml_search;
@@ -548,11 +552,11 @@ set_ticket_state_attr(pcmk__output_t *out, const char *ticket_id,
* \param[in,out] out Output object for displaying error messages
* \param[in] spec Action specification to inject
* \param[in,out] cib CIB object for scheduler input
- * \param[in] data_set Cluster working set
+ * \param[in] scheduler Scheduler data
*/
static void
inject_action(pcmk__output_t *out, const char *spec, cib_t *cib,
- const pe_working_set_t *data_set)
+ const pcmk_scheduler_t *scheduler)
{
int rc;
int outcome = PCMK_OCF_OK;
@@ -570,7 +574,7 @@ inject_action(pcmk__output_t *out, const char *spec, cib_t *cib,
xmlNode *cib_op = NULL;
xmlNode *cib_node = NULL;
xmlNode *cib_resource = NULL;
- const pe_resource_t *rsc = NULL;
+ const pcmk_resource_t *rsc = NULL;
lrmd_event_data_t *op = NULL;
out->message(out, "inject-spec", spec);
@@ -586,7 +590,7 @@ inject_action(pcmk__output_t *out, const char *spec, cib_t *cib,
parse_op_key(key, &resource, &task, &interval_ms);
- rsc = pe_find_resource(data_set->resources, resource);
+ rsc = pe_find_resource(scheduler->resources, resource);
if (rsc == NULL) {
out->err(out, "Invalid resource name: %s", resource);
goto done;
@@ -627,18 +631,18 @@ done:
* \internal
* \brief Inject fictitious scheduler inputs
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
* \param[in,out] cib CIB object for scheduler input to modify
* \param[in] injections Injections to apply
*/
void
-pcmk__inject_scheduler_input(pe_working_set_t *data_set, cib_t *cib,
+pcmk__inject_scheduler_input(pcmk_scheduler_t *scheduler, cib_t *cib,
const pcmk_injections_t *injections)
{
int rc = pcmk_ok;
const GList *iter = NULL;
xmlNode *cib_node = NULL;
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
out->message(out, "inject-modify-config", injections->quorum,
injections->watchdog);
@@ -654,9 +658,9 @@ pcmk__inject_scheduler_input(pe_working_set_t *data_set, cib_t *cib,
if (injections->watchdog != NULL) {
rc = cib__update_node_attr(out, cib, cib_sync_call|cib_scope_local,
- XML_CIB_TAG_CRMCONFIG, NULL, NULL, NULL, NULL,
- XML_ATTR_HAVE_WATCHDOG, injections->watchdog,
- NULL, NULL);
+ XML_CIB_TAG_CRMCONFIG, NULL, NULL, NULL,
+ NULL, XML_ATTR_HAVE_WATCHDOG,
+ injections->watchdog, NULL, NULL);
CRM_ASSERT(rc == pcmk_rc_ok);
}
@@ -707,7 +711,7 @@ pcmk__inject_scheduler_input(pe_working_set_t *data_set, cib_t *cib,
out->message(out, "inject-modify-node", "Failing", node);
cib_node = pcmk__inject_node_state_change(cib, node, true);
- crm_xml_add(cib_node, XML_NODE_IN_CLUSTER, XML_BOOLEAN_NO);
+ crm_xml_add(cib_node, PCMK__XA_IN_CCM, XML_BOOLEAN_NO);
CRM_ASSERT(cib_node != NULL);
rc = cib->cmds->modify(cib, XML_CIB_TAG_STATUS, cib_node,
@@ -753,7 +757,7 @@ pcmk__inject_scheduler_input(pe_working_set_t *data_set, cib_t *cib,
}
for (iter = injections->op_inject; iter != NULL; iter = iter->next) {
- inject_action(out, (const char *) iter->data, cib, data_set);
+ inject_action(out, (const char *) iter->data, cib, scheduler);
}
if (!out->is_quiet(out)) {
diff --git a/lib/pacemaker/pcmk_output.c b/lib/pacemaker/pcmk_output.c
index 7379516..85001da 100644
--- a/lib/pacemaker/pcmk_output.c
+++ b/lib/pacemaker/pcmk_output.c
@@ -21,11 +21,11 @@
#include <stdint.h>
static char *
-colocations_header(pe_resource_t *rsc, pcmk__colocation_t *cons,
+colocations_header(pcmk_resource_t *rsc, pcmk__colocation_t *cons,
bool dependents) {
char *retval = NULL;
- if (cons->primary_role > RSC_ROLE_STARTED) {
+ if (cons->primary_role > pcmk_role_started) {
retval = crm_strdup_printf("%s (score=%s, %s role=%s, id=%s)",
rsc->id, pcmk_readable_score(cons->score),
(dependents? "needs" : "with"),
@@ -39,7 +39,7 @@ colocations_header(pe_resource_t *rsc, pcmk__colocation_t *cons,
}
static void
-colocations_xml_node(pcmk__output_t *out, pe_resource_t *rsc,
+colocations_xml_node(pcmk__output_t *out, pcmk_resource_t *rsc,
pcmk__colocation_t *cons) {
xmlNodePtr node = NULL;
@@ -47,26 +47,29 @@ colocations_xml_node(pcmk__output_t *out, pe_resource_t *rsc,
"id", cons->id,
"rsc", cons->dependent->id,
"with-rsc", cons->primary->id,
- "score", pcmk_readable_score(cons->score),
+ "score",
+ pcmk_readable_score(cons->score),
NULL);
if (cons->node_attribute) {
- xmlSetProp(node, (pcmkXmlStr) "node-attribute", (pcmkXmlStr) cons->node_attribute);
+ xmlSetProp(node, (pcmkXmlStr) "node-attribute",
+ (pcmkXmlStr) cons->node_attribute);
}
- if (cons->dependent_role != RSC_ROLE_UNKNOWN) {
+ if (cons->dependent_role != pcmk_role_unknown) {
xmlSetProp(node, (pcmkXmlStr) "rsc-role",
(pcmkXmlStr) role2text(cons->dependent_role));
}
- if (cons->primary_role != RSC_ROLE_UNKNOWN) {
+ if (cons->primary_role != pcmk_role_unknown) {
xmlSetProp(node, (pcmkXmlStr) "with-rsc-role",
(pcmkXmlStr) role2text(cons->primary_role));
}
}
static int
-do_locations_list_xml(pcmk__output_t *out, pe_resource_t *rsc, bool add_header)
+do_locations_list_xml(pcmk__output_t *out, pcmk_resource_t *rsc,
+ bool add_header)
{
GList *lpc = NULL;
GList *list = rsc->rsc_location;
@@ -78,7 +81,7 @@ do_locations_list_xml(pcmk__output_t *out, pe_resource_t *rsc, bool add_header)
GList *lpc2 = NULL;
for (lpc2 = cons->node_list_rh; lpc2 != NULL; lpc2 = lpc2->next) {
- pe_node_t *node = (pe_node_t *) lpc2->data;
+ pcmk_node_t *node = (pcmk_node_t *) lpc2->data;
if (add_header) {
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "locations");
@@ -88,7 +91,8 @@ do_locations_list_xml(pcmk__output_t *out, pe_resource_t *rsc, bool add_header)
"node", node->details->uname,
"rsc", rsc->id,
"id", cons->id,
- "score", pcmk_readable_score(node->weight),
+ "score",
+ pcmk_readable_score(node->weight),
NULL);
}
}
@@ -100,18 +104,18 @@ do_locations_list_xml(pcmk__output_t *out, pe_resource_t *rsc, bool add_header)
return rc;
}
-PCMK__OUTPUT_ARGS("rsc-action-item", "const char *", "pe_resource_t *",
- "pe_node_t *", "pe_node_t *", "pe_action_t *",
- "pe_action_t *")
+PCMK__OUTPUT_ARGS("rsc-action-item", "const char *", "pcmk_resource_t *",
+ "pcmk_node_t *", "pcmk_node_t *", "pcmk_action_t *",
+ "pcmk_action_t *")
static int
rsc_action_item(pcmk__output_t *out, va_list args)
{
const char *change = va_arg(args, const char *);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- pe_node_t *origin = va_arg(args, pe_node_t *);
- pe_node_t *destination = va_arg(args, pe_node_t *);
- pe_action_t *action = va_arg(args, pe_action_t *);
- pe_action_t *source = va_arg(args, pe_action_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
+ pcmk_node_t *origin = va_arg(args, pcmk_node_t *);
+ pcmk_node_t *destination = va_arg(args, pcmk_node_t *);
+ pcmk_action_t *action = va_arg(args, pcmk_action_t *);
+ pcmk_action_t *source = va_arg(args, pcmk_action_t *);
int len = 0;
char *reason = NULL;
@@ -126,25 +130,25 @@ rsc_action_item(pcmk__output_t *out, va_list args)
CRM_ASSERT(action);
CRM_ASSERT(destination != NULL || origin != NULL);
- if(source == NULL) {
+ if (source == NULL) {
source = action;
}
len = strlen(rsc->id);
- if(len > rsc_width) {
+ if (len > rsc_width) {
rsc_width = len + 2;
}
- if ((rsc->role > RSC_ROLE_STARTED)
- || (rsc->next_role > RSC_ROLE_UNPROMOTED)) {
+ if ((rsc->role > pcmk_role_started)
+ || (rsc->next_role > pcmk_role_unpromoted)) {
need_role = true;
}
- if(origin != NULL && destination != NULL && origin->details == destination->details) {
+ if (pe__same_node(origin, destination)) {
same_host = true;
}
- if(rsc->role == rsc->next_role) {
+ if (rsc->role == rsc->next_role) {
same_role = true;
}
@@ -202,41 +206,43 @@ rsc_action_item(pcmk__output_t *out, va_list args)
}
len = strlen(details);
- if(len > detail_width) {
+ if (len > detail_width) {
detail_width = len;
}
- if(source->reason && !pcmk_is_set(action->flags, pe_action_runnable)) {
+ if ((source->reason != NULL)
+ && !pcmk_is_set(action->flags, pcmk_action_runnable)) {
reason = crm_strdup_printf("due to %s (blocked)", source->reason);
- } else if(source->reason) {
+ } else if (source->reason) {
reason = crm_strdup_printf("due to %s", source->reason);
- } else if (!pcmk_is_set(action->flags, pe_action_runnable)) {
+ } else if (!pcmk_is_set(action->flags, pcmk_action_runnable)) {
reason = strdup("blocked");
}
- out->list_item(out, NULL, "%-8s %-*s ( %*s )%s%s", change, rsc_width,
- rsc->id, detail_width, details, reason ? " " : "", reason ? reason : "");
+ out->list_item(out, NULL, "%-8s %-*s ( %*s )%s%s",
+ change, rsc_width, rsc->id, detail_width, details,
+ ((reason == NULL)? "" : " "), pcmk__s(reason, ""));
free(details);
free(reason);
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("rsc-action-item", "const char *", "pe_resource_t *",
- "pe_node_t *", "pe_node_t *", "pe_action_t *",
- "pe_action_t *")
+PCMK__OUTPUT_ARGS("rsc-action-item", "const char *", "pcmk_resource_t *",
+ "pcmk_node_t *", "pcmk_node_t *", "pcmk_action_t *",
+ "pcmk_action_t *")
static int
rsc_action_item_xml(pcmk__output_t *out, va_list args)
{
const char *change = va_arg(args, const char *);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- pe_node_t *origin = va_arg(args, pe_node_t *);
- pe_node_t *destination = va_arg(args, pe_node_t *);
- pe_action_t *action = va_arg(args, pe_action_t *);
- pe_action_t *source = va_arg(args, pe_action_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
+ pcmk_node_t *origin = va_arg(args, pcmk_node_t *);
+ pcmk_node_t *destination = va_arg(args, pcmk_node_t *);
+ pcmk_action_t *action = va_arg(args, pcmk_action_t *);
+ pcmk_action_t *source = va_arg(args, pcmk_action_t *);
char *change_str = NULL;
@@ -252,16 +258,16 @@ rsc_action_item_xml(pcmk__output_t *out, va_list args)
source = action;
}
- if ((rsc->role > RSC_ROLE_STARTED)
- || (rsc->next_role > RSC_ROLE_UNPROMOTED)) {
+ if ((rsc->role > pcmk_role_started)
+ || (rsc->next_role > pcmk_role_unpromoted)) {
need_role = true;
}
- if(origin != NULL && destination != NULL && origin->details == destination->details) {
+ if (pe__same_node(origin, destination)) {
same_host = true;
}
- if(rsc->role == rsc->next_role) {
+ if (rsc->role == rsc->next_role) {
same_role = true;
}
@@ -339,16 +345,17 @@ rsc_action_item_xml(pcmk__output_t *out, va_list args)
NULL);
}
- if (source->reason && !pcmk_is_set(action->flags, pe_action_runnable)) {
+ if ((source->reason != NULL)
+ && !pcmk_is_set(action->flags, pcmk_action_runnable)) {
pcmk__xe_set_props(xml,
"reason", source->reason,
"blocked", "true",
NULL);
- } else if(source->reason) {
+ } else if (source->reason != NULL) {
crm_xml_add(xml, "reason", source->reason);
- } else if (!pcmk_is_set(action->flags, pe_action_runnable)) {
+ } else if (!pcmk_is_set(action->flags, pcmk_action_runnable)) {
pcmk__xe_set_bool_attr(xml, "blocked", true);
}
@@ -356,29 +363,30 @@ rsc_action_item_xml(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("rsc-is-colocated-with-list", "pe_resource_t *", "bool")
+PCMK__OUTPUT_ARGS("rsc-is-colocated-with-list", "pcmk_resource_t *", "bool")
static int
rsc_is_colocated_with_list(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
bool recursive = va_arg(args, int);
int rc = pcmk_rc_no_output;
- if (pcmk_is_set(rsc->flags, pe_rsc_detect_loop)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_detect_loop)) {
return rc;
}
/* We're listing constraints explicitly involving rsc, so use rsc->rsc_cons
* directly rather than rsc->cmds->this_with_colocations().
*/
- pe__set_resource_flags(rsc, pe_rsc_detect_loop);
+ pe__set_resource_flags(rsc, pcmk_rsc_detect_loop);
for (GList *lpc = rsc->rsc_cons; lpc != NULL; lpc = lpc->next) {
pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data;
char *hdr = NULL;
- PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Resources %s is colocated with", rsc->id);
+ PCMK__OUTPUT_LIST_HEADER(out, false, rc,
+ "Resources %s is colocated with", rsc->id);
- if (pcmk_is_set(cons->primary->flags, pe_rsc_detect_loop)) {
+ if (pcmk_is_set(cons->primary->flags, pcmk_rsc_detect_loop)) {
out->list_item(out, NULL, "%s (id=%s - loop)",
cons->primary->id, cons->id);
continue;
@@ -388,7 +396,7 @@ rsc_is_colocated_with_list(pcmk__output_t *out, va_list args) {
out->list_item(out, NULL, "%s", hdr);
free(hdr);
- /* Empty list header just for indentation of information about this resource. */
+ // Empty list header for indentation of information about this resource
out->begin_list(out, NULL, NULL, NULL);
out->message(out, "locations-list", cons->primary);
@@ -404,26 +412,26 @@ rsc_is_colocated_with_list(pcmk__output_t *out, va_list args) {
return rc;
}
-PCMK__OUTPUT_ARGS("rsc-is-colocated-with-list", "pe_resource_t *", "bool")
+PCMK__OUTPUT_ARGS("rsc-is-colocated-with-list", "pcmk_resource_t *", "bool")
static int
rsc_is_colocated_with_list_xml(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
bool recursive = va_arg(args, int);
int rc = pcmk_rc_no_output;
- if (pcmk_is_set(rsc->flags, pe_rsc_detect_loop)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_detect_loop)) {
return rc;
}
/* We're listing constraints explicitly involving rsc, so use rsc->rsc_cons
* directly rather than rsc->cmds->this_with_colocations().
*/
- pe__set_resource_flags(rsc, pe_rsc_detect_loop);
+ pe__set_resource_flags(rsc, pcmk_rsc_detect_loop);
for (GList *lpc = rsc->rsc_cons; lpc != NULL; lpc = lpc->next) {
pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data;
- if (pcmk_is_set(cons->primary->flags, pe_rsc_detect_loop)) {
+ if (pcmk_is_set(cons->primary->flags, pcmk_rsc_detect_loop)) {
colocations_xml_node(out, cons->primary, cons);
continue;
}
@@ -440,15 +448,15 @@ rsc_is_colocated_with_list_xml(pcmk__output_t *out, va_list args) {
return rc;
}
-PCMK__OUTPUT_ARGS("rscs-colocated-with-list", "pe_resource_t *", "bool")
+PCMK__OUTPUT_ARGS("rscs-colocated-with-list", "pcmk_resource_t *", "bool")
static int
rscs_colocated_with_list(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
bool recursive = va_arg(args, int);
int rc = pcmk_rc_no_output;
- if (pcmk_is_set(rsc->flags, pe_rsc_detect_loop)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_detect_loop)) {
return rc;
}
@@ -456,14 +464,15 @@ rscs_colocated_with_list(pcmk__output_t *out, va_list args) {
* rsc->rsc_cons_lhs directly rather than
* rsc->cmds->with_this_colocations().
*/
- pe__set_resource_flags(rsc, pe_rsc_detect_loop);
+ pe__set_resource_flags(rsc, pcmk_rsc_detect_loop);
for (GList *lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) {
pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data;
char *hdr = NULL;
- PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Resources colocated with %s", rsc->id);
+ PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Resources colocated with %s",
+ rsc->id);
- if (pcmk_is_set(cons->dependent->flags, pe_rsc_detect_loop)) {
+ if (pcmk_is_set(cons->dependent->flags, pcmk_rsc_detect_loop)) {
out->list_item(out, NULL, "%s (id=%s - loop)",
cons->dependent->id, cons->id);
continue;
@@ -473,7 +482,7 @@ rscs_colocated_with_list(pcmk__output_t *out, va_list args) {
out->list_item(out, NULL, "%s", hdr);
free(hdr);
- /* Empty list header just for indentation of information about this resource. */
+ // Empty list header for indentation of information about this resource
out->begin_list(out, NULL, NULL, NULL);
out->message(out, "locations-list", cons->dependent);
@@ -489,15 +498,15 @@ rscs_colocated_with_list(pcmk__output_t *out, va_list args) {
return rc;
}
-PCMK__OUTPUT_ARGS("rscs-colocated-with-list", "pe_resource_t *", "bool")
+PCMK__OUTPUT_ARGS("rscs-colocated-with-list", "pcmk_resource_t *", "bool")
static int
rscs_colocated_with_list_xml(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
bool recursive = va_arg(args, int);
int rc = pcmk_rc_no_output;
- if (pcmk_is_set(rsc->flags, pe_rsc_detect_loop)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_detect_loop)) {
return rc;
}
@@ -505,11 +514,11 @@ rscs_colocated_with_list_xml(pcmk__output_t *out, va_list args) {
* rsc->rsc_cons_lhs directly rather than
* rsc->cmds->with_this_colocations().
*/
- pe__set_resource_flags(rsc, pe_rsc_detect_loop);
+ pe__set_resource_flags(rsc, pcmk_rsc_detect_loop);
for (GList *lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) {
pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data;
- if (pcmk_is_set(cons->dependent->flags, pe_rsc_detect_loop)) {
+ if (pcmk_is_set(cons->dependent->flags, pcmk_rsc_detect_loop)) {
colocations_xml_node(out, cons->dependent, cons);
continue;
}
@@ -526,10 +535,10 @@ rscs_colocated_with_list_xml(pcmk__output_t *out, va_list args) {
return rc;
}
-PCMK__OUTPUT_ARGS("locations-list", "pe_resource_t *")
+PCMK__OUTPUT_ARGS("locations-list", "pcmk_resource_t *")
static int
locations_list(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *lpc = NULL;
GList *list = rsc->rsc_location;
@@ -541,7 +550,7 @@ locations_list(pcmk__output_t *out, va_list args) {
GList *lpc2 = NULL;
for (lpc2 = cons->node_list_rh; lpc2 != NULL; lpc2 = lpc2->next) {
- pe_node_t *node = (pe_node_t *) lpc2->data;
+ pcmk_node_t *node = (pcmk_node_t *) lpc2->data;
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Locations");
out->list_item(out, NULL, "Node %s (score=%s, id=%s, rsc=%s)",
@@ -555,24 +564,23 @@ locations_list(pcmk__output_t *out, va_list args) {
return rc;
}
-PCMK__OUTPUT_ARGS("locations-list", "pe_resource_t *")
+PCMK__OUTPUT_ARGS("locations-list", "pcmk_resource_t *")
static int
locations_list_xml(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
return do_locations_list_xml(out, rsc, true);
}
-PCMK__OUTPUT_ARGS("locations-and-colocations", "pe_resource_t *",
- "pe_working_set_t *", "bool", "bool")
+PCMK__OUTPUT_ARGS("locations-and-colocations", "pcmk_resource_t *",
+ "bool", "bool")
static int
locations_and_colocations(pcmk__output_t *out, va_list args)
{
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
bool recursive = va_arg(args, int);
bool force = va_arg(args, int);
- pcmk__unpack_constraints(data_set);
+ pcmk__unpack_constraints(rsc->cluster);
// Constraints apply to group/clone, not member/instance
if (!force) {
@@ -581,25 +589,24 @@ locations_and_colocations(pcmk__output_t *out, va_list args)
out->message(out, "locations-list", rsc);
- pe__clear_resource_flags_on_all(data_set, pe_rsc_detect_loop);
+ pe__clear_resource_flags_on_all(rsc->cluster, pcmk_rsc_detect_loop);
out->message(out, "rscs-colocated-with-list", rsc, recursive);
- pe__clear_resource_flags_on_all(data_set, pe_rsc_detect_loop);
+ pe__clear_resource_flags_on_all(rsc->cluster, pcmk_rsc_detect_loop);
out->message(out, "rsc-is-colocated-with-list", rsc, recursive);
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("locations-and-colocations", "pe_resource_t *",
- "pe_working_set_t *", "bool", "bool")
+PCMK__OUTPUT_ARGS("locations-and-colocations", "pcmk_resource_t *",
+ "bool", "bool")
static int
locations_and_colocations_xml(pcmk__output_t *out, va_list args)
{
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
bool recursive = va_arg(args, int);
bool force = va_arg(args, int);
- pcmk__unpack_constraints(data_set);
+ pcmk__unpack_constraints(rsc->cluster);
// Constraints apply to group/clone, not member/instance
if (!force) {
@@ -609,17 +616,18 @@ locations_and_colocations_xml(pcmk__output_t *out, va_list args)
pcmk__output_xml_create_parent(out, "constraints", NULL);
do_locations_list_xml(out, rsc, false);
- pe__clear_resource_flags_on_all(data_set, pe_rsc_detect_loop);
+ pe__clear_resource_flags_on_all(rsc->cluster, pcmk_rsc_detect_loop);
out->message(out, "rscs-colocated-with-list", rsc, recursive);
- pe__clear_resource_flags_on_all(data_set, pe_rsc_detect_loop);
+ pe__clear_resource_flags_on_all(rsc->cluster, pcmk_rsc_detect_loop);
out->message(out, "rsc-is-colocated-with-list", rsc, recursive);
pcmk__output_xml_pop_parent(out);
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *")
+PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *",
+ "const char *")
static int
health(pcmk__output_t *out, va_list args)
{
@@ -634,7 +642,8 @@ health(pcmk__output_t *out, va_list args)
pcmk__s(result, "unknown result"));
}
-PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *")
+PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *",
+ "const char *")
static int
health_text(pcmk__output_t *out, va_list args)
{
@@ -655,7 +664,8 @@ health_text(pcmk__output_t *out, va_list args)
return pcmk_rc_no_output;
}
-PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *", "const char *")
+PCMK__OUTPUT_ARGS("health", "const char *", "const char *", "const char *",
+ "const char *")
static int
health_xml(pcmk__output_t *out, va_list args)
{
@@ -890,7 +900,8 @@ dc_xml(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *", "bool")
+PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *",
+ "const char *", "bool")
static int
crmadmin_node(pcmk__output_t *out, va_list args)
{
@@ -908,7 +919,8 @@ crmadmin_node(pcmk__output_t *out, va_list args)
}
}
-PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *", "bool")
+PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *",
+ "const char *", "bool")
static int
crmadmin_node_text(pcmk__output_t *out, va_list args)
{
@@ -925,7 +937,8 @@ crmadmin_node_text(pcmk__output_t *out, va_list args)
}
}
-PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *", "const char *", "bool")
+PCMK__OUTPUT_ARGS("crmadmin-node", "const char *", "const char *",
+ "const char *", "bool")
static int
crmadmin_node_xml(pcmk__output_t *out, va_list args)
{
@@ -942,13 +955,13 @@ crmadmin_node_xml(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("digests", "const pe_resource_t *", "const pe_node_t *",
+PCMK__OUTPUT_ARGS("digests", "const pcmk_resource_t *", "const pcmk_node_t *",
"const char *", "guint", "const op_digest_cache_t *")
static int
digests_text(pcmk__output_t *out, va_list args)
{
- const pe_resource_t *rsc = va_arg(args, const pe_resource_t *);
- const pe_node_t *node = va_arg(args, const pe_node_t *);
+ const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *);
+ const pcmk_node_t *node = va_arg(args, const pcmk_node_t *);
const char *task = va_arg(args, const char *);
guint interval_ms = va_arg(args, guint);
const op_digest_cache_t *digests = va_arg(args, const op_digest_cache_t *);
@@ -960,7 +973,7 @@ digests_text(pcmk__output_t *out, va_list args)
if (interval_ms != 0) {
action_desc = crm_strdup_printf("%ums-interval %s action", interval_ms,
((task == NULL)? "unknown" : task));
- } else if (pcmk__str_eq(task, "monitor", pcmk__str_none)) {
+ } else if (pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_none)) {
action_desc = strdup("probe action");
} else {
action_desc = crm_strdup_printf("%s action",
@@ -1012,13 +1025,13 @@ add_digest_xml(xmlNode *parent, const char *type, const char *digest,
}
}
-PCMK__OUTPUT_ARGS("digests", "const pe_resource_t *", "const pe_node_t *",
+PCMK__OUTPUT_ARGS("digests", "const pcmk_resource_t *", "const pcmk_node_t *",
"const char *", "guint", "const op_digest_cache_t *")
static int
digests_xml(pcmk__output_t *out, va_list args)
{
- const pe_resource_t *rsc = va_arg(args, const pe_resource_t *);
- const pe_node_t *node = va_arg(args, const pe_node_t *);
+ const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *);
+ const pcmk_node_t *node = va_arg(args, const pcmk_node_t *);
const char *task = va_arg(args, const char *);
guint interval_ms = va_arg(args, guint);
const op_digest_cache_t *digests = va_arg(args, const op_digest_cache_t *);
@@ -1028,7 +1041,8 @@ digests_xml(pcmk__output_t *out, va_list args)
xml = pcmk__output_create_xml_node(out, "digests",
"resource", pcmk__s(rsc->id, ""),
- "node", pcmk__s(node->details->uname, ""),
+ "node",
+ pcmk__s(node->details->uname, ""),
"task", pcmk__s(task, ""),
"interval", interval_s,
NULL);
@@ -1045,111 +1059,124 @@ digests_xml(pcmk__output_t *out, va_list args)
}
#define STOP_SANITY_ASSERT(lineno) do { \
- if(current && current->details->unclean) { \
+ if ((current != NULL) && current->details->unclean) { \
/* It will be a pseudo op */ \
- } else if(stop == NULL) { \
+ } else if (stop == NULL) { \
crm_err("%s:%d: No stop action exists for %s", \
__func__, lineno, rsc->id); \
CRM_ASSERT(stop != NULL); \
- } else if (pcmk_is_set(stop->flags, pe_action_optional)) { \
+ } else if (pcmk_is_set(stop->flags, pcmk_action_optional)) { \
crm_err("%s:%d: Action %s is still optional", \
__func__, lineno, stop->uuid); \
- CRM_ASSERT(!pcmk_is_set(stop->flags, pe_action_optional)); \
+ CRM_ASSERT(!pcmk_is_set(stop->flags, pcmk_action_optional));\
} \
- } while(0)
+ } while (0)
-PCMK__OUTPUT_ARGS("rsc-action", "pe_resource_t *", "pe_node_t *", "pe_node_t *")
+PCMK__OUTPUT_ARGS("rsc-action", "pcmk_resource_t *", "pcmk_node_t *",
+ "pcmk_node_t *")
static int
rsc_action_default(pcmk__output_t *out, va_list args)
{
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- pe_node_t *current = va_arg(args, pe_node_t *);
- pe_node_t *next = va_arg(args, pe_node_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
+ pcmk_node_t *current = va_arg(args, pcmk_node_t *);
+ pcmk_node_t *next = va_arg(args, pcmk_node_t *);
GList *possible_matches = NULL;
char *key = NULL;
int rc = pcmk_rc_no_output;
bool moving = false;
- pe_node_t *start_node = NULL;
- pe_action_t *start = NULL;
- pe_action_t *stop = NULL;
- pe_action_t *promote = NULL;
- pe_action_t *demote = NULL;
+ pcmk_node_t *start_node = NULL;
+ pcmk_action_t *start = NULL;
+ pcmk_action_t *stop = NULL;
+ pcmk_action_t *promote = NULL;
+ pcmk_action_t *demote = NULL;
+ pcmk_action_t *reason_op = NULL;
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)
|| (current == NULL && next == NULL)) {
+ const bool managed = pcmk_is_set(rsc->flags, pcmk_rsc_managed);
+
pe_rsc_info(rsc, "Leave %s\t(%s%s)",
rsc->id, role2text(rsc->role),
- !pcmk_is_set(rsc->flags, pe_rsc_managed)? " unmanaged" : "");
+ (managed? "" : " unmanaged"));
return rc;
}
moving = (current != NULL) && (next != NULL)
- && (current->details != next->details);
+ && !pe__same_node(current, next);
- possible_matches = pe__resource_actions(rsc, next, RSC_START, false);
+ possible_matches = pe__resource_actions(rsc, next, PCMK_ACTION_START,
+ false);
if (possible_matches) {
start = possible_matches->data;
g_list_free(possible_matches);
}
- if ((start == NULL) || !pcmk_is_set(start->flags, pe_action_runnable)) {
+ if ((start == NULL)
+ || !pcmk_is_set(start->flags, pcmk_action_runnable)) {
start_node = NULL;
} else {
start_node = current;
}
- possible_matches = pe__resource_actions(rsc, start_node, RSC_STOP, false);
+ possible_matches = pe__resource_actions(rsc, start_node, PCMK_ACTION_STOP,
+ false);
if (possible_matches) {
stop = possible_matches->data;
g_list_free(possible_matches);
- } else if (pcmk_is_set(rsc->flags, pe_rsc_stop_unexpected)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_stop_unexpected)) {
/* The resource is multiply active with multiple-active set to
* stop_unexpected, and not stopping on its current node, but it should
* be stopping elsewhere.
*/
- possible_matches = pe__resource_actions(rsc, NULL, RSC_STOP, false);
+ possible_matches = pe__resource_actions(rsc, NULL, PCMK_ACTION_STOP,
+ false);
if (possible_matches != NULL) {
stop = possible_matches->data;
g_list_free(possible_matches);
}
}
- possible_matches = pe__resource_actions(rsc, next, RSC_PROMOTE, false);
+ possible_matches = pe__resource_actions(rsc, next, PCMK_ACTION_PROMOTE,
+ false);
if (possible_matches) {
promote = possible_matches->data;
g_list_free(possible_matches);
}
- possible_matches = pe__resource_actions(rsc, next, RSC_DEMOTE, false);
+ possible_matches = pe__resource_actions(rsc, next, PCMK_ACTION_DEMOTE,
+ false);
if (possible_matches) {
demote = possible_matches->data;
g_list_free(possible_matches);
}
if (rsc->role == rsc->next_role) {
- pe_action_t *migrate_op = NULL;
+ pcmk_action_t *migrate_op = NULL;
CRM_CHECK(next != NULL, return rc);
- possible_matches = pe__resource_actions(rsc, next, RSC_MIGRATED, false);
+ possible_matches = pe__resource_actions(rsc, next,
+ PCMK_ACTION_MIGRATE_FROM,
+ false);
if (possible_matches) {
migrate_op = possible_matches->data;
}
if ((migrate_op != NULL) && (current != NULL)
- && pcmk_is_set(migrate_op->flags, pe_action_runnable)) {
+ && pcmk_is_set(migrate_op->flags, pcmk_action_runnable)) {
rc = out->message(out, "rsc-action-item", "Migrate", rsc, current,
next, start, NULL);
- } else if (pcmk_is_set(rsc->flags, pe_rsc_reload)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_reload)) {
rc = out->message(out, "rsc-action-item", "Reload", rsc, current,
next, start, NULL);
- } else if (start == NULL || pcmk_is_set(start->flags, pe_action_optional)) {
+ } else if ((start == NULL)
+ || pcmk_is_set(start->flags, pcmk_action_optional)) {
if ((demote != NULL) && (promote != NULL)
- && !pcmk_is_set(demote->flags, pe_action_optional)
- && !pcmk_is_set(promote->flags, pe_action_optional)) {
+ && !pcmk_is_set(demote->flags, pcmk_action_optional)
+ && !pcmk_is_set(promote->flags, pcmk_action_optional)) {
rc = out->message(out, "rsc-action-item", "Re-promote", rsc,
current, next, promote, demote);
} else {
@@ -1157,16 +1184,24 @@ rsc_action_default(pcmk__output_t *out, va_list args)
role2text(rsc->role), pe__node_name(next));
}
- } else if (!pcmk_is_set(start->flags, pe_action_runnable)) {
+ } else if (!pcmk_is_set(start->flags, pcmk_action_runnable)) {
+ if ((stop == NULL) || (stop->reason == NULL)) {
+ reason_op = start;
+ } else {
+ reason_op = stop;
+ }
rc = out->message(out, "rsc-action-item", "Stop", rsc, current,
- NULL, stop, (stop && stop->reason)? stop : start);
+ NULL, stop, reason_op);
STOP_SANITY_ASSERT(__LINE__);
} else if (moving && current) {
- rc = out->message(out, "rsc-action-item", pcmk_is_set(rsc->flags, pe_rsc_failed)? "Recover" : "Move",
- rsc, current, next, stop, NULL);
+ const bool failed = pcmk_is_set(rsc->flags, pcmk_rsc_failed);
- } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ rc = out->message(out, "rsc-action-item",
+ (failed? "Recover" : "Move"), rsc, current, next,
+ stop, NULL);
+
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
rc = out->message(out, "rsc-action-item", "Recover", rsc, current,
NULL, stop, NULL);
STOP_SANITY_ASSERT(__LINE__);
@@ -1174,36 +1209,46 @@ rsc_action_default(pcmk__output_t *out, va_list args)
} else {
rc = out->message(out, "rsc-action-item", "Restart", rsc, current,
next, start, NULL);
- /* STOP_SANITY_ASSERT(__LINE__); False positive for migrate-fail-7 */
+#if 0
+ /* @TODO This can be reached in situations that should really be
+ * "Start" (see for example the migrate-fail-7 regression test)
+ */
+ STOP_SANITY_ASSERT(__LINE__);
+#endif
}
g_list_free(possible_matches);
return rc;
}
- if(stop
- && (rsc->next_role == RSC_ROLE_STOPPED
- || (start && !pcmk_is_set(start->flags, pe_action_runnable)))) {
-
- GList *gIter = NULL;
+ if ((stop != NULL)
+ && ((rsc->next_role == pcmk_role_stopped)
+ || ((start != NULL)
+ && !pcmk_is_set(start->flags, pcmk_action_runnable)))) {
key = stop_key(rsc);
- for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
- pe_action_t *stop_op = NULL;
+ for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
+ pcmk_node_t *node = iter->data;
+ pcmk_action_t *stop_op = NULL;
+ reason_op = start;
possible_matches = find_actions(rsc->actions, key, node);
if (possible_matches) {
stop_op = possible_matches->data;
g_list_free(possible_matches);
}
- if (stop_op && (stop_op->flags & pe_action_runnable)) {
- STOP_SANITY_ASSERT(__LINE__);
+ if (stop_op != NULL) {
+ if (pcmk_is_set(stop_op->flags, pcmk_action_runnable)) {
+ STOP_SANITY_ASSERT(__LINE__);
+ }
+ if (stop_op->reason != NULL) {
+ reason_op = stop_op;
+ }
}
if (out->message(out, "rsc-action-item", "Stop", rsc, node, NULL,
- stop_op, (stop_op && stop_op->reason)? stop_op : start) == pcmk_rc_ok) {
+ stop_op, reason_op) == pcmk_rc_ok) {
rc = pcmk_rc_ok;
}
}
@@ -1211,7 +1256,8 @@ rsc_action_default(pcmk__output_t *out, va_list args)
free(key);
} else if ((stop != NULL)
- && pcmk_all_flags_set(rsc->flags, pe_rsc_failed|pe_rsc_stop)) {
+ && pcmk_all_flags_set(rsc->flags,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed)) {
/* 'stop' may be NULL if the failure was ignored */
rc = out->message(out, "rsc-action-item", "Recover", rsc, current,
next, stop, start);
@@ -1222,26 +1268,28 @@ rsc_action_default(pcmk__output_t *out, va_list args)
stop, NULL);
STOP_SANITY_ASSERT(__LINE__);
- } else if (pcmk_is_set(rsc->flags, pe_rsc_reload)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_reload)) {
rc = out->message(out, "rsc-action-item", "Reload", rsc, current, next,
start, NULL);
- } else if (stop != NULL && !pcmk_is_set(stop->flags, pe_action_optional)) {
+ } else if ((stop != NULL)
+ && !pcmk_is_set(stop->flags, pcmk_action_optional)) {
rc = out->message(out, "rsc-action-item", "Restart", rsc, current,
next, start, NULL);
STOP_SANITY_ASSERT(__LINE__);
- } else if (rsc->role == RSC_ROLE_PROMOTED) {
+ } else if (rsc->role == pcmk_role_promoted) {
CRM_LOG_ASSERT(current != NULL);
rc = out->message(out, "rsc-action-item", "Demote", rsc, current,
next, demote, NULL);
- } else if (rsc->next_role == RSC_ROLE_PROMOTED) {
+ } else if (rsc->next_role == pcmk_role_promoted) {
CRM_LOG_ASSERT(next);
rc = out->message(out, "rsc-action-item", "Promote", rsc, current,
next, promote, NULL);
- } else if (rsc->role == RSC_ROLE_STOPPED && rsc->next_role > RSC_ROLE_STOPPED) {
+ } else if ((rsc->role == pcmk_role_stopped)
+ && (rsc->next_role > pcmk_role_stopped)) {
rc = out->message(out, "rsc-action-item", "Start", rsc, current, next,
start, NULL);
}
@@ -1291,12 +1339,12 @@ node_action_xml(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-info", "int", "const char *", "const char *",
+PCMK__OUTPUT_ARGS("node-info", "uint32_t", "const char *", "const char *",
"const char *", "bool", "bool")
static int
node_info_default(pcmk__output_t *out, va_list args)
{
- int node_id = va_arg(args, int);
+ uint32_t node_id = va_arg(args, uint32_t);
const char *node_name = va_arg(args, const char *);
const char *uuid = va_arg(args, const char *);
const char *state = va_arg(args, const char *);
@@ -1304,32 +1352,32 @@ node_info_default(pcmk__output_t *out, va_list args)
bool is_remote = (bool) va_arg(args, int);
return out->info(out,
- "Node %d: %s "
+ "Node %" PRIu32 ": %s "
"(uuid=%s, state=%s, have_quorum=%s, is_remote=%s)",
node_id, pcmk__s(node_name, "unknown"),
pcmk__s(uuid, "unknown"), pcmk__s(state, "unknown"),
pcmk__btoa(have_quorum), pcmk__btoa(is_remote));
}
-PCMK__OUTPUT_ARGS("node-info", "int", "const char *", "const char *",
+PCMK__OUTPUT_ARGS("node-info", "uint32_t", "const char *", "const char *",
"const char *", "bool", "bool")
static int
node_info_xml(pcmk__output_t *out, va_list args)
{
- int node_id = va_arg(args, int);
+ uint32_t node_id = va_arg(args, uint32_t);
const char *node_name = va_arg(args, const char *);
const char *uuid = va_arg(args, const char *);
const char *state = va_arg(args, const char *);
bool have_quorum = (bool) va_arg(args, int);
bool is_remote = (bool) va_arg(args, int);
- char *id_s = crm_strdup_printf("%d", node_id);
+ char *id_s = crm_strdup_printf("%" PRIu32, node_id);
pcmk__output_create_xml_node(out, "node-info",
"nodeid", id_s,
XML_ATTR_UNAME, node_name,
XML_ATTR_ID, uuid,
- XML_NODE_IS_PEER, state,
+ PCMK__XA_CRMD, state,
XML_ATTR_HAVE_QUORUM, pcmk__btoa(have_quorum),
XML_NODE_IS_REMOTE, pcmk__btoa(is_remote),
NULL);
@@ -1337,7 +1385,8 @@ node_info_xml(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("inject-cluster-action", "const char *", "const char *", "xmlNodePtr")
+PCMK__OUTPUT_ARGS("inject-cluster-action", "const char *", "const char *",
+ "xmlNodePtr")
static int
inject_cluster_action(pcmk__output_t *out, va_list args)
{
@@ -1349,8 +1398,9 @@ inject_cluster_action(pcmk__output_t *out, va_list args)
return pcmk_rc_no_output;
}
- if(rsc) {
- out->list_item(out, NULL, "Cluster action: %s for %s on %s", task, ID(rsc), node);
+ if (rsc != NULL) {
+ out->list_item(out, NULL, "Cluster action: %s for %s on %s",
+ task, ID(rsc), node);
} else {
out->list_item(out, NULL, "Cluster action: %s on %s", task, node);
}
@@ -1358,7 +1408,8 @@ inject_cluster_action(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("inject-cluster-action", "const char *", "const char *", "xmlNodePtr")
+PCMK__OUTPUT_ARGS("inject-cluster-action", "const char *", "const char *",
+ "xmlNodePtr")
static int
inject_cluster_action_xml(pcmk__output_t *out, va_list args)
{
@@ -1638,8 +1689,8 @@ inject_pseudo_action(pcmk__output_t *out, va_list args)
return pcmk_rc_no_output;
}
- out->list_item(out, NULL, "Pseudo action: %s%s%s", task, node ? " on " : "",
- node ? node : "");
+ out->list_item(out, NULL, "Pseudo action: %s%s%s",
+ task, ((node == NULL)? "" : " on "), pcmk__s(node, ""));
return pcmk_rc_ok;
}
@@ -1728,14 +1779,14 @@ inject_rsc_action_xml(pcmk__output_t *out, va_list args)
retcode = pcmk_rc_ok; \
}
-PCMK__OUTPUT_ARGS("cluster-status", "pe_working_set_t *",
+PCMK__OUTPUT_ARGS("cluster-status", "pcmk_scheduler_t *",
"enum pcmk_pacemakerd_state", "crm_exit_t",
"stonith_history_t *", "enum pcmk__fence_history", "uint32_t",
"uint32_t", "const char *", "GList *", "GList *")
int
pcmk__cluster_status_text(pcmk__output_t *out, va_list args)
{
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
enum pcmk_pacemakerd_state pcmkd_state =
(enum pcmk_pacemakerd_state) va_arg(args, int);
crm_exit_t history_rc = va_arg(args, crm_exit_t);
@@ -1750,39 +1801,43 @@ pcmk__cluster_status_text(pcmk__output_t *out, va_list args)
int rc = pcmk_rc_no_output;
bool already_printed_failure = false;
- CHECK_RC(rc, out->message(out, "cluster-summary", data_set, pcmkd_state,
+ CHECK_RC(rc, out->message(out, "cluster-summary", scheduler, pcmkd_state,
section_opts, show_opts));
if (pcmk_is_set(section_opts, pcmk_section_nodes) && unames) {
- CHECK_RC(rc, out->message(out, "node-list", data_set->nodes, unames,
+ CHECK_RC(rc, out->message(out, "node-list", scheduler->nodes, unames,
resources, show_opts, rc == pcmk_rc_ok));
}
/* Print resources section, if needed */
if (pcmk_is_set(section_opts, pcmk_section_resources)) {
- CHECK_RC(rc, out->message(out, "resource-list", data_set, show_opts,
+ CHECK_RC(rc, out->message(out, "resource-list", scheduler, show_opts,
true, unames, resources, rc == pcmk_rc_ok));
}
/* print Node Attributes section if requested */
if (pcmk_is_set(section_opts, pcmk_section_attributes)) {
- CHECK_RC(rc, out->message(out, "node-attribute-list", data_set,
- show_opts, rc == pcmk_rc_ok, unames, resources));
+ CHECK_RC(rc, out->message(out, "node-attribute-list", scheduler,
+ show_opts, (rc == pcmk_rc_ok), unames,
+ resources));
}
/* If requested, print resource operations (which includes failcounts)
* or just failcounts
*/
- if (pcmk_any_flags_set(section_opts, pcmk_section_operations | pcmk_section_failcounts)) {
- CHECK_RC(rc, out->message(out, "node-summary", data_set, unames,
- resources, section_opts, show_opts, rc == pcmk_rc_ok));
+ if (pcmk_any_flags_set(section_opts,
+ pcmk_section_operations|pcmk_section_failcounts)) {
+ CHECK_RC(rc, out->message(out, "node-summary", scheduler, unames,
+ resources, section_opts, show_opts,
+ (rc == pcmk_rc_ok)));
}
/* If there were any failed actions, print them */
if (pcmk_is_set(section_opts, pcmk_section_failures)
- && xml_has_children(data_set->failed)) {
+ && (scheduler->failed != NULL)
+ && (scheduler->failed->children != NULL)) {
- CHECK_RC(rc, out->message(out, "failed-action-list", data_set, unames,
+ CHECK_RC(rc, out->message(out, "failed-action-list", scheduler, unames,
resources, show_opts, rc == pcmk_rc_ok));
}
@@ -1790,9 +1845,11 @@ pcmk__cluster_status_text(pcmk__output_t *out, va_list args)
if (pcmk_is_set(section_opts, pcmk_section_fence_failed) &&
fence_history != pcmk__fence_history_none) {
if (history_rc == 0) {
- stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq,
- GINT_TO_POINTER(st_failed));
+ stonith_history_t *hp = NULL;
+ hp = stonith__first_matching_event(stonith_history,
+ stonith__event_state_eq,
+ GINT_TO_POINTER(st_failed));
if (hp) {
CHECK_RC(rc, out->message(out, "failed-fencing-list",
stonith_history, unames, section_opts,
@@ -1811,12 +1868,13 @@ pcmk__cluster_status_text(pcmk__output_t *out, va_list args)
/* Print tickets if requested */
if (pcmk_is_set(section_opts, pcmk_section_tickets)) {
- CHECK_RC(rc, out->message(out, "ticket-list", data_set, rc == pcmk_rc_ok));
+ CHECK_RC(rc, out->message(out, "ticket-list", scheduler,
+ (rc == pcmk_rc_ok)));
}
/* Print negative location constraints if requested */
if (pcmk_is_set(section_opts, pcmk_section_bans)) {
- CHECK_RC(rc, out->message(out, "ban-list", data_set, prefix, resources,
+ CHECK_RC(rc, out->message(out, "ban-list", scheduler, prefix, resources,
show_opts, rc == pcmk_rc_ok));
}
@@ -1832,17 +1890,22 @@ pcmk__cluster_status_text(pcmk__output_t *out, va_list args)
out->end_list(out);
}
} else if (pcmk_is_set(section_opts, pcmk_section_fence_worked)) {
- stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_neq,
- GINT_TO_POINTER(st_failed));
+ stonith_history_t *hp = NULL;
+ hp = stonith__first_matching_event(stonith_history,
+ stonith__event_state_neq,
+ GINT_TO_POINTER(st_failed));
if (hp) {
CHECK_RC(rc, out->message(out, "fencing-list", hp, unames,
section_opts, show_opts,
rc == pcmk_rc_ok));
}
} else if (pcmk_is_set(section_opts, pcmk_section_fence_pending)) {
- stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_pending, NULL);
+ stonith_history_t *hp = NULL;
+ hp = stonith__first_matching_event(stonith_history,
+ stonith__event_state_pending,
+ NULL);
if (hp) {
CHECK_RC(rc, out->message(out, "pending-fencing-list", hp,
unames, section_opts, show_opts,
@@ -1854,14 +1917,14 @@ pcmk__cluster_status_text(pcmk__output_t *out, va_list args)
return rc;
}
-PCMK__OUTPUT_ARGS("cluster-status", "pe_working_set_t *",
+PCMK__OUTPUT_ARGS("cluster-status", "pcmk_scheduler_t *",
"enum pcmk_pacemakerd_state", "crm_exit_t",
"stonith_history_t *", "enum pcmk__fence_history", "uint32_t",
"uint32_t", "const char *", "GList *", "GList *")
static int
cluster_status_xml(pcmk__output_t *out, va_list args)
{
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
enum pcmk_pacemakerd_state pcmkd_state =
(enum pcmk_pacemakerd_state) va_arg(args, int);
crm_exit_t history_rc = va_arg(args, crm_exit_t);
@@ -1873,12 +1936,12 @@ cluster_status_xml(pcmk__output_t *out, va_list args)
GList *unames = va_arg(args, GList *);
GList *resources = va_arg(args, GList *);
- out->message(out, "cluster-summary", data_set, pcmkd_state, section_opts,
+ out->message(out, "cluster-summary", scheduler, pcmkd_state, section_opts,
show_opts);
/*** NODES ***/
if (pcmk_is_set(section_opts, pcmk_section_nodes)) {
- out->message(out, "node-list", data_set->nodes, unames, resources,
+ out->message(out, "node-list", scheduler->nodes, unames, resources,
show_opts, false);
}
@@ -1887,29 +1950,31 @@ cluster_status_xml(pcmk__output_t *out, va_list args)
/* XML output always displays full details. */
uint32_t full_show_opts = show_opts & ~pcmk_show_brief;
- out->message(out, "resource-list", data_set, full_show_opts,
+ out->message(out, "resource-list", scheduler, full_show_opts,
false, unames, resources, false);
}
/* print Node Attributes section if requested */
if (pcmk_is_set(section_opts, pcmk_section_attributes)) {
- out->message(out, "node-attribute-list", data_set, show_opts, false,
+ out->message(out, "node-attribute-list", scheduler, show_opts, false,
unames, resources);
}
/* If requested, print resource operations (which includes failcounts)
* or just failcounts
*/
- if (pcmk_any_flags_set(section_opts, pcmk_section_operations | pcmk_section_failcounts)) {
- out->message(out, "node-summary", data_set, unames,
+ if (pcmk_any_flags_set(section_opts,
+ pcmk_section_operations|pcmk_section_failcounts)) {
+ out->message(out, "node-summary", scheduler, unames,
resources, section_opts, show_opts, false);
}
/* If there were any failed actions, print them */
if (pcmk_is_set(section_opts, pcmk_section_failures)
- && xml_has_children(data_set->failed)) {
+ && (scheduler->failed != NULL)
+ && (scheduler->failed->children != NULL)) {
- out->message(out, "failed-action-list", data_set, unames, resources,
+ out->message(out, "failed-action-list", scheduler, unames, resources,
show_opts, false);
}
@@ -1922,26 +1987,26 @@ cluster_status_xml(pcmk__output_t *out, va_list args)
/* Print tickets if requested */
if (pcmk_is_set(section_opts, pcmk_section_tickets)) {
- out->message(out, "ticket-list", data_set, false);
+ out->message(out, "ticket-list", scheduler, false);
}
/* Print negative location constraints if requested */
if (pcmk_is_set(section_opts, pcmk_section_bans)) {
- out->message(out, "ban-list", data_set, prefix, resources, show_opts,
+ out->message(out, "ban-list", scheduler, prefix, resources, show_opts,
false);
}
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("cluster-status", "pe_working_set_t *",
+PCMK__OUTPUT_ARGS("cluster-status", "pcmk_scheduler_t *",
"enum pcmk_pacemakerd_state", "crm_exit_t",
"stonith_history_t *", "enum pcmk__fence_history", "uint32_t",
"uint32_t", "const char *", "GList *", "GList *")
static int
cluster_status_html(pcmk__output_t *out, va_list args)
{
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
enum pcmk_pacemakerd_state pcmkd_state =
(enum pcmk_pacemakerd_state) va_arg(args, int);
crm_exit_t history_rc = va_arg(args, crm_exit_t);
@@ -1954,40 +2019,42 @@ cluster_status_html(pcmk__output_t *out, va_list args)
GList *resources = va_arg(args, GList *);
bool already_printed_failure = false;
- out->message(out, "cluster-summary", data_set, pcmkd_state, section_opts,
+ out->message(out, "cluster-summary", scheduler, pcmkd_state, section_opts,
show_opts);
/*** NODE LIST ***/
if (pcmk_is_set(section_opts, pcmk_section_nodes) && unames) {
- out->message(out, "node-list", data_set->nodes, unames, resources,
+ out->message(out, "node-list", scheduler->nodes, unames, resources,
show_opts, false);
}
/* Print resources section, if needed */
if (pcmk_is_set(section_opts, pcmk_section_resources)) {
- out->message(out, "resource-list", data_set, show_opts, true, unames,
+ out->message(out, "resource-list", scheduler, show_opts, true, unames,
resources, false);
}
/* print Node Attributes section if requested */
if (pcmk_is_set(section_opts, pcmk_section_attributes)) {
- out->message(out, "node-attribute-list", data_set, show_opts, false,
+ out->message(out, "node-attribute-list", scheduler, show_opts, false,
unames, resources);
}
/* If requested, print resource operations (which includes failcounts)
* or just failcounts
*/
- if (pcmk_any_flags_set(section_opts, pcmk_section_operations | pcmk_section_failcounts)) {
- out->message(out, "node-summary", data_set, unames,
+ if (pcmk_any_flags_set(section_opts,
+ pcmk_section_operations|pcmk_section_failcounts)) {
+ out->message(out, "node-summary", scheduler, unames,
resources, section_opts, show_opts, false);
}
/* If there were any failed actions, print them */
if (pcmk_is_set(section_opts, pcmk_section_failures)
- && xml_has_children(data_set->failed)) {
+ && (scheduler->failed != NULL)
+ && (scheduler->failed->children != NULL)) {
- out->message(out, "failed-action-list", data_set, unames, resources,
+ out->message(out, "failed-action-list", scheduler, unames, resources,
show_opts, false);
}
@@ -1995,12 +2062,14 @@ cluster_status_html(pcmk__output_t *out, va_list args)
if (pcmk_is_set(section_opts, pcmk_section_fence_failed) &&
fence_history != pcmk__fence_history_none) {
if (history_rc == 0) {
- stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_eq,
- GINT_TO_POINTER(st_failed));
+ stonith_history_t *hp = NULL;
+ hp = stonith__first_matching_event(stonith_history,
+ stonith__event_state_eq,
+ GINT_TO_POINTER(st_failed));
if (hp) {
- out->message(out, "failed-fencing-list", stonith_history, unames,
- section_opts, show_opts, false);
+ out->message(out, "failed-fencing-list", stonith_history,
+ unames, section_opts, show_opts, false);
}
} else {
out->begin_list(out, NULL, NULL, "Failed Fencing Actions");
@@ -2021,16 +2090,21 @@ cluster_status_html(pcmk__output_t *out, va_list args)
out->end_list(out);
}
} else if (pcmk_is_set(section_opts, pcmk_section_fence_worked)) {
- stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_neq,
- GINT_TO_POINTER(st_failed));
+ stonith_history_t *hp = NULL;
+ hp = stonith__first_matching_event(stonith_history,
+ stonith__event_state_neq,
+ GINT_TO_POINTER(st_failed));
if (hp) {
out->message(out, "fencing-list", hp, unames, section_opts,
show_opts, false);
}
} else if (pcmk_is_set(section_opts, pcmk_section_fence_pending)) {
- stonith_history_t *hp = stonith__first_matching_event(stonith_history, stonith__event_state_pending, NULL);
+ stonith_history_t *hp = NULL;
+ hp = stonith__first_matching_event(stonith_history,
+ stonith__event_state_pending,
+ NULL);
if (hp) {
out->message(out, "pending-fencing-list", hp, unames,
section_opts, show_opts, false);
@@ -2040,12 +2114,12 @@ cluster_status_html(pcmk__output_t *out, va_list args)
/* Print tickets if requested */
if (pcmk_is_set(section_opts, pcmk_section_tickets)) {
- out->message(out, "ticket-list", data_set, false);
+ out->message(out, "ticket-list", scheduler, false);
}
/* Print negative location constraints if requested */
if (pcmk_is_set(section_opts, pcmk_section_bans)) {
- out->message(out, "ban-list", data_set, prefix, resources, show_opts,
+ out->message(out, "ban-list", scheduler, prefix, resources, show_opts,
false);
}
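Note (not part of the patch): after this rename, formatted-output callers pass a pcmk_scheduler_t * where the old pe_working_set_t * used to go. A minimal caller sketch, assuming every other variable is already set up by the caller (for example crm_mon) exactly as listed in the PCMK__OUTPUT_ARGS declarations above:

    out->message(out, "cluster-status",
                 scheduler,          /* pcmk_scheduler_t *, formerly data_set */
                 pcmkd_state, history_rc, stonith_history, fence_history,
                 section_opts, show_opts, prefix, unames, resources);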
diff --git a/lib/pacemaker/pcmk_resource.c b/lib/pacemaker/pcmk_resource.c
index ee4c904..7a17838 100644
--- a/lib/pacemaker/pcmk_resource.c
+++ b/lib/pacemaker/pcmk_resource.c
@@ -28,8 +28,7 @@
"/" XML_LRM_TAG_RESOURCE "[@" XML_ATTR_ID "='%s']"
static xmlNode *
-best_op(const pe_resource_t *rsc, const pe_node_t *node,
- pe_working_set_t *data_set)
+best_op(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
char *xpath = NULL;
xmlNode *history = NULL;
@@ -41,7 +40,7 @@ best_op(const pe_resource_t *rsc, const pe_node_t *node,
// Find node's resource history
xpath = crm_strdup_printf(XPATH_OP_HISTORY, node->details->uname, rsc->id);
- history = get_xpath_object(xpath, data_set->input, LOG_NEVER);
+ history = get_xpath_object(xpath, rsc->cluster->input, LOG_NEVER);
free(xpath);
// Examine each history entry
@@ -58,9 +57,10 @@ best_op(const pe_resource_t *rsc, const pe_node_t *node,
crm_element_value_ms(lrm_rsc_op, XML_LRM_ATTR_INTERVAL, &interval_ms);
effective_op = interval_ms == 0
- && pcmk__strcase_any_of(task, RSC_STATUS,
- RSC_START, RSC_PROMOTE,
- RSC_MIGRATED, NULL);
+ && pcmk__strcase_any_of(task, PCMK_ACTION_MONITOR,
+ PCMK_ACTION_START,
+ PCMK_ACTION_PROMOTE,
+ PCMK_ACTION_MIGRATE_FROM, NULL);
if (best == NULL) {
goto is_best;
@@ -71,7 +71,7 @@ best_op(const pe_resource_t *rsc, const pe_node_t *node,
if (!effective_op) {
continue;
}
- // Do not use an ineffective non-recurring op if there's a recurring one.
+ // Do not use an ineffective non-recurring op if there's a recurring one
} else if (best_interval != 0
&& !effective_op
&& interval_ms == 0) {
@@ -115,8 +115,8 @@ is_best:
* \return Standard Pacemaker return code
*/
int
-pcmk__resource_digests(pcmk__output_t *out, pe_resource_t *rsc,
- const pe_node_t *node, GHashTable *overrides)
+pcmk__resource_digests(pcmk__output_t *out, pcmk_resource_t *rsc,
+ const pcmk_node_t *node, GHashTable *overrides)
{
const char *task = NULL;
xmlNode *xml_op = NULL;
@@ -127,13 +127,13 @@ pcmk__resource_digests(pcmk__output_t *out, pe_resource_t *rsc,
if ((out == NULL) || (rsc == NULL) || (node == NULL)) {
return EINVAL;
}
- if (rsc->variant != pe_native) {
+ if (rsc->variant != pcmk_rsc_variant_primitive) {
// Only primitives get operation digests
return EOPNOTSUPP;
}
// Find XML of operation history to use
- xml_op = best_op(rsc, node, rsc->cluster);
+ xml_op = best_op(rsc, node);
// Generate an operation key
if (xml_op != NULL) {
@@ -141,7 +141,7 @@ pcmk__resource_digests(pcmk__output_t *out, pe_resource_t *rsc,
crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
}
if (task == NULL) { // Assume start if no history is available
- task = RSC_START;
+ task = PCMK_ACTION_START;
interval_ms = 0;
}
@@ -155,9 +155,9 @@ pcmk__resource_digests(pcmk__output_t *out, pe_resource_t *rsc,
}
int
-pcmk_resource_digests(xmlNodePtr *xml, pe_resource_t *rsc,
- const pe_node_t *node, GHashTable *overrides,
- pe_working_set_t *data_set)
+pcmk_resource_digests(xmlNodePtr *xml, pcmk_resource_t *rsc,
+ const pcmk_node_t *node, GHashTable *overrides,
+ pcmk_scheduler_t *scheduler)
{
pcmk__output_t *out = NULL;
int rc = pcmk_rc_ok;
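Note (not part of the patch): the public pcmk_resource_digests() entry point keeps its pcmk_scheduler_t * argument even though the internal best_op() helper now reaches the input CIB through rsc->cluster. A hedged usage sketch, assuming rsc, node, and scheduler come from an already-unpacked CIB (for example via cluster_status()):

    xmlNodePtr digests = NULL;
    int rc = pcmk_resource_digests(&digests, rsc, node,
                                   NULL /* no parameter overrides */,
                                   scheduler);
    if (rc != pcmk_rc_ok) {
        /* handle the standard Pacemaker return code */
    }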
diff --git a/lib/pacemaker/pcmk_rule.c b/lib/pacemaker/pcmk_rule.c
index b8ca453..99c0b23 100644
--- a/lib/pacemaker/pcmk_rule.c
+++ b/lib/pacemaker/pcmk_rule.c
@@ -13,6 +13,7 @@
#include <crm/common/cib.h>
#include <crm/common/iso8601.h>
#include <crm/msg_xml.h>
+#include <crm/pengine/internal.h>
#include <crm/pengine/rules_internal.h>
#include <pacemaker-internal.h>
@@ -30,7 +31,7 @@ eval_date_expression(const xmlNode *expr, crm_time_t *now)
{
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
+ .role = pcmk_role_unknown,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
@@ -42,51 +43,51 @@ eval_date_expression(const xmlNode *expr, crm_time_t *now)
/*!
* \internal
- * \brief Initialize the cluster working set for checking rules
+ * \brief Initialize scheduler data for checking rules
*
* Make our own copies of the CIB XML and date/time object, if they're not
* \c NULL. This way we don't have to take ownership of the objects passed via
* the API.
*
- * \param[in,out] out Output object
- * \param[in] input The CIB XML to check (if \c NULL, use current CIB)
- * \param[in] date Check whether the rule is in effect at this date
- * and time (if \c NULL, use current date and time)
- * \param[out] data_set Where to store the cluster working set
+ * \param[in,out] out Output object
+ * \param[in] input The CIB XML to check (if \c NULL, use current CIB)
+ * \param[in] date Check whether the rule is in effect at this date
+ * and time (if \c NULL, use current date and time)
+ * \param[out] scheduler Where to store initialized scheduler data
*
* \return Standard Pacemaker return code
*/
static int
init_rule_check(pcmk__output_t *out, xmlNodePtr input, const crm_time_t *date,
- pe_working_set_t **data_set)
+ pcmk_scheduler_t **scheduler)
{
- // Allows for cleaner syntax than dereferencing the data_set argument
- pe_working_set_t *new_data_set = NULL;
+ // Allows for cleaner syntax than dereferencing the scheduler argument
+ pcmk_scheduler_t *new_scheduler = NULL;
- new_data_set = pe_new_working_set();
- if (new_data_set == NULL) {
+ new_scheduler = pe_new_working_set();
+ if (new_scheduler == NULL) {
return ENOMEM;
}
- pe__set_working_set_flags(new_data_set,
- pe_flag_no_counts|pe_flag_no_compat);
+ pe__set_working_set_flags(new_scheduler,
+ pcmk_sched_no_counts|pcmk_sched_no_compat);
- // Populate the working set instance
+ // Populate the scheduler data
// Make our own copy of the given input or fetch the CIB and use that
if (input != NULL) {
- new_data_set->input = copy_xml(input);
- if (new_data_set->input == NULL) {
+ new_scheduler->input = copy_xml(input);
+ if (new_scheduler->input == NULL) {
out->err(out, "Failed to copy input XML");
- pe_free_working_set(new_data_set);
+ pe_free_working_set(new_scheduler);
return ENOMEM;
}
} else {
- int rc = cib__signon_query(out, NULL, &(new_data_set->input));
+ int rc = cib__signon_query(out, NULL, &(new_scheduler->input));
if (rc != pcmk_rc_ok) {
- pe_free_working_set(new_data_set);
+ pe_free_working_set(new_scheduler);
return rc;
}
}
@@ -95,12 +96,12 @@ init_rule_check(pcmk__output_t *out, xmlNodePtr input, const crm_time_t *date,
// cluster_status() populates with the current time
if (date != NULL) {
// pcmk_copy_time() guarantees non-NULL
- new_data_set->now = pcmk_copy_time(date);
+ new_scheduler->now = pcmk_copy_time(date);
}
// Unpack everything
- cluster_status(new_data_set);
- *data_set = new_data_set;
+ cluster_status(new_scheduler);
+ *scheduler = new_scheduler;
return pcmk_rc_ok;
}
@@ -111,14 +112,14 @@ init_rule_check(pcmk__output_t *out, xmlNodePtr input, const crm_time_t *date,
* \internal
* \brief Check whether a given rule is in effect
*
- * \param[in] data_set Cluster working set
- * \param[in] rule_id The ID of the rule to check
- * \param[out] error Where to store a rule evaluation error message
+ * \param[in] scheduler Scheduler data
+ * \param[in] rule_id The ID of the rule to check
+ * \param[out] error Where to store a rule evaluation error message
*
* \return Standard Pacemaker return code
*/
static int
-eval_rule(pe_working_set_t *data_set, const char *rule_id, const char **error)
+eval_rule(pcmk_scheduler_t *scheduler, const char *rule_id, const char **error)
{
xmlNodePtr cib_constraints = NULL;
xmlNodePtr match = NULL;
@@ -130,7 +131,7 @@ eval_rule(pe_working_set_t *data_set, const char *rule_id, const char **error)
*error = NULL;
/* Rules are under the constraints node in the XML, so first find that. */
- cib_constraints = pcmk_find_cib_element(data_set->input,
+ cib_constraints = pcmk_find_cib_element(scheduler->input,
XML_CIB_TAG_CONSTRAINTS);
/* Get all rules matching the given ID that are also simple enough for us
@@ -215,7 +216,7 @@ eval_rule(pe_working_set_t *data_set, const char *rule_id, const char **error)
CRM_ASSERT(match != NULL);
CRM_ASSERT(find_expression_type(match) == time_expr);
- rc = eval_date_expression(match, data_set->now);
+ rc = eval_date_expression(match, scheduler->now);
if (rc == pcmk_rc_undetermined) {
/* pe__eval_date_expr() should return this only if something is
* malformed or missing
@@ -244,7 +245,7 @@ int
pcmk__check_rules(pcmk__output_t *out, xmlNodePtr input, const crm_time_t *date,
const char **rule_ids)
{
- pe_working_set_t *data_set = NULL;
+ pcmk_scheduler_t *scheduler = NULL;
int rc = pcmk_rc_ok;
CRM_ASSERT(out != NULL);
@@ -254,14 +255,14 @@ pcmk__check_rules(pcmk__output_t *out, xmlNodePtr input, const crm_time_t *date,
return pcmk_rc_ok;
}
- rc = init_rule_check(out, input, date, &data_set);
+ rc = init_rule_check(out, input, date, &scheduler);
if (rc != pcmk_rc_ok) {
return rc;
}
for (const char **rule_id = rule_ids; *rule_id != NULL; rule_id++) {
const char *error = NULL;
- int last_rc = eval_rule(data_set, *rule_id, &error);
+ int last_rc = eval_rule(scheduler, *rule_id, &error);
out->message(out, "rule-check", *rule_id, last_rc, error);
@@ -270,7 +271,7 @@ pcmk__check_rules(pcmk__output_t *out, xmlNodePtr input, const crm_time_t *date,
}
}
- pe_free_working_set(data_set);
+ pe_free_working_set(scheduler);
return rc;
}
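Note (not part of the patch): pcmk__check_rules() behaves the same apart from the scheduler rename. As documented in init_rule_check() above, a NULL input falls back to the current CIB, a NULL date falls back to the current date/time, and the rule_ids array must be NULL-terminated (the loop stops at the first NULL entry). Illustrative call, assuming out is an initialized pcmk__output_t:

    const char *rule_ids[] = { "my-rule-id", NULL };
    int rc = pcmk__check_rules(out, NULL /* current CIB */,
                               NULL /* current date/time */, rule_ids);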
diff --git a/lib/pacemaker/pcmk_sched_actions.c b/lib/pacemaker/pcmk_sched_actions.c
index 06d7f00..76b5584 100644
--- a/lib/pacemaker/pcmk_sched_actions.c
+++ b/lib/pacemaker/pcmk_sched_actions.c
@@ -14,6 +14,7 @@
#include <glib.h>
#include <crm/lrmd_internal.h>
+#include <crm/common/scheduler_internal.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
@@ -27,11 +28,11 @@
*
* \return Action flags that should be used for orderings
*/
-static enum pe_action_flags
-action_flags_for_ordering(pe_action_t *action, const pe_node_t *node)
+static uint32_t
+action_flags_for_ordering(pcmk_action_t *action, const pcmk_node_t *node)
{
bool runnable = false;
- enum pe_action_flags flags;
+ uint32_t flags;
// For non-resource actions, return the action flags
if (action->rsc == NULL) {
@@ -50,7 +51,7 @@ action_flags_for_ordering(pe_action_t *action, const pe_node_t *node)
/* Otherwise (i.e., for clone resource actions on a specific node), first
* remember whether the non-node-specific action is runnable.
*/
- runnable = pcmk_is_set(flags, pe_action_runnable);
+ runnable = pcmk_is_set(flags, pcmk_action_runnable);
// Then recheck the resource method with the node
flags = action->rsc->cmds->action_flags(action, node);
@@ -63,9 +64,8 @@ action_flags_for_ordering(pe_action_t *action, const pe_node_t *node)
* function shouldn't be used for other types of constraints without
* changes. Not very satisfying, but it's logical and appears to work well.
*/
- if (runnable && !pcmk_is_set(flags, pe_action_runnable)) {
- pe__set_raw_action_flags(flags, action->rsc->id,
- pe_action_runnable);
+ if (runnable && !pcmk_is_set(flags, pcmk_action_runnable)) {
+ pe__set_raw_action_flags(flags, action->rsc->id, pcmk_action_runnable);
}
return flags;
}
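Note (not part of the patch): action_flags_for_ordering() now returns a plain uint32_t flag group rather than enum pe_action_flags. A self-contained sketch of the bit-mask pattern these flag groups rely on; the EXAMPLE_* names are stand-ins, not Pacemaker symbols:

    #include <stdint.h>
    #include <stdio.h>

    #define EXAMPLE_RUNNABLE (UINT32_C(1) << 0) /* stands in for pcmk_action_runnable */
    #define EXAMPLE_OPTIONAL (UINT32_C(1) << 1) /* stands in for pcmk_action_optional */

    int main(void)
    {
        uint32_t flags = EXAMPLE_RUNNABLE;

        if ((flags & EXAMPLE_RUNNABLE) != 0) {   /* like pcmk_is_set() */
            printf("runnable\n");
        }
        flags |= EXAMPLE_OPTIONAL;               /* like pe__set_action_flags() */
        flags &= ~EXAMPLE_RUNNABLE;              /* like pe__clear_action_flags() */
        printf("flags are now 0x%x\n", (unsigned int) flags);
        return 0;
    }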
@@ -89,18 +89,19 @@ action_flags_for_ordering(pe_action_t *action, const pe_node_t *node)
* \note It is the caller's responsibility to free the return value.
*/
static char *
-action_uuid_for_ordering(const char *first_uuid, const pe_resource_t *first_rsc)
+action_uuid_for_ordering(const char *first_uuid,
+ const pcmk_resource_t *first_rsc)
{
guint interval_ms = 0;
char *uuid = NULL;
char *rid = NULL;
char *first_task_str = NULL;
- enum action_tasks first_task = no_action;
- enum action_tasks remapped_task = no_action;
+ enum action_tasks first_task = pcmk_action_unspecified;
+ enum action_tasks remapped_task = pcmk_action_unspecified;
// Only non-notify actions for collective resources need remapping
- if ((strstr(first_uuid, "notify") != NULL)
- || (first_rsc->variant < pe_group)) {
+ if ((strstr(first_uuid, PCMK_ACTION_NOTIFY) != NULL)
+ || (first_rsc->variant < pcmk_rsc_variant_group)) {
goto done;
}
@@ -112,39 +113,35 @@ action_uuid_for_ordering(const char *first_uuid, const pe_resource_t *first_rsc)
first_task = text2task(first_task_str);
switch (first_task) {
- case stop_rsc:
- case start_rsc:
- case action_notify:
- case action_promote:
- case action_demote:
+ case pcmk_action_stop:
+ case pcmk_action_start:
+ case pcmk_action_notify:
+ case pcmk_action_promote:
+ case pcmk_action_demote:
remapped_task = first_task + 1;
break;
- case stopped_rsc:
- case started_rsc:
- case action_notified:
- case action_promoted:
- case action_demoted:
+ case pcmk_action_stopped:
+ case pcmk_action_started:
+ case pcmk_action_notified:
+ case pcmk_action_promoted:
+ case pcmk_action_demoted:
remapped_task = first_task;
break;
- case monitor_rsc:
- case shutdown_crm:
- case stonith_node:
+ case pcmk_action_monitor:
+ case pcmk_action_shutdown:
+ case pcmk_action_fence:
break;
default:
crm_err("Unknown action '%s' in ordering", first_task_str);
break;
}
- if (remapped_task != no_action) {
- /* If a (clone) resource has notifications enabled, we want to order
- * relative to when all notifications have been sent for the remapped
- * task. Only outermost resources or those in bundles have
- * notifications.
+ if (remapped_task != pcmk_action_unspecified) {
+ /* If a clone or bundle has notifications enabled, the ordering will be
+ * relative to when notifications have been sent for the remapped task.
*/
- if (pcmk_is_set(first_rsc->flags, pe_rsc_notify)
- && ((first_rsc->parent == NULL)
- || (pe_rsc_is_clone(first_rsc)
- && (first_rsc->parent->variant == pe_container)))) {
+ if (pcmk_is_set(first_rsc->flags, pcmk_rsc_notify)
+ && (pe_rsc_is_clone(first_rsc) || pe_rsc_is_bundled(first_rsc))) {
uuid = pcmk__notify_key(rid, "confirmed-post",
task2text(remapped_task));
} else {
@@ -181,13 +178,14 @@ done:
*
* \return Actual action that should be used for the ordering
*/
-static pe_action_t *
-action_for_ordering(pe_action_t *action)
+static pcmk_action_t *
+action_for_ordering(pcmk_action_t *action)
{
- pe_action_t *result = action;
- pe_resource_t *rsc = action->rsc;
+ pcmk_action_t *result = action;
+ pcmk_resource_t *rsc = action->rsc;
- if ((rsc != NULL) && (rsc->variant >= pe_group) && (action->uuid != NULL)) {
+ if ((rsc != NULL) && (rsc->variant >= pcmk_rsc_variant_group)
+ && (action->uuid != NULL)) {
char *uuid = action_uuid_for_ordering(action->uuid, rsc);
result = find_first_action(rsc->actions, uuid, NULL, NULL);
@@ -203,6 +201,34 @@ action_for_ordering(pe_action_t *action)
/*!
* \internal
+ * \brief Wrapper for update_ordered_actions() method for readability
+ *
+ * \param[in,out] rsc Resource to call method for
+ * \param[in,out] first 'First' action in an ordering
+ * \param[in,out] then 'Then' action in an ordering
+ * \param[in] node If not NULL, limit scope of ordering to this
+ * node (only used when interleaving instances)
+ * \param[in] flags Action flags for \p first for ordering purposes
+ * \param[in] filter Action flags to limit scope of certain updates
+ * (may include pcmk_action_optional to affect only
+ *                           mandatory actions, and pcmk_action_runnable to
+ * affect only runnable actions)
+ * \param[in] type Group of enum pcmk__action_relation_flags to apply
+ * \param[in,out] scheduler Scheduler data
+ *
+ * \return Group of enum pcmk__updated flags indicating what was updated
+ */
+static inline uint32_t
+update(pcmk_resource_t *rsc, pcmk_action_t *first, pcmk_action_t *then,
+ const pcmk_node_t *node, uint32_t flags, uint32_t filter, uint32_t type,
+ pcmk_scheduler_t *scheduler)
+{
+ return rsc->cmds->update_ordered_actions(first, then, node, flags, filter,
+ type, scheduler);
+}
+
+/*!
+ * \internal
* \brief Update flags for ordering's actions appropriately for ordering's flags
*
* \param[in,out] first First action in an ordering
@@ -210,16 +236,15 @@ action_for_ordering(pe_action_t *action)
* \param[in] first_flags Action flags for \p first for ordering purposes
* \param[in] then_flags Action flags for \p then for ordering purposes
* \param[in,out] order Action wrapper for \p first in ordering
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags
*/
static uint32_t
-update_action_for_ordering_flags(pe_action_t *first, pe_action_t *then,
- enum pe_action_flags first_flags,
- enum pe_action_flags then_flags,
- pe_action_wrapper_t *order,
- pe_working_set_t *data_set)
+update_action_for_ordering_flags(pcmk_action_t *first, pcmk_action_t *then,
+ uint32_t first_flags, uint32_t then_flags,
+ pcmk__related_action_t *order,
+ pcmk_scheduler_t *scheduler)
{
uint32_t changed = pcmk__updated_none;
@@ -228,96 +253,90 @@ update_action_for_ordering_flags(pe_action_t *first, pe_action_t *then,
* whole 'then' clone should restart if 'first' is restarted, so then->node
* is needed.
*/
- pe_node_t *node = then->node;
+ pcmk_node_t *node = then->node;
- if (pcmk_is_set(order->type, pe_order_implies_then_on_node)) {
+ if (pcmk_is_set(order->type, pcmk__ar_first_implies_same_node_then)) {
/* For unfencing, only instances of 'then' on the same node as 'first'
* (the unfencing operation) should restart, so reset node to
* first->node, at which point this case is handled like a normal
- * pe_order_implies_then.
+ * pcmk__ar_first_implies_then.
*/
- pe__clear_order_flags(order->type, pe_order_implies_then_on_node);
- pe__set_order_flags(order->type, pe_order_implies_then);
+ pe__clear_order_flags(order->type,
+ pcmk__ar_first_implies_same_node_then);
+ pe__set_order_flags(order->type, pcmk__ar_first_implies_then);
node = first->node;
pe_rsc_trace(then->rsc,
- "%s then %s: mapped pe_order_implies_then_on_node to "
- "pe_order_implies_then on %s",
+ "%s then %s: mapped pcmk__ar_first_implies_same_node_then "
+ "to pcmk__ar_first_implies_then on %s",
first->uuid, then->uuid, pe__node_name(node));
}
- if (pcmk_is_set(order->type, pe_order_implies_then)) {
+ if (pcmk_is_set(order->type, pcmk__ar_first_implies_then)) {
if (then->rsc != NULL) {
- changed |= then->rsc->cmds->update_ordered_actions(first, then,
- node,
- first_flags & pe_action_optional,
- pe_action_optional,
- pe_order_implies_then,
- data_set);
- } else if (!pcmk_is_set(first_flags, pe_action_optional)
- && pcmk_is_set(then->flags, pe_action_optional)) {
- pe__clear_action_flags(then, pe_action_optional);
+ changed |= update(then->rsc, first, then, node,
+ first_flags & pcmk_action_optional,
+ pcmk_action_optional, pcmk__ar_first_implies_then,
+ scheduler);
+ } else if (!pcmk_is_set(first_flags, pcmk_action_optional)
+ && pcmk_is_set(then->flags, pcmk_action_optional)) {
+ pe__clear_action_flags(then, pcmk_action_optional);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
- pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_implies_then",
+ pe_rsc_trace(then->rsc,
+ "%s then %s: %s after pcmk__ar_first_implies_then",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
- if (pcmk_is_set(order->type, pe_order_restart) && (then->rsc != NULL)) {
- enum pe_action_flags restart = pe_action_optional|pe_action_runnable;
+ if (pcmk_is_set(order->type, pcmk__ar_intermediate_stop)
+ && (then->rsc != NULL)) {
+ enum pe_action_flags restart = pcmk_action_optional
+ |pcmk_action_runnable;
- changed |= then->rsc->cmds->update_ordered_actions(first, then, node,
- first_flags, restart,
- pe_order_restart,
- data_set);
- pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_restart",
+ changed |= update(then->rsc, first, then, node, first_flags, restart,
+ pcmk__ar_intermediate_stop, scheduler);
+ pe_rsc_trace(then->rsc,
+ "%s then %s: %s after pcmk__ar_intermediate_stop",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
- if (pcmk_is_set(order->type, pe_order_implies_first)) {
+ if (pcmk_is_set(order->type, pcmk__ar_then_implies_first)) {
if (first->rsc != NULL) {
- changed |= first->rsc->cmds->update_ordered_actions(first, then,
- node,
- first_flags,
- pe_action_optional,
- pe_order_implies_first,
- data_set);
- } else if (!pcmk_is_set(first_flags, pe_action_optional)
- && pcmk_is_set(first->flags, pe_action_runnable)) {
- pe__clear_action_flags(first, pe_action_runnable);
+ changed |= update(first->rsc, first, then, node, first_flags,
+ pcmk_action_optional, pcmk__ar_then_implies_first,
+ scheduler);
+ } else if (!pcmk_is_set(first_flags, pcmk_action_optional)
+ && pcmk_is_set(first->flags, pcmk_action_runnable)) {
+ pe__clear_action_flags(first, pcmk_action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_first);
}
- pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_implies_first",
+ pe_rsc_trace(then->rsc,
+ "%s then %s: %s after pcmk__ar_then_implies_first",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
- if (pcmk_is_set(order->type, pe_order_promoted_implies_first)) {
+ if (pcmk_is_set(order->type, pcmk__ar_promoted_then_implies_first)) {
if (then->rsc != NULL) {
- changed |= then->rsc->cmds->update_ordered_actions(first, then,
- node,
- first_flags & pe_action_optional,
- pe_action_optional,
- pe_order_promoted_implies_first,
- data_set);
+ changed |= update(then->rsc, first, then, node,
+ first_flags & pcmk_action_optional,
+ pcmk_action_optional,
+ pcmk__ar_promoted_then_implies_first, scheduler);
}
pe_rsc_trace(then->rsc,
- "%s then %s: %s after pe_order_promoted_implies_first",
+ "%s then %s: %s after pcmk__ar_promoted_then_implies_first",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
- if (pcmk_is_set(order->type, pe_order_one_or_more)) {
+ if (pcmk_is_set(order->type, pcmk__ar_min_runnable)) {
if (then->rsc != NULL) {
- changed |= then->rsc->cmds->update_ordered_actions(first, then,
- node,
- first_flags,
- pe_action_runnable,
- pe_order_one_or_more,
- data_set);
-
- } else if (pcmk_is_set(first_flags, pe_action_runnable)) {
+ changed |= update(then->rsc, first, then, node, first_flags,
+ pcmk_action_runnable, pcmk__ar_min_runnable,
+ scheduler);
+
+ } else if (pcmk_is_set(first_flags, pcmk_action_runnable)) {
// We have another runnable instance of "first"
then->runnable_before++;
@@ -325,145 +344,131 @@ update_action_for_ordering_flags(pe_action_t *first, pe_action_t *then,
* "before" instances to be runnable, and they now are.
*/
if ((then->runnable_before >= then->required_runnable_before)
- && !pcmk_is_set(then->flags, pe_action_runnable)) {
+ && !pcmk_is_set(then->flags, pcmk_action_runnable)) {
- pe__set_action_flags(then, pe_action_runnable);
+ pe__set_action_flags(then, pcmk_action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
}
- pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_one_or_more",
+ pe_rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_min_runnable",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
- if (pcmk_is_set(order->type, pe_order_probe) && (then->rsc != NULL)) {
- if (!pcmk_is_set(first_flags, pe_action_runnable)
- && (first->rsc->running_on != NULL)) {
+ if (pcmk_is_set(order->type, pcmk__ar_nested_remote_probe)
+ && (then->rsc != NULL)) {
+
+ if (!pcmk_is_set(first_flags, pcmk_action_runnable)
+ && (first->rsc != NULL) && (first->rsc->running_on != NULL)) {
pe_rsc_trace(then->rsc,
"%s then %s: ignoring because first is stopping",
first->uuid, then->uuid);
- order->type = pe_order_none;
+ order->type = (enum pe_ordering) pcmk__ar_none;
} else {
- changed |= then->rsc->cmds->update_ordered_actions(first, then,
- node,
- first_flags,
- pe_action_runnable,
- pe_order_runnable_left,
- data_set);
+ changed |= update(then->rsc, first, then, node, first_flags,
+ pcmk_action_runnable,
+ pcmk__ar_unrunnable_first_blocks, scheduler);
}
- pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_probe",
+ pe_rsc_trace(then->rsc,
+ "%s then %s: %s after pcmk__ar_nested_remote_probe",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
- if (pcmk_is_set(order->type, pe_order_runnable_left)) {
+ if (pcmk_is_set(order->type, pcmk__ar_unrunnable_first_blocks)) {
if (then->rsc != NULL) {
- changed |= then->rsc->cmds->update_ordered_actions(first, then,
- node,
- first_flags,
- pe_action_runnable,
- pe_order_runnable_left,
- data_set);
+ changed |= update(then->rsc, first, then, node, first_flags,
+ pcmk_action_runnable,
+ pcmk__ar_unrunnable_first_blocks, scheduler);
- } else if (!pcmk_is_set(first_flags, pe_action_runnable)
- && pcmk_is_set(then->flags, pe_action_runnable)) {
+ } else if (!pcmk_is_set(first_flags, pcmk_action_runnable)
+ && pcmk_is_set(then->flags, pcmk_action_runnable)) {
- pe__clear_action_flags(then, pe_action_runnable);
+ pe__clear_action_flags(then, pcmk_action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
- pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_runnable_left",
+ pe_rsc_trace(then->rsc,
+ "%s then %s: %s after pcmk__ar_unrunnable_first_blocks",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
- if (pcmk_is_set(order->type, pe_order_implies_first_migratable)) {
+ if (pcmk_is_set(order->type, pcmk__ar_unmigratable_then_blocks)) {
if (then->rsc != NULL) {
- changed |= then->rsc->cmds->update_ordered_actions(first, then,
- node,
- first_flags,
- pe_action_optional,
- pe_order_implies_first_migratable,
- data_set);
+ changed |= update(then->rsc, first, then, node, first_flags,
+ pcmk_action_optional,
+ pcmk__ar_unmigratable_then_blocks, scheduler);
}
pe_rsc_trace(then->rsc, "%s then %s: %s after "
- "pe_order_implies_first_migratable",
+ "pcmk__ar_unmigratable_then_blocks",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
- if (pcmk_is_set(order->type, pe_order_pseudo_left)) {
+ if (pcmk_is_set(order->type, pcmk__ar_first_else_then)) {
if (then->rsc != NULL) {
- changed |= then->rsc->cmds->update_ordered_actions(first, then,
- node,
- first_flags,
- pe_action_optional,
- pe_order_pseudo_left,
- data_set);
+ changed |= update(then->rsc, first, then, node, first_flags,
+ pcmk_action_optional, pcmk__ar_first_else_then,
+ scheduler);
}
- pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_pseudo_left",
+ pe_rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_first_else_then",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
- if (pcmk_is_set(order->type, pe_order_optional)) {
+ if (pcmk_is_set(order->type, pcmk__ar_ordered)) {
if (then->rsc != NULL) {
- changed |= then->rsc->cmds->update_ordered_actions(first, then,
- node,
- first_flags,
- pe_action_runnable,
- pe_order_optional,
- data_set);
+ changed |= update(then->rsc, first, then, node, first_flags,
+ pcmk_action_runnable, pcmk__ar_ordered,
+ scheduler);
}
- pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_optional",
+ pe_rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_ordered",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
- if (pcmk_is_set(order->type, pe_order_asymmetrical)) {
+ if (pcmk_is_set(order->type, pcmk__ar_asymmetric)) {
if (then->rsc != NULL) {
- changed |= then->rsc->cmds->update_ordered_actions(first, then,
- node,
- first_flags,
- pe_action_runnable,
- pe_order_asymmetrical,
- data_set);
+ changed |= update(then->rsc, first, then, node, first_flags,
+ pcmk_action_runnable, pcmk__ar_asymmetric,
+ scheduler);
}
- pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_asymmetrical",
+ pe_rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_asymmetric",
first->uuid, then->uuid,
(changed? "changed" : "unchanged"));
}
- if (pcmk_is_set(first->flags, pe_action_runnable)
- && pcmk_is_set(order->type, pe_order_implies_then_printed)
- && !pcmk_is_set(first_flags, pe_action_optional)) {
+ if (pcmk_is_set(first->flags, pcmk_action_runnable)
+ && pcmk_is_set(order->type, pcmk__ar_first_implies_then_graphed)
+ && !pcmk_is_set(first_flags, pcmk_action_optional)) {
pe_rsc_trace(then->rsc, "%s will be in graph because %s is required",
then->uuid, first->uuid);
- pe__set_action_flags(then, pe_action_print_always);
+ pe__set_action_flags(then, pcmk_action_always_in_graph);
// Don't bother marking 'then' as changed just for this
}
- if (pcmk_is_set(order->type, pe_order_implies_first_printed)
- && !pcmk_is_set(then_flags, pe_action_optional)) {
+ if (pcmk_is_set(order->type, pcmk__ar_then_implies_first_graphed)
+ && !pcmk_is_set(then_flags, pcmk_action_optional)) {
pe_rsc_trace(then->rsc, "%s will be in graph because %s is required",
first->uuid, then->uuid);
- pe__set_action_flags(first, pe_action_print_always);
+ pe__set_action_flags(first, pcmk_action_always_in_graph);
// Don't bother marking 'first' as changed just for this
}
- if (pcmk_any_flags_set(order->type, pe_order_implies_then
- |pe_order_implies_first
- |pe_order_restart)
+ if (pcmk_any_flags_set(order->type, pcmk__ar_first_implies_then
+ |pcmk__ar_then_implies_first
+ |pcmk__ar_intermediate_stop)
&& (first->rsc != NULL)
- && !pcmk_is_set(first->rsc->flags, pe_rsc_managed)
- && pcmk_is_set(first->rsc->flags, pe_rsc_block)
- && !pcmk_is_set(first->flags, pe_action_runnable)
- && pcmk__str_eq(first->task, RSC_STOP, pcmk__str_casei)) {
+ && !pcmk_is_set(first->rsc->flags, pcmk_rsc_managed)
+ && pcmk_is_set(first->rsc->flags, pcmk_rsc_blocked)
+ && !pcmk_is_set(first->flags, pcmk_action_runnable)
+ && pcmk__str_eq(first->task, PCMK_ACTION_STOP, pcmk__str_none)) {
- if (pcmk_is_set(then->flags, pe_action_runnable)) {
- pe__clear_action_flags(then, pe_action_runnable);
+ if (pcmk_is_set(then->flags, pcmk_action_runnable)) {
+ pe__clear_action_flags(then, pcmk_action_runnable);
pcmk__set_updated_flags(changed, first, pcmk__updated_then);
}
pe_rsc_trace(then->rsc, "%s then %s: %s after checking whether first "
@@ -478,13 +483,13 @@ update_action_for_ordering_flags(pe_action_t *first, pe_action_t *then,
// Convenience macros for logging action properties
#define action_type_str(flags) \
- (pcmk_is_set((flags), pe_action_pseudo)? "pseudo-action" : "action")
+ (pcmk_is_set((flags), pcmk_action_pseudo)? "pseudo-action" : "action")
#define action_optional_str(flags) \
- (pcmk_is_set((flags), pe_action_optional)? "optional" : "required")
+ (pcmk_is_set((flags), pcmk_action_optional)? "optional" : "required")
#define action_runnable_str(flags) \
- (pcmk_is_set((flags), pe_action_runnable)? "runnable" : "unrunnable")
+ (pcmk_is_set((flags), pcmk_action_runnable)? "runnable" : "unrunnable")
#define action_node_str(a) \
(((a)->node == NULL)? "no node" : (a)->node->details->uname)
@@ -493,11 +498,12 @@ update_action_for_ordering_flags(pe_action_t *first, pe_action_t *then,
* \internal
* \brief Update an action's flags for all orderings where it is "then"
*
- * \param[in,out] then Action to update
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] then Action to update
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__update_action_for_orderings(pe_action_t *then, pe_working_set_t *data_set)
+pcmk__update_action_for_orderings(pcmk_action_t *then,
+ pcmk_scheduler_t *scheduler)
{
GList *lpc = NULL;
uint32_t changed = pcmk__updated_none;
@@ -508,7 +514,7 @@ pcmk__update_action_for_orderings(pe_action_t *then, pe_working_set_t *data_set)
action_optional_str(then->flags),
action_runnable_str(then->flags), action_node_str(then));
- if (pcmk_is_set(then->flags, pe_action_requires_any)) {
+ if (pcmk_is_set(then->flags, pcmk_action_min_runnable)) {
/* Initialize current known "runnable before" actions. As
* update_action_for_ordering_flags() is called for each of then's
* before actions, this number will increment as runnable 'first'
@@ -523,22 +529,23 @@ pcmk__update_action_for_orderings(pe_action_t *then, pe_working_set_t *data_set)
then->required_runnable_before = 1;
}
- /* The pe_order_one_or_more clause of update_action_for_ordering_flags()
- * (called below) will reset runnable if appropriate.
+ /* The pcmk__ar_min_runnable clause of
+ * update_action_for_ordering_flags() (called below)
+ * will reset runnable if appropriate.
*/
- pe__clear_action_flags(then, pe_action_runnable);
+ pe__clear_action_flags(then, pcmk_action_runnable);
}
for (lpc = then->actions_before; lpc != NULL; lpc = lpc->next) {
- pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc->data;
- pe_action_t *first = other->action;
+ pcmk__related_action_t *other = lpc->data;
+ pcmk_action_t *first = other->action;
- pe_node_t *then_node = then->node;
- pe_node_t *first_node = first->node;
+ pcmk_node_t *then_node = then->node;
+ pcmk_node_t *first_node = first->node;
if ((first->rsc != NULL)
- && (first->rsc->variant == pe_group)
- && pcmk__str_eq(first->task, RSC_START, pcmk__str_casei)) {
+ && (first->rsc->variant == pcmk_rsc_variant_group)
+ && pcmk__str_eq(first->task, PCMK_ACTION_START, pcmk__str_none)) {
first_node = first->rsc->fns->location(first->rsc, NULL, FALSE);
if (first_node != NULL) {
@@ -548,8 +555,8 @@ pcmk__update_action_for_orderings(pe_action_t *then, pe_working_set_t *data_set)
}
if ((then->rsc != NULL)
- && (then->rsc->variant == pe_group)
- && pcmk__str_eq(then->task, RSC_START, pcmk__str_casei)) {
+ && (then->rsc->variant == pcmk_rsc_variant_group)
+ && pcmk__str_eq(then->task, PCMK_ACTION_START, pcmk__str_none)) {
then_node = then->rsc->fns->location(then->rsc, NULL, FALSE);
if (then_node != NULL) {
@@ -559,30 +566,31 @@ pcmk__update_action_for_orderings(pe_action_t *then, pe_working_set_t *data_set)
}
// Disable constraint if it only applies when on same node, but isn't
- if (pcmk_is_set(other->type, pe_order_same_node)
+ if (pcmk_is_set(other->type, pcmk__ar_if_on_same_node)
&& (first_node != NULL) && (then_node != NULL)
- && (first_node->details != then_node->details)) {
+ && !pe__same_node(first_node, then_node)) {
pe_rsc_trace(then->rsc,
- "Disabled ordering %s on %s then %s on %s: not same node",
+ "Disabled ordering %s on %s then %s on %s: "
+ "not same node",
other->action->uuid, pe__node_name(first_node),
then->uuid, pe__node_name(then_node));
- other->type = pe_order_none;
+ other->type = (enum pe_ordering) pcmk__ar_none;
continue;
}
pcmk__clear_updated_flags(changed, then, pcmk__updated_first);
if ((first->rsc != NULL)
- && pcmk_is_set(other->type, pe_order_then_cancels_first)
- && !pcmk_is_set(then->flags, pe_action_optional)) {
+ && pcmk_is_set(other->type, pcmk__ar_then_cancels_first)
+ && !pcmk_is_set(then->flags, pcmk_action_optional)) {
/* 'then' is required, so we must abandon 'first'
* (e.g. a required stop cancels any agent reload).
*/
- pe__set_action_flags(other->action, pe_action_optional);
- if (!strcmp(first->task, CRMD_ACTION_RELOAD_AGENT)) {
- pe__clear_resource_flags(first->rsc, pe_rsc_reload);
+ pe__set_action_flags(other->action, pcmk_action_optional);
+ if (!strcmp(first->task, PCMK_ACTION_RELOAD_AGENT)) {
+ pe__clear_resource_flags(first->rsc, pcmk_rsc_reload);
}
}
@@ -605,14 +613,14 @@ pcmk__update_action_for_orderings(pe_action_t *then, pe_working_set_t *data_set)
* could mean it is a non-resource action, a primitive resource
* action, or already expanded.
*/
- enum pe_action_flags first_flags, then_flags;
+ uint32_t first_flags, then_flags;
first_flags = action_flags_for_ordering(first, then_node);
then_flags = action_flags_for_ordering(then, first_node);
changed |= update_action_for_ordering_flags(first, then,
first_flags, then_flags,
- other, data_set);
+ other, scheduler);
/* 'first' was for a complex resource (clone, group, etc),
* create a new dependency if necessary
@@ -626,7 +634,7 @@ pcmk__update_action_for_orderings(pe_action_t *then, pe_working_set_t *data_set)
"Disabled ordering %s then %s in favor of %s then %s",
other->action->uuid, then->uuid, first->uuid,
then->uuid);
- other->type = pe_order_none;
+ other->type = (enum pe_ordering) pcmk__ar_none;
}
@@ -635,15 +643,15 @@ pcmk__update_action_for_orderings(pe_action_t *then, pe_working_set_t *data_set)
"because it changed", first->uuid);
for (GList *lpc2 = first->actions_after; lpc2 != NULL;
lpc2 = lpc2->next) {
- pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc2->data;
+ pcmk__related_action_t *other = lpc2->data;
- pcmk__update_action_for_orderings(other->action, data_set);
+ pcmk__update_action_for_orderings(other->action, scheduler);
}
- pcmk__update_action_for_orderings(first, data_set);
+ pcmk__update_action_for_orderings(first, scheduler);
}
}
- if (pcmk_is_set(then->flags, pe_action_requires_any)) {
+ if (pcmk_is_set(then->flags, pcmk_action_min_runnable)) {
if (last_flags == then->flags) {
pcmk__clear_updated_flags(changed, then, pcmk__updated_then);
} else {
@@ -654,23 +662,24 @@ pcmk__update_action_for_orderings(pe_action_t *then, pe_working_set_t *data_set)
if (pcmk_is_set(changed, pcmk__updated_then)) {
crm_trace("Re-processing %s and its 'after' actions because it changed",
then->uuid);
- if (pcmk_is_set(last_flags, pe_action_runnable)
- && !pcmk_is_set(then->flags, pe_action_runnable)) {
- pcmk__block_colocation_dependents(then, data_set);
+ if (pcmk_is_set(last_flags, pcmk_action_runnable)
+ && !pcmk_is_set(then->flags, pcmk_action_runnable)) {
+ pcmk__block_colocation_dependents(then);
}
- pcmk__update_action_for_orderings(then, data_set);
+ pcmk__update_action_for_orderings(then, scheduler);
for (lpc = then->actions_after; lpc != NULL; lpc = lpc->next) {
- pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc->data;
+ pcmk__related_action_t *other = lpc->data;
- pcmk__update_action_for_orderings(other->action, data_set);
+ pcmk__update_action_for_orderings(other->action, scheduler);
}
}
}
static inline bool
-is_primitive_action(const pe_action_t *action)
+is_primitive_action(const pcmk_action_t *action)
{
- return action && action->rsc && (action->rsc->variant == pe_native);
+ return (action != NULL) && (action->rsc != NULL)
+ && (action->rsc->variant == pcmk_rsc_variant_primitive);
}
/*!
@@ -686,8 +695,7 @@ is_primitive_action(const pe_action_t *action)
pe__clear_action_flags(action, flag); \
if ((action)->rsc != (reason)->rsc) { \
char *reason_text = pe__action2reason((reason), (flag)); \
- pe_action_set_reason((action), reason_text, \
- ((flag) == pe_action_migrate_runnable)); \
+ pe_action_set_reason((action), reason_text, false); \
free(reason_text); \
} \
} \
@@ -704,27 +712,28 @@ is_primitive_action(const pe_action_t *action)
* \param[in,out] then 'Then' action in an asymmetric ordering
*/
static void
-handle_asymmetric_ordering(const pe_action_t *first, pe_action_t *then)
+handle_asymmetric_ordering(const pcmk_action_t *first, pcmk_action_t *then)
{
/* Only resource actions after an unrunnable 'first' action need updates for
* asymmetric ordering.
*/
- if ((then->rsc == NULL) || pcmk_is_set(first->flags, pe_action_runnable)) {
+ if ((then->rsc == NULL)
+ || pcmk_is_set(first->flags, pcmk_action_runnable)) {
return;
}
// Certain optional 'then' actions are unaffected by unrunnable 'first'
- if (pcmk_is_set(then->flags, pe_action_optional)) {
+ if (pcmk_is_set(then->flags, pcmk_action_optional)) {
enum rsc_role_e then_rsc_role = then->rsc->fns->state(then->rsc, TRUE);
- if ((then_rsc_role == RSC_ROLE_STOPPED)
- && pcmk__str_eq(then->task, RSC_STOP, pcmk__str_none)) {
+ if ((then_rsc_role == pcmk_role_stopped)
+ && pcmk__str_eq(then->task, PCMK_ACTION_STOP, pcmk__str_none)) {
/* If 'then' should stop after 'first' but is already stopped, the
* ordering is irrelevant.
*/
return;
- } else if ((then_rsc_role >= RSC_ROLE_STARTED)
- && pcmk__str_eq(then->task, RSC_START, pcmk__str_none)
+ } else if ((then_rsc_role >= pcmk_role_started)
+ && pcmk__str_eq(then->task, PCMK_ACTION_START, pcmk__str_none)
&& pe__rsc_running_on_only(then->rsc, then->node)) {
/* Similarly if 'then' should start after 'first' but is already
* started on a single node.
@@ -734,8 +743,8 @@ handle_asymmetric_ordering(const pe_action_t *first, pe_action_t *then)
}
// 'First' can't run, so 'then' can't either
- clear_action_flag_because(then, pe_action_optional, first);
- clear_action_flag_because(then, pe_action_runnable, first);
+ clear_action_flag_because(then, pcmk_action_optional, first);
+ clear_action_flag_because(then, pcmk_action_runnable, first);
}
/*!
@@ -750,7 +759,8 @@ handle_asymmetric_ordering(const pe_action_t *first, pe_action_t *then)
* "stop later group member before stopping earlier group member"
*/
static void
-handle_restart_ordering(pe_action_t *first, pe_action_t *then, uint32_t filter)
+handle_restart_ordering(pcmk_action_t *first, pcmk_action_t *then,
+ uint32_t filter)
{
const char *reason = NULL;
@@ -760,17 +770,17 @@ handle_restart_ordering(pe_action_t *first, pe_action_t *then, uint32_t filter)
// We need to update the action in two cases:
// ... if 'then' is required
- if (pcmk_is_set(filter, pe_action_optional)
- && !pcmk_is_set(then->flags, pe_action_optional)) {
+ if (pcmk_is_set(filter, pcmk_action_optional)
+ && !pcmk_is_set(then->flags, pcmk_action_optional)) {
reason = "restart";
}
/* ... if 'then' is unrunnable action on same resource (if a resource
* should restart but can't start, we still want to stop)
*/
- if (pcmk_is_set(filter, pe_action_runnable)
- && !pcmk_is_set(then->flags, pe_action_runnable)
- && pcmk_is_set(then->rsc->flags, pe_rsc_managed)
+ if (pcmk_is_set(filter, pcmk_action_runnable)
+ && !pcmk_is_set(then->flags, pcmk_action_runnable)
+ && pcmk_is_set(then->rsc->flags, pcmk_rsc_managed)
&& (first->rsc == then->rsc)) {
reason = "stop";
}
@@ -783,24 +793,24 @@ handle_restart_ordering(pe_action_t *first, pe_action_t *then, uint32_t filter)
first->uuid, then->uuid, reason);
// Make 'first' required if it is runnable
- if (pcmk_is_set(first->flags, pe_action_runnable)) {
- clear_action_flag_because(first, pe_action_optional, then);
+ if (pcmk_is_set(first->flags, pcmk_action_runnable)) {
+ clear_action_flag_because(first, pcmk_action_optional, then);
}
// Make 'first' required if 'then' is required
- if (!pcmk_is_set(then->flags, pe_action_optional)) {
- clear_action_flag_because(first, pe_action_optional, then);
+ if (!pcmk_is_set(then->flags, pcmk_action_optional)) {
+ clear_action_flag_because(first, pcmk_action_optional, then);
}
// Make 'first' unmigratable if 'then' is unmigratable
- if (!pcmk_is_set(then->flags, pe_action_migrate_runnable)) {
- clear_action_flag_because(first, pe_action_migrate_runnable, then);
+ if (!pcmk_is_set(then->flags, pcmk_action_migratable)) {
+ clear_action_flag_because(first, pcmk_action_migratable, then);
}
// Make 'then' unrunnable if 'first' is required but unrunnable
- if (!pcmk_is_set(first->flags, pe_action_optional)
- && !pcmk_is_set(first->flags, pe_action_runnable)) {
- clear_action_flag_because(then, pe_action_runnable, first);
+ if (!pcmk_is_set(first->flags, pcmk_action_optional)
+ && !pcmk_is_set(first->flags, pcmk_action_runnable)) {
+ clear_action_flag_because(then, pcmk_action_runnable, first);
}
}
@@ -812,104 +822,107 @@ handle_restart_ordering(pe_action_t *first, pe_action_t *then, uint32_t filter)
* (and runnable_before members if appropriate) as appropriate for the ordering.
* Effects may cascade to other orderings involving the actions as well.
*
- * \param[in,out] first 'First' action in an ordering
- * \param[in,out] then 'Then' action in an ordering
- * \param[in] node If not NULL, limit scope of ordering to this node
- * (ignored)
- * \param[in] flags Action flags for \p first for ordering purposes
- * \param[in] filter Action flags to limit scope of certain updates (may
- * include pe_action_optional to affect only mandatory
- * actions, and pe_action_runnable to affect only
- * runnable actions)
- * \param[in] type Group of enum pe_ordering flags to apply
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] first 'First' action in an ordering
+ * \param[in,out] then 'Then' action in an ordering
+ * \param[in] node If not NULL, limit scope of ordering to this node
+ * (ignored)
+ * \param[in] flags Action flags for \p first for ordering purposes
+ * \param[in] filter Action flags to limit scope of certain updates (may
+ * include pcmk_action_optional to affect only
+ * mandatory actions, and pcmk_action_runnable to
+ * affect only runnable actions)
+ * \param[in] type Group of enum pcmk__action_relation_flags to apply
+ * \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
uint32_t
-pcmk__update_ordered_actions(pe_action_t *first, pe_action_t *then,
- const pe_node_t *node, uint32_t flags,
+pcmk__update_ordered_actions(pcmk_action_t *first, pcmk_action_t *then,
+ const pcmk_node_t *node, uint32_t flags,
uint32_t filter, uint32_t type,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
uint32_t changed = pcmk__updated_none;
- uint32_t then_flags = then->flags;
- uint32_t first_flags = first->flags;
+ uint32_t then_flags = 0U;
+ uint32_t first_flags = 0U;
+
+ CRM_ASSERT((first != NULL) && (then != NULL) && (scheduler != NULL));
- if (pcmk_is_set(type, pe_order_asymmetrical)) {
+ then_flags = then->flags;
+ first_flags = first->flags;
+ if (pcmk_is_set(type, pcmk__ar_asymmetric)) {
handle_asymmetric_ordering(first, then);
}
- if (pcmk_is_set(type, pe_order_implies_first)
- && !pcmk_is_set(then_flags, pe_action_optional)) {
+ if (pcmk_is_set(type, pcmk__ar_then_implies_first)
+ && !pcmk_is_set(then_flags, pcmk_action_optional)) {
// Then is required, and implies first should be, too
- if (pcmk_is_set(filter, pe_action_optional)
- && !pcmk_is_set(flags, pe_action_optional)
- && pcmk_is_set(first_flags, pe_action_optional)) {
- clear_action_flag_because(first, pe_action_optional, then);
+ if (pcmk_is_set(filter, pcmk_action_optional)
+ && !pcmk_is_set(flags, pcmk_action_optional)
+ && pcmk_is_set(first_flags, pcmk_action_optional)) {
+ clear_action_flag_because(first, pcmk_action_optional, then);
}
- if (pcmk_is_set(flags, pe_action_migrate_runnable)
- && !pcmk_is_set(then->flags, pe_action_migrate_runnable)) {
- clear_action_flag_because(first, pe_action_migrate_runnable, then);
+ if (pcmk_is_set(flags, pcmk_action_migratable)
+ && !pcmk_is_set(then->flags, pcmk_action_migratable)) {
+ clear_action_flag_because(first, pcmk_action_migratable, then);
}
}
- if (pcmk_is_set(type, pe_order_promoted_implies_first)
- && (then->rsc != NULL) && (then->rsc->role == RSC_ROLE_PROMOTED)
- && pcmk_is_set(filter, pe_action_optional)
- && !pcmk_is_set(then->flags, pe_action_optional)) {
+ if (pcmk_is_set(type, pcmk__ar_promoted_then_implies_first)
+ && (then->rsc != NULL) && (then->rsc->role == pcmk_role_promoted)
+ && pcmk_is_set(filter, pcmk_action_optional)
+ && !pcmk_is_set(then->flags, pcmk_action_optional)) {
- clear_action_flag_because(first, pe_action_optional, then);
+ clear_action_flag_because(first, pcmk_action_optional, then);
- if (pcmk_is_set(first->flags, pe_action_migrate_runnable)
- && !pcmk_is_set(then->flags, pe_action_migrate_runnable)) {
- clear_action_flag_because(first, pe_action_migrate_runnable,
- then);
+ if (pcmk_is_set(first->flags, pcmk_action_migratable)
+ && !pcmk_is_set(then->flags, pcmk_action_migratable)) {
+ clear_action_flag_because(first, pcmk_action_migratable, then);
}
}
- if (pcmk_is_set(type, pe_order_implies_first_migratable)
- && pcmk_is_set(filter, pe_action_optional)) {
+ if (pcmk_is_set(type, pcmk__ar_unmigratable_then_blocks)
+ && pcmk_is_set(filter, pcmk_action_optional)) {
- if (!pcmk_all_flags_set(then->flags,
- pe_action_migrate_runnable|pe_action_runnable)) {
- clear_action_flag_because(first, pe_action_runnable, then);
+ if (!pcmk_all_flags_set(then->flags, pcmk_action_migratable
+ |pcmk_action_runnable)) {
+ clear_action_flag_because(first, pcmk_action_runnable, then);
}
- if (!pcmk_is_set(then->flags, pe_action_optional)) {
- clear_action_flag_because(first, pe_action_optional, then);
+ if (!pcmk_is_set(then->flags, pcmk_action_optional)) {
+ clear_action_flag_because(first, pcmk_action_optional, then);
}
}
- if (pcmk_is_set(type, pe_order_pseudo_left)
- && pcmk_is_set(filter, pe_action_optional)
- && !pcmk_is_set(first->flags, pe_action_runnable)) {
+ if (pcmk_is_set(type, pcmk__ar_first_else_then)
+ && pcmk_is_set(filter, pcmk_action_optional)
+ && !pcmk_is_set(first->flags, pcmk_action_runnable)) {
- clear_action_flag_because(then, pe_action_migrate_runnable, first);
- pe__clear_action_flags(then, pe_action_pseudo);
+ clear_action_flag_because(then, pcmk_action_migratable, first);
+ pe__clear_action_flags(then, pcmk_action_pseudo);
}
- if (pcmk_is_set(type, pe_order_runnable_left)
- && pcmk_is_set(filter, pe_action_runnable)
- && pcmk_is_set(then->flags, pe_action_runnable)
- && !pcmk_is_set(flags, pe_action_runnable)) {
+ if (pcmk_is_set(type, pcmk__ar_unrunnable_first_blocks)
+ && pcmk_is_set(filter, pcmk_action_runnable)
+ && pcmk_is_set(then->flags, pcmk_action_runnable)
+ && !pcmk_is_set(flags, pcmk_action_runnable)) {
- clear_action_flag_because(then, pe_action_runnable, first);
- clear_action_flag_because(then, pe_action_migrate_runnable, first);
+ clear_action_flag_because(then, pcmk_action_runnable, first);
+ clear_action_flag_because(then, pcmk_action_migratable, first);
}
- if (pcmk_is_set(type, pe_order_implies_then)
- && pcmk_is_set(filter, pe_action_optional)
- && pcmk_is_set(then->flags, pe_action_optional)
- && !pcmk_is_set(flags, pe_action_optional)
- && !pcmk_is_set(first->flags, pe_action_migrate_runnable)) {
+ if (pcmk_is_set(type, pcmk__ar_first_implies_then)
+ && pcmk_is_set(filter, pcmk_action_optional)
+ && pcmk_is_set(then->flags, pcmk_action_optional)
+ && !pcmk_is_set(flags, pcmk_action_optional)
+ && !pcmk_is_set(first->flags, pcmk_action_migratable)) {
- clear_action_flag_because(then, pe_action_optional, first);
+ clear_action_flag_because(then, pcmk_action_optional, first);
}
- if (pcmk_is_set(type, pe_order_restart)) {
+ if (pcmk_is_set(type, pcmk__ar_intermediate_stop)) {
handle_restart_ordering(first, then, filter);
}
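The checks in this block are plain bit-mask tests: pcmk_is_set() and pcmk_all_flags_set() test whether one or more bits are set in an action's flags word, and clear_action_flag_because() clears a flag on one action because of the state of the other. A minimal stand-alone sketch of that idiom, using hypothetical demo_* names rather than the real pcmk_action_* enumerators or Pacemaker macros:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical flags -- illustrative only, not the real pcmk_action_* values */
enum demo_flags {
    demo_optional   = (1 << 0),
    demo_runnable   = (1 << 1),
    demo_migratable = (1 << 2),
};

/* Same shape as the pcmk_is_set()/pcmk_all_flags_set() tests */
static bool demo_all_set(uint32_t flags, uint32_t to_check)
{
    return (flags & to_check) == to_check;
}

int main(void)
{
    uint32_t flags = demo_runnable|demo_migratable;

    printf("optional? %d\n", demo_all_set(flags, demo_optional));          /* 0 */
    printf("runnable+migratable? %d\n",
           demo_all_set(flags, demo_runnable|demo_migratable));            /* 1 */

    flags &= ~demo_migratable;  /* analogous to clearing a flag with a reason */
    printf("migratable? %d\n", demo_all_set(flags, demo_migratable));      /* 0 */
    return 0;
}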
@@ -923,7 +936,7 @@ pcmk__update_ordered_actions(pe_action_t *first, pe_action_t *then,
if ((then->rsc != NULL) && (then->rsc->parent != NULL)) {
// Required to handle "X_stop then X_start" for cloned groups
- pcmk__update_action_for_orderings(then, data_set);
+ pcmk__update_action_for_orderings(then, scheduler);
}
}
@@ -948,7 +961,8 @@ pcmk__update_ordered_actions(pe_action_t *first, pe_action_t *then,
* \param[in] details If true, recursively log dependent actions
*/
void
-pcmk__log_action(const char *pre_text, const pe_action_t *action, bool details)
+pcmk__log_action(const char *pre_text, const pcmk_action_t *action,
+ bool details)
{
const char *node_uname = NULL;
const char *node_uuid = NULL;
@@ -956,7 +970,7 @@ pcmk__log_action(const char *pre_text, const pe_action_t *action, bool details)
CRM_CHECK(action != NULL, return);
- if (!pcmk_is_set(action->flags, pe_action_pseudo)) {
+ if (!pcmk_is_set(action->flags, pcmk_action_pseudo)) {
if (action->node != NULL) {
node_uname = action->node->details->uname;
node_uuid = action->node->details->id;
@@ -966,16 +980,14 @@ pcmk__log_action(const char *pre_text, const pe_action_t *action, bool details)
}
switch (text2task(action->task)) {
- case stonith_node:
- case shutdown_crm:
- if (pcmk_is_set(action->flags, pe_action_pseudo)) {
+ case pcmk_action_fence:
+ case pcmk_action_shutdown:
+ if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
desc = "Pseudo ";
- } else if (pcmk_is_set(action->flags, pe_action_optional)) {
+ } else if (pcmk_is_set(action->flags, pcmk_action_optional)) {
desc = "Optional ";
- } else if (!pcmk_is_set(action->flags, pe_action_runnable)) {
+ } else if (!pcmk_is_set(action->flags, pcmk_action_runnable)) {
desc = "!!Non-Startable!! ";
- } else if (pcmk_is_set(action->flags, pe_action_processed)) {
- desc = "";
} else {
desc = "(Provisional) ";
}
@@ -988,14 +1000,12 @@ pcmk__log_action(const char *pre_text, const pe_action_t *action, bool details)
(node_uuid? ")" : ""));
break;
default:
- if (pcmk_is_set(action->flags, pe_action_optional)) {
+ if (pcmk_is_set(action->flags, pcmk_action_optional)) {
desc = "Optional ";
- } else if (pcmk_is_set(action->flags, pe_action_pseudo)) {
+ } else if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
desc = "Pseudo ";
- } else if (!pcmk_is_set(action->flags, pe_action_runnable)) {
+ } else if (!pcmk_is_set(action->flags, pcmk_action_runnable)) {
desc = "!!Non-Startable!! ";
- } else if (pcmk_is_set(action->flags, pe_action_processed)) {
- desc = "";
} else {
desc = "(Provisional) ";
}
@@ -1012,16 +1022,16 @@ pcmk__log_action(const char *pre_text, const pe_action_t *action, bool details)
if (details) {
const GList *iter = NULL;
- const pe_action_wrapper_t *other = NULL;
+ const pcmk__related_action_t *other = NULL;
crm_trace("\t\t====== Preceding Actions");
for (iter = action->actions_before; iter != NULL; iter = iter->next) {
- other = (const pe_action_wrapper_t *) iter->data;
+ other = (const pcmk__related_action_t *) iter->data;
pcmk__log_action("\t\t", other->action, false);
}
crm_trace("\t\t====== Subsequent Actions");
for (iter = action->actions_after; iter != NULL; iter = iter->next) {
- other = (const pe_action_wrapper_t *) iter->data;
+ other = (const pcmk__related_action_t *) iter->data;
pcmk__log_action("\t\t", other->action, false);
}
crm_trace("\t\t====== End");
@@ -1041,19 +1051,19 @@ pcmk__log_action(const char *pre_text, const pe_action_t *action, bool details)
*
* \return Newly created shutdown action for \p node
*/
-pe_action_t *
-pcmk__new_shutdown_action(pe_node_t *node)
+pcmk_action_t *
+pcmk__new_shutdown_action(pcmk_node_t *node)
{
char *shutdown_id = NULL;
- pe_action_t *shutdown_op = NULL;
+ pcmk_action_t *shutdown_op = NULL;
CRM_ASSERT(node != NULL);
- shutdown_id = crm_strdup_printf("%s-%s", CRM_OP_SHUTDOWN,
+ shutdown_id = crm_strdup_printf("%s-%s", PCMK_ACTION_DO_SHUTDOWN,
node->details->uname);
- shutdown_op = custom_action(NULL, shutdown_id, CRM_OP_SHUTDOWN, node, FALSE,
- TRUE, node->details->data_set);
+ shutdown_op = custom_action(NULL, shutdown_id, PCMK_ACTION_DO_SHUTDOWN,
+ node, FALSE, node->details->data_set);
pcmk__order_stops_before_shutdown(node, shutdown_op);
add_hash_param(shutdown_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
@@ -1136,17 +1146,17 @@ pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *op,
* only ever get results for actions scheduled by us, so we can reasonably
* assume any "reload" is actually a pre-1.1 agent reload.
*/
- if (pcmk__str_any_of(task, CRMD_ACTION_RELOAD, CRMD_ACTION_RELOAD_AGENT,
+ if (pcmk__str_any_of(task, PCMK_ACTION_RELOAD, PCMK_ACTION_RELOAD_AGENT,
NULL)) {
if (op->op_status == PCMK_EXEC_DONE) {
- task = CRMD_ACTION_START;
+ task = PCMK_ACTION_START;
} else {
- task = CRMD_ACTION_STATUS;
+ task = PCMK_ACTION_MONITOR;
}
}
key = pcmk__op_key(op->rsc_id, task, op->interval_ms);
- if (pcmk__str_eq(task, CRMD_ACTION_NOTIFY, pcmk__str_none)) {
+ if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_none)) {
const char *n_type = crm_meta_value(op->params, "notify_type");
const char *n_task = crm_meta_value(op->params, "notify_operation");
@@ -1166,8 +1176,8 @@ pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *op,
/* Migration history is preserved separately, which usually matters for
* multiple nodes and is important for future cluster transitions.
*/
- } else if (pcmk__str_any_of(op->op_type, CRMD_ACTION_MIGRATE,
- CRMD_ACTION_MIGRATED, NULL)) {
+ } else if (pcmk__str_any_of(op->op_type, PCMK_ACTION_MIGRATE_TO,
+ PCMK_ACTION_MIGRATE_FROM, NULL)) {
op_id = strdup(key);
} else if (did_rsc_op_fail(op, target_rc)) {
@@ -1212,8 +1222,8 @@ pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *op,
crm_xml_add(xml_op, XML_ATTR_CRM_VERSION, caller_version);
crm_xml_add(xml_op, XML_ATTR_TRANSITION_KEY, op->user_data);
crm_xml_add(xml_op, XML_ATTR_TRANSITION_MAGIC, magic);
- crm_xml_add(xml_op, XML_LRM_ATTR_EXIT_REASON, exit_reason == NULL ? "" : exit_reason);
- crm_xml_add(xml_op, XML_LRM_ATTR_TARGET, node); /* For context during triage */
+ crm_xml_add(xml_op, XML_LRM_ATTR_EXIT_REASON, pcmk__s(exit_reason, ""));
+ crm_xml_add(xml_op, XML_LRM_ATTR_TARGET, node); // For context during triage
crm_xml_add_int(xml_op, XML_LRM_ATTR_CALLID, op->call_id);
crm_xml_add_int(xml_op, XML_LRM_ATTR_RC, op->rc);
@@ -1241,7 +1251,8 @@ pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *op,
}
}
- if (pcmk__str_any_of(op->op_type, CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED, NULL)) {
+ if (pcmk__str_any_of(op->op_type, PCMK_ACTION_MIGRATE_TO,
+ PCMK_ACTION_MIGRATE_FROM, NULL)) {
/*
* Always record migrate_source and migrate_target for migrate ops.
*/
@@ -1287,12 +1298,11 @@ pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *op,
* otherwise false
*/
bool
-pcmk__action_locks_rsc_to_node(const pe_action_t *action)
+pcmk__action_locks_rsc_to_node(const pcmk_action_t *action)
{
// Only resource actions taking place on resource's lock node are locked
if ((action == NULL) || (action->rsc == NULL)
- || (action->rsc->lock_node == NULL) || (action->node == NULL)
- || (action->node->details != action->rsc->lock_node->details)) {
+ || !pe__same_node(action->node, action->rsc->lock_node)) {
return false;
}
@@ -1300,7 +1310,7 @@ pcmk__action_locks_rsc_to_node(const pe_action_t *action)
* a demote would cause the controller to clear the lock)
*/
if (action->node->details->shutdown && (action->task != NULL)
- && (strcmp(action->task, RSC_STOP) != 0)) {
+ && (strcmp(action->task, PCMK_ACTION_STOP) != 0)) {
return false;
}
@@ -1311,8 +1321,8 @@ pcmk__action_locks_rsc_to_node(const pe_action_t *action)
static gint
sort_action_id(gconstpointer a, gconstpointer b)
{
- const pe_action_wrapper_t *action_wrapper2 = (const pe_action_wrapper_t *)a;
- const pe_action_wrapper_t *action_wrapper1 = (const pe_action_wrapper_t *)b;
+ const pcmk__related_action_t *action_wrapper2 = a;
+ const pcmk__related_action_t *action_wrapper1 = b;
if (a == NULL) {
return 1;
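sort_action_id() above is a GCompareFunc handed to g_list_sort() in the next hunk; it only needs to return a negative, zero, or positive value. A self-contained GLib example of the same pattern, sorting plain integers instead of the real pcmk__related_action_t wrappers:

#include <glib.h>
#include <stdio.h>

/* Comparator with the same shape as sort_action_id(): negative/zero/positive */
static gint compare_ints(gconstpointer a, gconstpointer b)
{
    return GPOINTER_TO_INT(a) - GPOINTER_TO_INT(b);
}

int main(void)
{
    GList *list = NULL;

    list = g_list_prepend(list, GINT_TO_POINTER(30));
    list = g_list_prepend(list, GINT_TO_POINTER(10));
    list = g_list_prepend(list, GINT_TO_POINTER(20));

    list = g_list_sort(list, compare_ints);   /* ascending: 10, 20, 30 */
    for (GList *iter = list; iter != NULL; iter = iter->next) {
        printf("%d\n", GPOINTER_TO_INT(iter->data));
    }
    g_list_free(list);
    return 0;
}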
@@ -1336,16 +1346,16 @@ sort_action_id(gconstpointer a, gconstpointer b)
* \param[in,out] action Action whose inputs should be checked
*/
void
-pcmk__deduplicate_action_inputs(pe_action_t *action)
+pcmk__deduplicate_action_inputs(pcmk_action_t *action)
{
GList *item = NULL;
GList *next = NULL;
- pe_action_wrapper_t *last_input = NULL;
+ pcmk__related_action_t *last_input = NULL;
action->actions_before = g_list_sort(action->actions_before,
sort_action_id);
for (item = action->actions_before; item != NULL; item = next) {
- pe_action_wrapper_t *input = (pe_action_wrapper_t *) item->data;
+ pcmk__related_action_t *input = item->data;
next = item->next;
if ((last_input != NULL)
@@ -1377,31 +1387,34 @@ pcmk__deduplicate_action_inputs(pe_action_t *action)
* \internal
* \brief Output all scheduled actions
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__output_actions(pe_working_set_t *data_set)
+pcmk__output_actions(pcmk_scheduler_t *scheduler)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
// Output node (non-resource) actions
- for (GList *iter = data_set->actions; iter != NULL; iter = iter->next) {
+ for (GList *iter = scheduler->actions; iter != NULL; iter = iter->next) {
char *node_name = NULL;
char *task = NULL;
- pe_action_t *action = (pe_action_t *) iter->data;
+ pcmk_action_t *action = (pcmk_action_t *) iter->data;
if (action->rsc != NULL) {
continue; // Resource actions will be output later
- } else if (pcmk_is_set(action->flags, pe_action_optional)) {
+ } else if (pcmk_is_set(action->flags, pcmk_action_optional)) {
continue; // This action was not scheduled
}
- if (pcmk__str_eq(action->task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
+ if (pcmk__str_eq(action->task, PCMK_ACTION_DO_SHUTDOWN,
+ pcmk__str_none)) {
task = strdup("Shutdown");
- } else if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_casei)) {
- const char *op = g_hash_table_lookup(action->meta, "stonith_action");
+ } else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH,
+ pcmk__str_none)) {
+ const char *op = g_hash_table_lookup(action->meta,
+ "stonith_action");
task = crm_strdup_printf("Fence (%s)", op);
@@ -1410,9 +1423,11 @@ pcmk__output_actions(pe_working_set_t *data_set)
}
if (pe__is_guest_node(action->node)) {
+ const pcmk_resource_t *remote = action->node->details->remote_rsc;
+
node_name = crm_strdup_printf("%s (resource: %s)",
pe__node_name(action->node),
- action->node->details->remote_rsc->container->id);
+ remote->container->id);
} else if (action->node != NULL) {
node_name = crm_strdup_printf("%s", pe__node_name(action->node));
}
@@ -1424,8 +1439,8 @@ pcmk__output_actions(pe_working_set_t *data_set)
}
// Output resource actions
- for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
rsc->cmds->output_actions(rsc);
}
@@ -1433,26 +1448,6 @@ pcmk__output_actions(pe_working_set_t *data_set)
/*!
* \internal
- * \brief Check whether action from resource history is still in configuration
- *
- * \param[in] rsc Resource that action is for
- * \param[in] task Action's name
- * \param[in] interval_ms Action's interval (in milliseconds)
- *
- * \return true if action is still in resource configuration, otherwise false
- */
-static bool
-action_in_config(const pe_resource_t *rsc, const char *task, guint interval_ms)
-{
- char *key = pcmk__op_key(rsc->id, task, interval_ms);
- bool config = (find_rsc_op_entry(rsc, key) != NULL);
-
- free(key);
- return config;
-}
-
-/*!
- * \internal
* \brief Get action name needed to compare digest for configuration changes
*
* \param[in] task Action name from history
@@ -1467,8 +1462,9 @@ task_for_digest(const char *task, guint interval_ms)
* the resource.
*/
if ((interval_ms == 0)
- && pcmk__str_any_of(task, RSC_STATUS, RSC_MIGRATED, RSC_PROMOTE, NULL)) {
- task = RSC_START;
+ && pcmk__str_any_of(task, PCMK_ACTION_MONITOR, PCMK_ACTION_MIGRATE_FROM,
+ PCMK_ACTION_PROMOTE, NULL)) {
+ task = PCMK_ACTION_START;
}
return task;
}
@@ -1486,25 +1482,25 @@ task_for_digest(const char *task, guint interval_ms)
*
* \param[in] xml_op Resource history entry with secure digest
* \param[in] digest_data Operation digest information being compared
- * \param[in] data_set Cluster working set
+ * \param[in] scheduler Scheduler data
*
* \return true if only sanitized parameters changed, otherwise false
*/
static bool
only_sanitized_changed(const xmlNode *xml_op,
const op_digest_cache_t *digest_data,
- const pe_working_set_t *data_set)
+ const pcmk_scheduler_t *scheduler)
{
const char *digest_secure = NULL;
- if (!pcmk_is_set(data_set->flags, pe_flag_sanitized)) {
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_sanitized)) {
// The scheduler is not being run as a simulation
return false;
}
digest_secure = crm_element_value(xml_op, XML_LRM_ATTR_SECURE_DIGEST);
- return (digest_data->rc != RSC_DIGEST_MATCH) && (digest_secure != NULL)
+ return (digest_data->rc != pcmk__digest_match) && (digest_secure != NULL)
&& (digest_data->digest_secure_calc != NULL)
&& (strcmp(digest_data->digest_secure_calc, digest_secure) == 0);
}
@@ -1519,12 +1515,12 @@ only_sanitized_changed(const xmlNode *xml_op,
* \param[in,out] node Node where resource should be restarted
*/
static void
-force_restart(pe_resource_t *rsc, const char *task, guint interval_ms,
- pe_node_t *node)
+force_restart(pcmk_resource_t *rsc, const char *task, guint interval_ms,
+ pcmk_node_t *node)
{
char *key = pcmk__op_key(rsc->id, task, interval_ms);
- pe_action_t *required = custom_action(rsc, key, task, NULL, FALSE, TRUE,
- rsc->cluster);
+ pcmk_action_t *required = custom_action(rsc, key, task, NULL, FALSE,
+ rsc->cluster);
pe_action_set_reason(required, "resource definition change", true);
trigger_unfencing(rsc, node, "Device parameters changed", NULL,
@@ -1535,28 +1531,30 @@ force_restart(pe_resource_t *rsc, const char *task, guint interval_ms,
* \internal
* \brief Schedule a reload of a resource on a node
*
- * \param[in,out] rsc Resource to reload
- * \param[in] node Where resource should be reloaded
+ * \param[in,out] data Resource to reload
+ * \param[in] user_data Where resource should be reloaded
*/
static void
-schedule_reload(pe_resource_t *rsc, const pe_node_t *node)
+schedule_reload(gpointer data, gpointer user_data)
{
- pe_action_t *reload = NULL;
+ pcmk_resource_t *rsc = data;
+ const pcmk_node_t *node = user_data;
+ pcmk_action_t *reload = NULL;
// For collective resources, just call recursively for children
- if (rsc->variant > pe_native) {
- g_list_foreach(rsc->children, (GFunc) schedule_reload, (gpointer) node);
+ if (rsc->variant > pcmk_rsc_variant_primitive) {
+ g_list_foreach(rsc->children, schedule_reload, user_data);
return;
}
// Skip the reload in certain situations
if ((node == NULL)
- || !pcmk_is_set(rsc->flags, pe_rsc_managed)
- || pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ || !pcmk_is_set(rsc->flags, pcmk_rsc_managed)
+ || pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
pe_rsc_trace(rsc, "Skip reload of %s:%s%s %s",
rsc->id,
- pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : " unmanaged",
- pcmk_is_set(rsc->flags, pe_rsc_failed)? " failed" : "",
+ pcmk_is_set(rsc->flags, pcmk_rsc_managed)? "" : " unmanaged",
+ pcmk_is_set(rsc->flags, pcmk_rsc_failed)? " failed" : "",
(node == NULL)? "inactive" : node->details->uname);
return;
}
@@ -1564,26 +1562,26 @@ schedule_reload(pe_resource_t *rsc, const pe_node_t *node)
/* If a resource's configuration changed while a start was pending,
* force a full restart instead of a reload.
*/
- if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_start_pending)) {
pe_rsc_trace(rsc, "%s: preventing agent reload because start pending",
rsc->id);
- custom_action(rsc, stop_key(rsc), CRMD_ACTION_STOP, node, FALSE, TRUE,
+ custom_action(rsc, stop_key(rsc), PCMK_ACTION_STOP, node, FALSE,
rsc->cluster);
return;
}
// Schedule the reload
- pe__set_resource_flags(rsc, pe_rsc_reload);
- reload = custom_action(rsc, reload_key(rsc), CRMD_ACTION_RELOAD_AGENT, node,
- FALSE, TRUE, rsc->cluster);
+ pe__set_resource_flags(rsc, pcmk_rsc_reload);
+ reload = custom_action(rsc, reload_key(rsc), PCMK_ACTION_RELOAD_AGENT, node,
+ FALSE, rsc->cluster);
pe_action_set_reason(reload, "resource definition change", FALSE);
// Set orderings so that a required stop or demote cancels the reload
pcmk__new_ordering(NULL, NULL, reload, rsc, stop_key(rsc), NULL,
- pe_order_optional|pe_order_then_cancels_first,
+ pcmk__ar_ordered|pcmk__ar_then_cancels_first,
rsc->cluster);
pcmk__new_ordering(NULL, NULL, reload, rsc, demote_key(rsc), NULL,
- pe_order_optional|pe_order_then_cancels_first,
+ pcmk__ar_ordered|pcmk__ar_then_cancels_first,
rsc->cluster);
}
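schedule_reload() now takes (gpointer data, gpointer user_data) so it matches GLib's GFunc signature and can be passed to g_list_foreach() without the (GFunc) cast the old code needed. A minimal sketch of that callback shape, with hypothetical names unrelated to Pacemaker:

#include <glib.h>
#include <stdio.h>

/* GFunc-shaped callback: data is the list element, user_data is shared context */
static void print_with_prefix(gpointer data, gpointer user_data)
{
    const char *prefix = user_data;

    printf("%s%s\n", prefix, (const char *) data);
}

int main(void)
{
    GList *names = NULL;

    names = g_list_append(names, "alpha");
    names = g_list_append(names, "beta");

    /* No cast needed because the callback already matches GFunc */
    g_list_foreach(names, print_with_prefix, "reload: ");
    g_list_free(names);
    return 0;
}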
@@ -1602,7 +1600,7 @@ schedule_reload(pe_resource_t *rsc, const pe_node_t *node)
* \return true if action configuration changed, otherwise false
*/
bool
-pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node,
+pcmk__check_action_config(pcmk_resource_t *rsc, pcmk_node_t *node,
const xmlNode *xml_op)
{
guint interval_ms = 0;
@@ -1619,14 +1617,15 @@ pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node,
// If this is a recurring action, check whether it has been orphaned
if (interval_ms > 0) {
- if (action_in_config(rsc, task, interval_ms)) {
+ if (pcmk__find_action_config(rsc, task, interval_ms, false) != NULL) {
pe_rsc_trace(rsc, "%s-interval %s for %s on %s is in configuration",
pcmk__readable_interval(interval_ms), task, rsc->id,
pe__node_name(node));
} else if (pcmk_is_set(rsc->cluster->flags,
- pe_flag_stop_action_orphans)) {
+ pcmk_sched_cancel_removed_actions)) {
pcmk__schedule_cancel(rsc,
- crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
+ crm_element_value(xml_op,
+ XML_LRM_ATTR_CALLID),
task, interval_ms, node, "orphan");
return true;
} else {
@@ -1658,13 +1657,13 @@ pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node,
}
switch (digest_data->rc) {
- case RSC_DIGEST_RESTART:
+ case pcmk__digest_restart:
crm_log_xml_debug(digest_data->params_restart, "params:restart");
force_restart(rsc, task, interval_ms, node);
return true;
- case RSC_DIGEST_ALL:
- case RSC_DIGEST_UNKNOWN:
+ case pcmk__digest_unknown:
+ case pcmk__digest_mismatch:
// Changes that can potentially be handled by an agent reload
if (interval_ms > 0) {
@@ -1682,12 +1681,12 @@ pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node,
"Device parameters changed (reload)", NULL,
rsc->cluster);
crm_log_xml_debug(digest_data->params_all, "params:reload");
- schedule_reload(rsc, node);
+ schedule_reload((gpointer) rsc, (gpointer) node);
} else {
pe_rsc_trace(rsc,
- "Restarting %s because agent doesn't support reload",
- rsc->id);
+ "Restarting %s "
+ "because agent doesn't support reload", rsc->id);
crm_log_xml_debug(digest_data->params_restart,
"params:restart");
force_restart(rsc, task, interval_ms, node);
@@ -1737,15 +1736,15 @@ rsc_history_as_list(const xmlNode *rsc_entry, int *start_index, int *stop_index)
* \param[in,out] node Node whose history is being processed
*/
static void
-process_rsc_history(const xmlNode *rsc_entry, pe_resource_t *rsc,
- pe_node_t *node)
+process_rsc_history(const xmlNode *rsc_entry, pcmk_resource_t *rsc,
+ pcmk_node_t *node)
{
int offset = -1;
int stop_index = 0;
int start_index = 0;
GList *sorted_op_list = NULL;
- if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
if (pe_rsc_is_anon_clone(pe__const_top_resource(rsc, false))) {
pe_rsc_trace(rsc,
"Skipping configuration check "
@@ -1797,33 +1796,36 @@ process_rsc_history(const xmlNode *rsc_entry, pe_resource_t *rsc,
crm_element_value_ms(rsc_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
if ((interval_ms > 0)
- && (pcmk_is_set(rsc->flags, pe_rsc_maintenance)
+ && (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)
|| node->details->maintenance)) {
// Maintenance mode cancels recurring operations
pcmk__schedule_cancel(rsc,
- crm_element_value(rsc_op, XML_LRM_ATTR_CALLID),
+ crm_element_value(rsc_op,
+ XML_LRM_ATTR_CALLID),
task, interval_ms, node, "maintenance mode");
} else if ((interval_ms > 0)
- || pcmk__strcase_any_of(task, RSC_STATUS, RSC_START,
- RSC_PROMOTE, RSC_MIGRATED, NULL)) {
+ || pcmk__strcase_any_of(task, PCMK_ACTION_MONITOR,
+ PCMK_ACTION_START,
+ PCMK_ACTION_PROMOTE,
+ PCMK_ACTION_MIGRATE_FROM, NULL)) {
/* If a resource operation failed, and the operation's definition
* has changed, clear any fail count so the operation can be retried fresh.
*/
if (pe__bundle_needs_remote_name(rsc)) {
- /* We haven't allocated resources to nodes yet, so if the
+ /* We haven't assigned resources to nodes yet, so if the
* REMOTE_CONTAINER_HACK is used, we may calculate the digest
* based on the literal "#uname" value rather than the properly
* substituted value. That would mistakenly make the action
* definition appear to have been changed. Defer the check until
* later in this case.
*/
- pe__add_param_check(rsc_op, rsc, node, pe_check_active,
+ pe__add_param_check(rsc_op, rsc, node, pcmk__check_active,
rsc->cluster);
} else if (pcmk__check_action_config(rsc, node, rsc_op)
- && (pe_get_failcount(node, rsc, NULL, pe_fc_effective,
+ && (pe_get_failcount(node, rsc, NULL, pcmk__fc_effective,
NULL) != 0)) {
pe__clear_failcount(rsc, node, "action definition changed",
rsc->cluster);
@@ -1847,21 +1849,21 @@ process_rsc_history(const xmlNode *rsc_entry, pe_resource_t *rsc,
* \param[in] lrm_rscs Node's <lrm_resources> from CIB status XML
*/
static void
-process_node_history(pe_node_t *node, const xmlNode *lrm_rscs)
+process_node_history(pcmk_node_t *node, const xmlNode *lrm_rscs)
{
crm_trace("Processing node history for %s", pe__node_name(node));
for (const xmlNode *rsc_entry = first_named_child(lrm_rscs,
XML_LRM_TAG_RESOURCE);
rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) {
- if (xml_has_children(rsc_entry)) {
+ if (rsc_entry->children != NULL) {
GList *result = pcmk__rscs_matching_id(ID(rsc_entry),
node->details->data_set);
for (GList *iter = result; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
- if (rsc->variant == pe_native) {
+ if (rsc->variant == pcmk_rsc_variant_primitive) {
process_rsc_history(rsc_entry, rsc, node);
}
}
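The rewritten check reads the xmlNode's children pointer directly rather than calling xml_has_children(). A tiny libxml2 example showing that the children field stays NULL until a child element is added (element names here are illustrative):

#include <libxml/tree.h>
#include <stdio.h>

int main(void)
{
    xmlNodePtr parent = xmlNewNode(NULL, BAD_CAST "lrm_resource");

    printf("has children? %s\n", (parent->children != NULL)? "yes" : "no"); /* no */
    xmlNewChild(parent, NULL, BAD_CAST "lrm_rsc_op", NULL);
    printf("has children? %s\n", (parent->children != NULL)? "yes" : "no"); /* yes */

    xmlFreeNode(parent);
    return 0;
}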
@@ -1885,10 +1887,10 @@ process_node_history(pe_node_t *node, const xmlNode *lrm_rscs)
* (This also cancels recurring actions for maintenance mode, which is not
* entirely related but convenient to do here.)
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__handle_rsc_config_changes(pe_working_set_t *data_set)
+pcmk__handle_rsc_config_changes(pcmk_scheduler_t *scheduler)
{
crm_trace("Check resource and action configuration for changes");
@@ -1896,8 +1898,8 @@ pcmk__handle_rsc_config_changes(pe_working_set_t *data_set)
* and search for the appropriate status subsection for each. This skips
* orphaned nodes and lets us eliminate some cases before searching the XML.
*/
- for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) iter->data;
+ for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
+ pcmk_node_t *node = (pcmk_node_t *) iter->data;
/* Don't bother checking actions for a node that can't run actions ...
* unless it's in maintenance mode, in which case we still need to
@@ -1910,7 +1912,7 @@ pcmk__handle_rsc_config_changes(pe_working_set_t *data_set)
xmlNode *history = NULL;
xpath = crm_strdup_printf(XPATH_NODE_HISTORY, node->details->uname);
- history = get_xpath_object(xpath, data_set->input, LOG_NEVER);
+ history = get_xpath_object(xpath, scheduler->input, LOG_NEVER);
free(xpath);
process_node_history(node, history);
diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c
index 5682744..1c66314 100644
--- a/lib/pacemaker/pcmk_sched_bundle.c
+++ b/lib/pacemaker/pcmk_sched_bundle.c
@@ -16,402 +16,496 @@
#include "libpacemaker_private.h"
-#define PE__VARIANT_BUNDLE 1
-#include <lib/pengine/variant.h>
+struct assign_data {
+ const pcmk_node_t *prefer;
+ bool stop_if_fail;
+};
+/*!
+ * \internal
+ * \brief Assign a single bundle replica's resources (other than container)
+ *
+ * \param[in,out] replica Replica to assign
+ * \param[in] user_data Preferred node, if any
+ *
+ * \return true (to indicate that any further replicas should be processed)
+ */
static bool
-is_bundle_node(pe__bundle_variant_data_t *data, pe_node_t *node)
+assign_replica(pe__bundle_replica_t *replica, void *user_data)
{
- for (GList *gIter = data->replicas; gIter != NULL; gIter = gIter->next) {
- pe__bundle_replica_t *replica = gIter->data;
+ pcmk_node_t *container_host = NULL;
+
+ struct assign_data *assign_data = user_data;
+ const pcmk_node_t *prefer = assign_data->prefer;
+ bool stop_if_fail = assign_data->stop_if_fail;
+
+ const pcmk_resource_t *bundle = pe__const_top_resource(replica->container,
+ true);
+
+ if (replica->ip != NULL) {
+ pe_rsc_trace(bundle, "Assigning bundle %s IP %s",
+ bundle->id, replica->ip->id);
+ replica->ip->cmds->assign(replica->ip, prefer, stop_if_fail);
+ }
+
+ container_host = replica->container->allocated_to;
+ if (replica->remote != NULL) {
+ if (pe__is_guest_or_remote_node(container_host)) {
+ /* REMOTE_CONTAINER_HACK: "Nested" connection resources must be on
+ * the same host because Pacemaker Remote only supports a single
+ * active connection.
+ */
+ pcmk__new_colocation("#replica-remote-with-host-remote", NULL,
+ INFINITY, replica->remote,
+ container_host->details->remote_rsc, NULL,
+ NULL, pcmk__coloc_influence);
+ }
+ pe_rsc_trace(bundle, "Assigning bundle %s connection %s",
+ bundle->id, replica->remote->id);
+ replica->remote->cmds->assign(replica->remote, prefer, stop_if_fail);
+ }
+
+ if (replica->child != NULL) {
+ pcmk_node_t *node = NULL;
+ GHashTableIter iter;
- if (node->details == replica->node->details) {
- return TRUE;
+ g_hash_table_iter_init(&iter, replica->child->allowed_nodes);
+ while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
+ if (!pe__same_node(node, replica->node)) {
+ node->weight = -INFINITY;
+ } else if (!pcmk__threshold_reached(replica->child, node, NULL)) {
+ node->weight = INFINITY;
+ }
}
+
+ pe__set_resource_flags(replica->child->parent, pcmk_rsc_assigning);
+ pe_rsc_trace(bundle, "Assigning bundle %s replica child %s",
+ bundle->id, replica->child->id);
+ replica->child->cmds->assign(replica->child, replica->node,
+ stop_if_fail);
+ pe__clear_resource_flags(replica->child->parent, pcmk_rsc_assigning);
}
- return FALSE;
+ return true;
}
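The allowed_nodes walk in assign_replica() uses GLib's GHashTableIter, which visits each key/value pair in place. A small generic example of that iteration pattern, using string keys and values rather than the real node tables:

#include <glib.h>
#include <stdio.h>

int main(void)
{
    GHashTable *table = g_hash_table_new(g_str_hash, g_str_equal);
    GHashTableIter iter;
    gpointer key = NULL;
    gpointer value = NULL;

    g_hash_table_insert(table, "node1", "score=0");
    g_hash_table_insert(table, "node2", "score=-INFINITY");

    /* Same iteration shape as the allowed_nodes loops above */
    g_hash_table_iter_init(&iter, table);
    while (g_hash_table_iter_next(&iter, &key, &value)) {
        printf("%s -> %s\n", (const char *) key, (const char *) value);
    }
    g_hash_table_destroy(table);
    return 0;
}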
/*!
* \internal
* \brief Assign a bundle resource to a node
*
- * \param[in,out] rsc Resource to assign to a node
- * \param[in] prefer Node to prefer, if all else is equal
+ * \param[in,out] rsc Resource to assign to a node
+ * \param[in] prefer Node to prefer, if all else is equal
+ * \param[in] stop_if_fail If \c true and a primitive descendant of \p rsc
+ * can't be assigned to a node, set the
+ * descendant's next role to stopped and update
+ * existing actions
*
* \return Node that \p rsc is assigned to, if assigned entirely to one node
+ *
+ * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
+ * completely undo the assignment. A successful assignment can be either
+ * undone or left alone as final. A failed assignment has the same effect
+ * as calling pcmk__unassign_resource(); there are no side effects on
+ * roles or actions.
*/
-pe_node_t *
-pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer)
+pcmk_node_t *
+pcmk__bundle_assign(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
+ bool stop_if_fail)
{
GList *containers = NULL;
- pe__bundle_variant_data_t *bundle_data = NULL;
+ pcmk_resource_t *bundled_resource = NULL;
+ struct assign_data assign_data = { prefer, stop_if_fail };
- CRM_CHECK(rsc != NULL, return NULL);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle));
- get_bundle_variant_data(bundle_data, rsc);
+ pe_rsc_trace(rsc, "Assigning bundle %s", rsc->id);
+ pe__set_resource_flags(rsc, pcmk_rsc_assigning);
- pe__set_resource_flags(rsc, pe_rsc_allocating);
- containers = pe__bundle_containers(rsc);
+ pe__show_node_scores(!pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_output_scores),
+ rsc, __func__, rsc->allowed_nodes, rsc->cluster);
- pe__show_node_weights(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
- rsc, __func__, rsc->allowed_nodes, rsc->cluster);
-
- containers = g_list_sort(containers, pcmk__cmp_instance);
- pcmk__assign_instances(rsc, containers, bundle_data->nreplicas,
- bundle_data->nreplicas_per_host);
+ // Assign all containers first, so we know what nodes the bundle will be on
+ containers = g_list_sort(pe__bundle_containers(rsc), pcmk__cmp_instance);
+ pcmk__assign_instances(rsc, containers, pe__bundle_max(rsc),
+ rsc->fns->max_per_node(rsc));
g_list_free(containers);
- for (GList *gIter = bundle_data->replicas; gIter != NULL;
- gIter = gIter->next) {
- pe__bundle_replica_t *replica = gIter->data;
- pe_node_t *container_host = NULL;
-
- CRM_ASSERT(replica);
- if (replica->ip) {
- pe_rsc_trace(rsc, "Allocating bundle %s IP %s",
- rsc->id, replica->ip->id);
- replica->ip->cmds->assign(replica->ip, prefer);
- }
-
- container_host = replica->container->allocated_to;
- if (replica->remote && pe__is_guest_or_remote_node(container_host)) {
- /* We need 'nested' connection resources to be on the same
- * host because pacemaker-remoted only supports a single
- * active connection
- */
- pcmk__new_colocation("child-remote-with-docker-remote", NULL,
- INFINITY, replica->remote,
- container_host->details->remote_rsc, NULL,
- NULL, true, rsc->cluster);
- }
-
- if (replica->remote) {
- pe_rsc_trace(rsc, "Allocating bundle %s connection %s",
- rsc->id, replica->remote->id);
- replica->remote->cmds->assign(replica->remote, prefer);
- }
-
- // Explicitly allocate replicas' children before bundle child
- if (replica->child) {
- pe_node_t *node = NULL;
- GHashTableIter iter;
-
- g_hash_table_iter_init(&iter, replica->child->allowed_nodes);
- while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
- if (node->details != replica->node->details) {
- node->weight = -INFINITY;
- } else if (!pcmk__threshold_reached(replica->child, node,
- NULL)) {
- node->weight = INFINITY;
- }
- }
-
- pe__set_resource_flags(replica->child->parent, pe_rsc_allocating);
- pe_rsc_trace(rsc, "Allocating bundle %s replica child %s",
- rsc->id, replica->child->id);
- replica->child->cmds->assign(replica->child, replica->node);
- pe__clear_resource_flags(replica->child->parent,
- pe_rsc_allocating);
- }
- }
+ // Then assign remaining replica resources
+ pe__foreach_bundle_replica(rsc, assign_replica, (void *) &assign_data);
- if (bundle_data->child) {
- pe_node_t *node = NULL;
+ // Finally, assign the bundled resources to each bundle node
+ bundled_resource = pe__bundled_resource(rsc);
+ if (bundled_resource != NULL) {
+ pcmk_node_t *node = NULL;
GHashTableIter iter;
- g_hash_table_iter_init(&iter, bundle_data->child->allowed_nodes);
+
+ g_hash_table_iter_init(&iter, bundled_resource->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
- if (is_bundle_node(bundle_data, node)) {
+ if (pe__node_is_bundle_instance(rsc, node)) {
node->weight = 0;
} else {
node->weight = -INFINITY;
}
}
- pe_rsc_trace(rsc, "Allocating bundle %s child %s",
- rsc->id, bundle_data->child->id);
- bundle_data->child->cmds->assign(bundle_data->child, prefer);
+ bundled_resource->cmds->assign(bundled_resource, prefer, stop_if_fail);
}
- pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional);
+ pe__clear_resource_flags(rsc, pcmk_rsc_assigning|pcmk_rsc_unassigned);
return NULL;
}
+/*!
+ * \internal
+ * \brief Create actions for a bundle replica's resources (other than child)
+ *
+ * \param[in,out] replica Replica to create actions for
+ * \param[in] user_data Unused
+ *
+ * \return true (to indicate that any further replicas should be processed)
+ */
+static bool
+create_replica_actions(pe__bundle_replica_t *replica, void *user_data)
+{
+ if (replica->ip != NULL) {
+ replica->ip->cmds->create_actions(replica->ip);
+ }
+ if (replica->container != NULL) {
+ replica->container->cmds->create_actions(replica->container);
+ }
+ if (replica->remote != NULL) {
+ replica->remote->cmds->create_actions(replica->remote);
+ }
+ return true;
+}
+/*!
+ * \internal
+ * \brief Create all actions needed for a given bundle resource
+ *
+ * \param[in,out] rsc Bundle resource to create actions for
+ */
void
-pcmk__bundle_create_actions(pe_resource_t *rsc)
+pcmk__bundle_create_actions(pcmk_resource_t *rsc)
{
- pe_action_t *action = NULL;
+ pcmk_action_t *action = NULL;
GList *containers = NULL;
- pe__bundle_variant_data_t *bundle_data = NULL;
+ pcmk_resource_t *bundled_resource = NULL;
- CRM_CHECK(rsc != NULL, return);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle));
- containers = pe__bundle_containers(rsc);
- get_bundle_variant_data(bundle_data, rsc);
- for (GList *gIter = bundle_data->replicas; gIter != NULL;
- gIter = gIter->next) {
- pe__bundle_replica_t *replica = gIter->data;
-
- CRM_ASSERT(replica);
- if (replica->ip) {
- replica->ip->cmds->create_actions(replica->ip);
- }
- if (replica->container) {
- replica->container->cmds->create_actions(replica->container);
- }
- if (replica->remote) {
- replica->remote->cmds->create_actions(replica->remote);
- }
- }
+ pe__foreach_bundle_replica(rsc, create_replica_actions, NULL);
+ containers = pe__bundle_containers(rsc);
pcmk__create_instance_actions(rsc, containers);
+ g_list_free(containers);
- if (bundle_data->child) {
- bundle_data->child->cmds->create_actions(bundle_data->child);
+ bundled_resource = pe__bundled_resource(rsc);
+ if (bundled_resource != NULL) {
+ bundled_resource->cmds->create_actions(bundled_resource);
- if (pcmk_is_set(bundle_data->child->flags, pe_rsc_promotable)) {
- /* promote */
- pe__new_rsc_pseudo_action(rsc, RSC_PROMOTE, true, true);
- action = pe__new_rsc_pseudo_action(rsc, RSC_PROMOTED, true, true);
+ if (pcmk_is_set(bundled_resource->flags, pcmk_rsc_promotable)) {
+ pe__new_rsc_pseudo_action(rsc, PCMK_ACTION_PROMOTE, true, true);
+ action = pe__new_rsc_pseudo_action(rsc, PCMK_ACTION_PROMOTED,
+ true, true);
action->priority = INFINITY;
- /* demote */
- pe__new_rsc_pseudo_action(rsc, RSC_DEMOTE, true, true);
- action = pe__new_rsc_pseudo_action(rsc, RSC_DEMOTED, true, true);
+ pe__new_rsc_pseudo_action(rsc, PCMK_ACTION_DEMOTE, true, true);
+ action = pe__new_rsc_pseudo_action(rsc, PCMK_ACTION_DEMOTED,
+ true, true);
action->priority = INFINITY;
}
}
-
- g_list_free(containers);
}
-void
-pcmk__bundle_internal_constraints(pe_resource_t *rsc)
+/*!
+ * \internal
+ * \brief Create internal constraints for a bundle replica's resources
+ *
+ * \param[in,out] replica Replica to create internal constraints for
+ * \param[in,out] user_data Replica's parent bundle
+ *
+ * \return true (to indicate that any further replicas should be processed)
+ */
+static bool
+replica_internal_constraints(pe__bundle_replica_t *replica, void *user_data)
{
- pe__bundle_variant_data_t *bundle_data = NULL;
+ pcmk_resource_t *bundle = user_data;
- CRM_CHECK(rsc != NULL, return);
+ replica->container->cmds->internal_constraints(replica->container);
- get_bundle_variant_data(bundle_data, rsc);
+ // Start bundle -> start replica container
+ pcmk__order_starts(bundle, replica->container,
+ pcmk__ar_unrunnable_first_blocks
+ |pcmk__ar_then_implies_first_graphed);
- if (bundle_data->child) {
- pcmk__order_resource_actions(rsc, RSC_START, bundle_data->child,
- RSC_START, pe_order_implies_first_printed);
- pcmk__order_resource_actions(rsc, RSC_STOP, bundle_data->child,
- RSC_STOP, pe_order_implies_first_printed);
+ // Stop bundle -> stop replica child and container
+ if (replica->child != NULL) {
+ pcmk__order_stops(bundle, replica->child,
+ pcmk__ar_then_implies_first_graphed);
+ }
+ pcmk__order_stops(bundle, replica->container,
+ pcmk__ar_then_implies_first_graphed);
+
+ // Start replica container -> bundle is started
+ pcmk__order_resource_actions(replica->container, PCMK_ACTION_START, bundle,
+ PCMK_ACTION_RUNNING,
+ pcmk__ar_first_implies_then_graphed);
+
+ // Stop replica container -> bundle is stopped
+ pcmk__order_resource_actions(replica->container, PCMK_ACTION_STOP, bundle,
+ PCMK_ACTION_STOPPED,
+ pcmk__ar_first_implies_then_graphed);
+
+ if (replica->ip != NULL) {
+ replica->ip->cmds->internal_constraints(replica->ip);
+
+ // Replica IP address -> replica container (symmetric)
+ pcmk__order_starts(replica->ip, replica->container,
+ pcmk__ar_unrunnable_first_blocks
+ |pcmk__ar_guest_allowed);
+ pcmk__order_stops(replica->container, replica->ip,
+ pcmk__ar_then_implies_first|pcmk__ar_guest_allowed);
+
+ pcmk__new_colocation("#ip-with-container", NULL, INFINITY, replica->ip,
+ replica->container, NULL, NULL,
+ pcmk__coloc_influence);
+ }
- if (bundle_data->child->children) {
- pcmk__order_resource_actions(bundle_data->child, RSC_STARTED, rsc,
- RSC_STARTED,
- pe_order_implies_then_printed);
- pcmk__order_resource_actions(bundle_data->child, RSC_STOPPED, rsc,
- RSC_STOPPED,
- pe_order_implies_then_printed);
- } else {
- pcmk__order_resource_actions(bundle_data->child, RSC_START, rsc,
- RSC_STARTED,
- pe_order_implies_then_printed);
- pcmk__order_resource_actions(bundle_data->child, RSC_STOP, rsc,
- RSC_STOPPED,
- pe_order_implies_then_printed);
- }
+ if (replica->remote != NULL) {
+ /* This handles ordering and colocating remote relative to container
+ * (via "#resource-with-container"). Since IP is also ordered and
+ * colocated relative to the container, we don't need to do anything
+ * explicit here with IP.
+ */
+ replica->remote->cmds->internal_constraints(replica->remote);
}
- for (GList *gIter = bundle_data->replicas; gIter != NULL;
- gIter = gIter->next) {
- pe__bundle_replica_t *replica = gIter->data;
+ if (replica->child != NULL) {
+ CRM_ASSERT(replica->remote != NULL);
+ // "Start remote then child" is implicit in scheduler's remote logic
+ }
+ return true;
+}
- CRM_ASSERT(replica);
- CRM_ASSERT(replica->container);
+/*!
+ * \internal
+ * \brief Create implicit constraints needed for a bundle resource
+ *
+ * \param[in,out] rsc Bundle resource to create implicit constraints for
+ */
+void
+pcmk__bundle_internal_constraints(pcmk_resource_t *rsc)
+{
+ pcmk_resource_t *bundled_resource = NULL;
- replica->container->cmds->internal_constraints(replica->container);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle));
- pcmk__order_starts(rsc, replica->container,
- pe_order_runnable_left|pe_order_implies_first_printed);
+ pe__foreach_bundle_replica(rsc, replica_internal_constraints, rsc);
- if (replica->child) {
- pcmk__order_stops(rsc, replica->child,
- pe_order_implies_first_printed);
- }
- pcmk__order_stops(rsc, replica->container,
- pe_order_implies_first_printed);
- pcmk__order_resource_actions(replica->container, RSC_START, rsc,
- RSC_STARTED,
- pe_order_implies_then_printed);
- pcmk__order_resource_actions(replica->container, RSC_STOP, rsc,
- RSC_STOPPED,
- pe_order_implies_then_printed);
-
- if (replica->ip) {
- replica->ip->cmds->internal_constraints(replica->ip);
-
- // Start IP then container
- pcmk__order_starts(replica->ip, replica->container,
- pe_order_runnable_left|pe_order_preserve);
- pcmk__order_stops(replica->container, replica->ip,
- pe_order_implies_first|pe_order_preserve);
-
- pcmk__new_colocation("ip-with-docker", NULL, INFINITY, replica->ip,
- replica->container, NULL, NULL, true,
- rsc->cluster);
- }
+ bundled_resource = pe__bundled_resource(rsc);
+ if (bundled_resource == NULL) {
+ return;
+ }
- if (replica->remote) {
- /* This handles ordering and colocating remote relative to container
- * (via "resource-with-container"). Since IP is also ordered and
- * colocated relative to the container, we don't need to do anything
- * explicit here with IP.
- */
- replica->remote->cmds->internal_constraints(replica->remote);
- }
+ // Start bundle -> start bundled clone
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_START, bundled_resource,
+ PCMK_ACTION_START,
+ pcmk__ar_then_implies_first_graphed);
- if (replica->child) {
- CRM_ASSERT(replica->remote);
+ // Bundled clone is started -> bundle is started
+ pcmk__order_resource_actions(bundled_resource, PCMK_ACTION_RUNNING,
+ rsc, PCMK_ACTION_RUNNING,
+ pcmk__ar_first_implies_then_graphed);
- // "Start remote then child" is implicit in scheduler's remote logic
- }
+ // Stop bundle -> stop bundled clone
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_STOP, bundled_resource,
+ PCMK_ACTION_STOP,
+ pcmk__ar_then_implies_first_graphed);
- }
+ // Bundled clone is stopped -> bundle is stopped
+ pcmk__order_resource_actions(bundled_resource, PCMK_ACTION_STOPPED,
+ rsc, PCMK_ACTION_STOPPED,
+ pcmk__ar_first_implies_then_graphed);
- if (bundle_data->child) {
- bundle_data->child->cmds->internal_constraints(bundle_data->child);
- if (pcmk_is_set(bundle_data->child->flags, pe_rsc_promotable)) {
- pcmk__promotable_restart_ordering(rsc);
+ bundled_resource->cmds->internal_constraints(bundled_resource);
- /* child demoted before global demoted */
- pcmk__order_resource_actions(bundle_data->child, RSC_DEMOTED, rsc,
- RSC_DEMOTED,
- pe_order_implies_then_printed);
+ if (!pcmk_is_set(bundled_resource->flags, pcmk_rsc_promotable)) {
+ return;
+ }
+ pcmk__promotable_restart_ordering(rsc);
+
+ // Demote bundle -> demote bundled clone
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_DEMOTE, bundled_resource,
+ PCMK_ACTION_DEMOTE,
+ pcmk__ar_then_implies_first_graphed);
+
+ // Bundled clone is demoted -> bundle is demoted
+ pcmk__order_resource_actions(bundled_resource, PCMK_ACTION_DEMOTED,
+ rsc, PCMK_ACTION_DEMOTED,
+ pcmk__ar_first_implies_then_graphed);
+
+ // Promote bundle -> promote bundled clone
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_PROMOTE,
+ bundled_resource, PCMK_ACTION_PROMOTE,
+ pcmk__ar_then_implies_first_graphed);
+
+ // Bundled clone is promoted -> bundle is promoted
+ pcmk__order_resource_actions(bundled_resource, PCMK_ACTION_PROMOTED,
+ rsc, PCMK_ACTION_PROMOTED,
+ pcmk__ar_first_implies_then_graphed);
+}
- /* global demote before child demote */
- pcmk__order_resource_actions(rsc, RSC_DEMOTE, bundle_data->child,
- RSC_DEMOTE,
- pe_order_implies_first_printed);
+struct match_data {
+ const pcmk_node_t *node; // Node to compare against replica
+ pcmk_resource_t *container; // Replica container corresponding to node
+};
- /* child promoted before global promoted */
- pcmk__order_resource_actions(bundle_data->child, RSC_PROMOTED, rsc,
- RSC_PROMOTED,
- pe_order_implies_then_printed);
+/*!
+ * \internal
+ * \brief Check whether a replica container is assigned to a given node
+ *
+ * \param[in] replica Replica to check
+ * \param[in,out] user_data struct match_data with node to compare against
+ *
+ * \return true if the replica does not match (to indicate further replicas
+ * should be processed), otherwise false
+ */
+static bool
+match_replica_container(const pe__bundle_replica_t *replica, void *user_data)
+{
+ struct match_data *match_data = user_data;
- /* global promote before child promote */
- pcmk__order_resource_actions(rsc, RSC_PROMOTE, bundle_data->child,
- RSC_PROMOTE,
- pe_order_implies_first_printed);
- }
+ if (pcmk__instance_matches(replica->container, match_data->node,
+ pcmk_role_unknown, false)) {
+ match_data->container = replica->container;
+ return false; // Match found, don't bother searching further replicas
}
+ return true; // No match, keep searching
}
-static pe_resource_t *
-compatible_replica_for_node(const pe_resource_t *rsc_lh,
- const pe_node_t *candidate,
- const pe_resource_t *rsc, enum rsc_role_e filter,
- gboolean current)
+/*!
+ * \internal
+ * \brief Get the host to which a bundle node is assigned
+ *
+ * \param[in] node Possible bundle node to check
+ *
+ * \return Node to which the container for \p node is assigned if \p node is a
+ * bundle node, otherwise \p node itself
+ */
+static const pcmk_node_t *
+get_bundle_node_host(const pcmk_node_t *node)
{
- pe__bundle_variant_data_t *bundle_data = NULL;
-
- CRM_CHECK(candidate != NULL, return NULL);
- get_bundle_variant_data(bundle_data, rsc);
+ if (pe__is_bundle_node(node)) {
+ const pcmk_resource_t *container = node->details->remote_rsc->container;
- crm_trace("Looking for compatible child from %s for %s on %s",
- rsc_lh->id, rsc->id, pe__node_name(candidate));
-
- for (GList *gIter = bundle_data->replicas; gIter != NULL;
- gIter = gIter->next) {
- pe__bundle_replica_t *replica = gIter->data;
-
- if (pcmk__instance_matches(replica->container, candidate, filter,
- current)) {
- crm_trace("Pairing %s with %s on %s",
- rsc_lh->id, replica->container->id,
- pe__node_name(candidate));
- return replica->container;
- }
+ return container->fns->location(container, NULL, 0);
}
-
- crm_trace("Can't pair %s with %s", rsc_lh->id, rsc->id);
- return NULL;
+ return node;
}
-static pe_resource_t *
-compatible_replica(const pe_resource_t *rsc_lh, const pe_resource_t *rsc,
- enum rsc_role_e filter, gboolean current,
- pe_working_set_t *data_set)
+/*!
+ * \internal
+ * \brief Find a bundle container compatible with a dependent resource
+ *
+ * \param[in] dependent Dependent resource in colocation with bundle
+ * \param[in] bundle Bundle that \p dependent is colocated with
+ *
+ * \return A container from \p bundle assigned to the same node as \p dependent
+ * if \p dependent is assigned, otherwise a container assigned to any of
+ * \p dependent's allowed nodes, otherwise NULL
+ */
+static pcmk_resource_t *
+compatible_container(const pcmk_resource_t *dependent,
+ const pcmk_resource_t *bundle)
{
GList *scratch = NULL;
- pe_resource_t *pair = NULL;
- pe_node_t *active_node_lh = NULL;
-
- active_node_lh = rsc_lh->fns->location(rsc_lh, NULL, current);
- if (active_node_lh) {
- return compatible_replica_for_node(rsc_lh, active_node_lh, rsc, filter,
- current);
+ struct match_data match_data = { NULL, NULL };
+
+ // If dependent is assigned, only check there
+ match_data.node = dependent->fns->location(dependent, NULL, 0);
+ match_data.node = get_bundle_node_host(match_data.node);
+ if (match_data.node != NULL) {
+ pe__foreach_const_bundle_replica(bundle, match_replica_container,
+ &match_data);
+ return match_data.container;
}
- scratch = g_hash_table_get_values(rsc_lh->allowed_nodes);
+ // Otherwise, check for any of the dependent's allowed nodes
+ scratch = g_hash_table_get_values(dependent->allowed_nodes);
scratch = pcmk__sort_nodes(scratch, NULL);
+ for (const GList *iter = scratch; iter != NULL; iter = iter->next) {
+ match_data.node = iter->data;
+ match_data.node = get_bundle_node_host(match_data.node);
+ if (match_data.node == NULL) {
+ continue;
+ }
- for (GList *gIter = scratch; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
-
- pair = compatible_replica_for_node(rsc_lh, node, rsc, filter, current);
- if (pair) {
- goto done;
+ pe__foreach_const_bundle_replica(bundle, match_replica_container,
+ &match_data);
+ if (match_data.container != NULL) {
+ break;
}
}
-
- pe_rsc_debug(rsc, "Can't pair %s with %s", rsc_lh->id, (rsc? rsc->id : "none"));
- done:
g_list_free(scratch);
- return pair;
+ return match_data.container;
}
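compatible_container() relies on a convention used throughout the new bundle code: replica-iterator callbacks return true to keep going and false to stop early, with any result carried back through a user_data struct (struct match_data here). A generic sketch of that pattern, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct find_data {
    int wanted;      /* input: value to look for */
    const int *hit;  /* output: first matching element, or NULL */
};

/* Returns false to stop the walk once a match is recorded in user_data */
static bool check_item(const int *item, void *user_data)
{
    struct find_data *data = user_data;

    if (*item == data->wanted) {
        data->hit = item;
        return false;   /* match found, stop iterating */
    }
    return true;        /* keep searching */
}

static void foreach_item(const int *items, int n,
                         bool (*fn)(const int *, void *), void *user_data)
{
    for (int i = 0; i < n; i++) {
        if (!fn(&items[i], user_data)) {
            break;
        }
    }
}

int main(void)
{
    const int values[] = { 3, 7, 42, 9 };
    struct find_data data = { 42, NULL };

    foreach_item(values, 4, check_item, &data);
    printf("found: %s\n", (data.hit != NULL)? "yes" : "no");
    return 0;
}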
-int copies_per_node(pe_resource_t * rsc)
+struct coloc_data {
+ const pcmk__colocation_t *colocation;
+ pcmk_resource_t *dependent;
+ GList *container_hosts;
+};
+
+/*!
+ * \internal
+ * \brief Apply a colocation score to replica node scores or resource priority
+ *
+ * \param[in] replica Replica of primary bundle resource in colocation
+ * \param[in,out] user_data struct coloc_data for colocation being applied
+ *
+ * \return true (to indicate that any further replicas should be processed)
+ */
+static bool
+replica_apply_coloc_score(const pe__bundle_replica_t *replica, void *user_data)
{
- /* Strictly speaking, there should be a 'copies_per_node' addition
- * to the resource function table and each case would be a
- * function. However that would be serious overkill to return an
- * int. In fact, it seems to me that both function tables
- * could/should be replaced by resources.{c,h} full of
- * rsc_{some_operation} functions containing a switch as below
- * which calls out to functions named {variant}_{some_operation}
- * as needed.
- */
- switch(rsc->variant) {
- case pe_unknown:
- return 0;
- case pe_native:
- case pe_group:
- return 1;
- case pe_clone:
- {
- const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX);
-
- if (max_clones_node == NULL) {
- return 1;
-
- } else {
- int max_i;
-
- pcmk__scan_min_int(max_clones_node, &max_i, 0);
- return max_i;
- }
- }
- case pe_container:
- {
- pe__bundle_variant_data_t *data = NULL;
- get_bundle_variant_data(data, rsc);
- return data->nreplicas_per_host;
- }
+ struct coloc_data *coloc_data = user_data;
+ pcmk_node_t *chosen = NULL;
+
+ if (coloc_data->colocation->score < INFINITY) {
+ replica->container->cmds->apply_coloc_score(coloc_data->dependent,
+ replica->container,
+ coloc_data->colocation,
+ false);
+ return true;
+ }
+
+ chosen = replica->container->fns->location(replica->container, NULL, 0);
+ if ((chosen == NULL)
+ || is_set_recursive(replica->container, pcmk_rsc_blocked, true)) {
+ return true;
+ }
+
+ if ((coloc_data->colocation->primary_role >= pcmk_role_promoted)
+ && ((replica->child == NULL)
+ || (replica->child->next_role < pcmk_role_promoted))) {
+ return true;
}
- return 0;
+
+ pe_rsc_trace(pe__const_top_resource(replica->container, true),
+ "Allowing mandatory colocation %s using %s @%d",
+ coloc_data->colocation->id, pe__node_name(chosen),
+ chosen->weight);
+ coloc_data->container_hosts = g_list_prepend(coloc_data->container_hosts,
+ chosen);
+ return true;
}
/*!
* \internal
- * \brief Apply a colocation's score to node weights or resource priority
+ * \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint, apply its score to the dependent's
- * allowed node weights (if we are still placing resources) or priority (if
+ * allowed node scores (if we are still placing resources) or priority (if
* we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
@@ -420,151 +514,193 @@ int copies_per_node(pe_resource_t * rsc)
* \param[in] for_dependent true if called on behalf of dependent
*/
void
-pcmk__bundle_apply_coloc_score(pe_resource_t *dependent,
- const pe_resource_t *primary,
+pcmk__bundle_apply_coloc_score(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent)
{
- GList *allocated_primaries = NULL;
- pe__bundle_variant_data_t *bundle_data = NULL;
+ struct coloc_data coloc_data = { colocation, dependent, NULL };
/* This should never be called for the bundle itself as a dependent.
- * Instead, we add its colocation constraints to its replicas and call the
- * apply_coloc_score() for the replicas as dependents.
+ * Instead, we add its colocation constraints to its containers and bundled
+ * primitive and call the apply_coloc_score() method for them as dependents.
*/
- CRM_ASSERT(!for_dependent);
-
- CRM_CHECK((colocation != NULL) && (dependent != NULL) && (primary != NULL),
- return);
- CRM_ASSERT(dependent->variant == pe_native);
-
- if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
- pe_rsc_trace(primary, "%s is still provisional", primary->id);
+ CRM_ASSERT((primary != NULL)
+ && (primary->variant == pcmk_rsc_variant_bundle)
+ && (dependent != NULL)
+ && (dependent->variant == pcmk_rsc_variant_primitive)
+ && (colocation != NULL) && !for_dependent);
+
+ if (pcmk_is_set(primary->flags, pcmk_rsc_unassigned)) {
+ pe_rsc_trace(primary,
+ "Skipping applying colocation %s "
+ "because %s is still provisional",
+ colocation->id, primary->id);
return;
+ }
+ pe_rsc_trace(primary, "Applying colocation %s (%s with %s at %s)",
+ colocation->id, dependent->id, primary->id,
+ pcmk_readable_score(colocation->score));
- } else if (colocation->dependent->variant > pe_group) {
- pe_resource_t *primary_replica = compatible_replica(dependent, primary,
- RSC_ROLE_UNKNOWN,
- FALSE,
- dependent->cluster);
+ /* If the constraint dependent is a clone or bundle, "dependent" here is one
+ * of its instances. Look for a compatible instance of this bundle.
+ */
+ if (colocation->dependent->variant > pcmk_rsc_variant_group) {
+ const pcmk_resource_t *primary_container = NULL;
- if (primary_replica) {
+ primary_container = compatible_container(dependent, primary);
+ if (primary_container != NULL) { // Success, we found one
pe_rsc_debug(primary, "Pairing %s with %s",
- dependent->id, primary_replica->id);
- dependent->cmds->apply_coloc_score(dependent, primary_replica,
+ dependent->id, primary_container->id);
+ dependent->cmds->apply_coloc_score(dependent, primary_container,
colocation, true);
- } else if (colocation->score >= INFINITY) {
- crm_notice("Cannot pair %s with instance of %s",
+ } else if (colocation->score >= INFINITY) { // Failure, and it's fatal
+ crm_notice("%s cannot run because there is no compatible "
+ "instance of %s to colocate with",
dependent->id, primary->id);
- pcmk__assign_resource(dependent, NULL, true);
+ pcmk__assign_resource(dependent, NULL, true, true);
- } else {
- pe_rsc_debug(primary, "Cannot pair %s with instance of %s",
+ } else { // Failure, but we can ignore it
+ pe_rsc_debug(primary,
+ "%s cannot be colocated with any instance of %s",
dependent->id, primary->id);
}
-
return;
}
- get_bundle_variant_data(bundle_data, primary);
- pe_rsc_trace(primary, "Processing constraint %s: %s -> %s %d",
- colocation->id, dependent->id, primary->id, colocation->score);
+ pe__foreach_const_bundle_replica(primary, replica_apply_coloc_score,
+ &coloc_data);
- for (GList *gIter = bundle_data->replicas; gIter != NULL;
- gIter = gIter->next) {
- pe__bundle_replica_t *replica = gIter->data;
+ if (colocation->score >= INFINITY) {
+ pcmk__colocation_intersect_nodes(dependent, primary, colocation,
+ coloc_data.container_hosts, false);
+ }
+ g_list_free(coloc_data.container_hosts);
+}
- if (colocation->score < INFINITY) {
- replica->container->cmds->apply_coloc_score(dependent,
- replica->container,
- colocation, false);
+// Bundle implementation of pcmk_assignment_methods_t:with_this_colocations()
+void
+pcmk__with_bundle_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList **list)
+{
+ const pcmk_resource_t *bundled_rsc = NULL;
- } else {
- pe_node_t *chosen = replica->container->fns->location(replica->container,
- NULL, FALSE);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle)
+ && (orig_rsc != NULL) && (list != NULL));
- if ((chosen == NULL)
- || is_set_recursive(replica->container, pe_rsc_block, TRUE)) {
- continue;
- }
- if ((colocation->primary_role >= RSC_ROLE_PROMOTED)
- && (replica->child == NULL)) {
- continue;
- }
- if ((colocation->primary_role >= RSC_ROLE_PROMOTED)
- && (replica->child->next_role < RSC_ROLE_PROMOTED)) {
- continue;
- }
+ // The bundle itself and its containers always get its colocations
+ if ((orig_rsc == rsc)
+ || pcmk_is_set(orig_rsc->flags, pcmk_rsc_replica_container)) {
- pe_rsc_trace(primary, "Allowing %s: %s %d",
- colocation->id, pe__node_name(chosen), chosen->weight);
- allocated_primaries = g_list_prepend(allocated_primaries, chosen);
- }
+ pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
+ return;
}
- if (colocation->score >= INFINITY) {
- node_list_exclude(dependent->allowed_nodes, allocated_primaries, FALSE);
+ /* The bundled resource gets the colocations if it's promotable and we've
+ * begun choosing roles
+ */
+ bundled_rsc = pe__bundled_resource(rsc);
+ if ((bundled_rsc == NULL)
+ || !pcmk_is_set(bundled_rsc->flags, pcmk_rsc_promotable)
+ || (pe__const_top_resource(orig_rsc, false) != bundled_rsc)) {
+ return;
+ }
+
+ if (orig_rsc == bundled_rsc) {
+ if (pe__clone_flag_is_set(orig_rsc,
+ pcmk__clone_promotion_constrained)) {
+ /* orig_rsc is the clone and we're setting roles (or have already
+ * done so)
+ */
+ pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
+ }
+
+ } else if (!pcmk_is_set(orig_rsc->flags, pcmk_rsc_unassigned)) {
+ /* orig_rsc is an instance and is already assigned. If something
+ * requests colocations for orig_rsc now, it's for setting roles.
+ */
+ pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
}
- g_list_free(allocated_primaries);
}
-// Bundle implementation of resource_alloc_functions_t:with_this_colocations()
+// Bundle implementation of pcmk_assignment_methods_t:this_with_colocations()
void
-pcmk__with_bundle_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list)
+pcmk__bundle_with_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList **list)
{
- CRM_CHECK((rsc != NULL) && (rsc->variant == pe_container)
- && (orig_rsc != NULL) && (list != NULL),
- return);
+ const pcmk_resource_t *bundled_rsc = NULL;
+
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle)
+ && (orig_rsc != NULL) && (list != NULL));
- if (rsc == orig_rsc) { // Colocations are wanted for bundle itself
- pcmk__add_with_this_list(list, rsc->rsc_cons_lhs);
+ // The bundle itself and its containers always get its colocations
+ if ((orig_rsc == rsc)
+ || pcmk_is_set(orig_rsc->flags, pcmk_rsc_replica_container)) {
- // Only the bundle replicas' containers get the bundle's constraints
- } else if (pcmk_is_set(orig_rsc->flags, pe_rsc_replica_container)) {
- pcmk__add_collective_constraints(list, orig_rsc, rsc, true);
+ pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
+ return;
}
-}
-// Bundle implementation of resource_alloc_functions_t:this_with_colocations()
-void
-pcmk__bundle_with_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list)
-{
- CRM_CHECK((rsc != NULL) && (rsc->variant == pe_container)
- && (orig_rsc != NULL) && (list != NULL),
- return);
+ /* The bundled resource gets the colocations if it's promotable and we've
+ * begun choosing roles
+ */
+ bundled_rsc = pe__bundled_resource(rsc);
+ if ((bundled_rsc == NULL)
+ || !pcmk_is_set(bundled_rsc->flags, pcmk_rsc_promotable)
+ || (pe__const_top_resource(orig_rsc, false) != bundled_rsc)) {
+ return;
+ }
- if (rsc == orig_rsc) { // Colocations are wanted for bundle itself
- pcmk__add_this_with_list(list, rsc->rsc_cons);
+ if (orig_rsc == bundled_rsc) {
+ if (pe__clone_flag_is_set(orig_rsc,
+ pcmk__clone_promotion_constrained)) {
+ /* orig_rsc is the clone and we're setting roles (or have already
+ * done so)
+ */
+ pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
+ }
- // Only the bundle replicas' containers get the bundle's constraints
- } else if (pcmk_is_set(orig_rsc->flags, pe_rsc_replica_container)) {
- pcmk__add_collective_constraints(list, orig_rsc, rsc, false);
+ } else if (!pcmk_is_set(orig_rsc->flags, pcmk_rsc_unassigned)) {
+ /* orig_rsc is an instance and is already assigned. If something
+ * requests colocations for orig_rsc now, it's for setting roles.
+ */
+ pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
}
}
-enum pe_action_flags
-pcmk__bundle_action_flags(pe_action_t *action, const pe_node_t *node)
+/*!
+ * \internal
+ * \brief Return action flags for a given bundle resource action
+ *
+ * \param[in,out] action Bundle resource action to get flags for
+ * \param[in] node If not NULL, limit effects to this node
+ *
+ * \return Flags appropriate to \p action on \p node
+ */
+uint32_t
+pcmk__bundle_action_flags(pcmk_action_t *action, const pcmk_node_t *node)
{
GList *containers = NULL;
- enum pe_action_flags flags = 0;
- pe__bundle_variant_data_t *data = NULL;
-
- get_bundle_variant_data(data, action->rsc);
- if(data->child) {
- enum action_tasks task = get_complex_task(data->child, action->task);
- switch(task) {
- case no_action:
- case action_notify:
- case action_notified:
- case action_promote:
- case action_promoted:
- case action_demote:
- case action_demoted:
+ uint32_t flags = 0;
+ pcmk_resource_t *bundled_resource = NULL;
+
+ CRM_ASSERT((action != NULL) && (action->rsc != NULL)
+ && (action->rsc->variant == pcmk_rsc_variant_bundle));
+
+ bundled_resource = pe__bundled_resource(action->rsc);
+ if (bundled_resource != NULL) {
+ // Clone actions are done on the bundled clone resource, not container
+ switch (get_complex_task(bundled_resource, action->task)) {
+ case pcmk_action_unspecified:
+ case pcmk_action_notify:
+ case pcmk_action_notified:
+ case pcmk_action_promote:
+ case pcmk_action_promoted:
+ case pcmk_action_demote:
+ case pcmk_action_demoted:
return pcmk__collective_action_flags(action,
- data->child->children,
+ bundled_resource->children,
node);
default:
break;
@@ -579,281 +715,326 @@ pcmk__bundle_action_flags(pe_action_t *action, const pe_node_t *node)
/*!
* \internal
- * \brief Get containerized resource corresponding to a given bundle container
+ * \brief Apply a location constraint to a bundle replica
*
- * \param[in] instance Collective instance that might be a bundle container
+ * \param[in,out] replica Replica to apply constraint to
+ * \param[in,out] user_data Location constraint to apply
*
- * \return Bundled resource instance inside \p instance if it is a bundle
- * container instance, otherwise NULL
+ * \return true (to indicate that any further replicas should be processed)
*/
-const pe_resource_t *
-pcmk__get_rsc_in_container(const pe_resource_t *instance)
+static bool
+apply_location_to_replica(pe__bundle_replica_t *replica, void *user_data)
{
- const pe__bundle_variant_data_t *data = NULL;
- const pe_resource_t *top = pe__const_top_resource(instance, true);
+ pe__location_t *location = user_data;
- if ((top == NULL) || (top->variant != pe_container)) {
- return NULL;
+ if (replica->container != NULL) {
+ replica->container->cmds->apply_location(replica->container, location);
}
- get_bundle_variant_data(data, top);
-
- for (const GList *iter = data->replicas; iter != NULL; iter = iter->next) {
- const pe__bundle_replica_t *replica = iter->data;
-
- if (instance == replica->container) {
- return replica->child;
- }
+ if (replica->ip != NULL) {
+ replica->ip->cmds->apply_location(replica->ip, location);
}
- return NULL;
+ return true;
}
+/*!
+ * \internal
+ * \brief Apply a location constraint to a bundle resource's allowed node scores
+ *
+ * \param[in,out] rsc Bundle resource to apply constraint to
+ * \param[in,out] location Location constraint to apply
+ */
void
-pcmk__bundle_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
+pcmk__bundle_apply_location(pcmk_resource_t *rsc, pe__location_t *location)
{
- pe__bundle_variant_data_t *bundle_data = NULL;
- get_bundle_variant_data(bundle_data, rsc);
+ pcmk_resource_t *bundled_resource = NULL;
- pcmk__apply_location(rsc, constraint);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle)
+ && (location != NULL));
- for (GList *gIter = bundle_data->replicas; gIter != NULL;
- gIter = gIter->next) {
- pe__bundle_replica_t *replica = gIter->data;
+ pcmk__apply_location(rsc, location);
+ pe__foreach_bundle_replica(rsc, apply_location_to_replica, location);
- if (replica->container) {
- replica->container->cmds->apply_location(replica->container,
- constraint);
- }
- if (replica->ip) {
- replica->ip->cmds->apply_location(replica->ip, constraint);
- }
+ bundled_resource = pe__bundled_resource(rsc);
+ if ((bundled_resource != NULL)
+ && ((location->role_filter == pcmk_role_unpromoted)
+ || (location->role_filter == pcmk_role_promoted))) {
+ bundled_resource->cmds->apply_location(bundled_resource, location);
+ bundled_resource->rsc_location = g_list_prepend(
+ bundled_resource->rsc_location, location);
}
+}
+
+#define XPATH_REMOTE "//nvpair[@name='" XML_RSC_ATTR_REMOTE_RA_ADDR "']"
- if (bundle_data->child
- && ((constraint->role_filter == RSC_ROLE_UNPROMOTED)
- || (constraint->role_filter == RSC_ROLE_PROMOTED))) {
- bundle_data->child->cmds->apply_location(bundle_data->child,
- constraint);
- bundle_data->child->rsc_location = g_list_prepend(bundle_data->child->rsc_location,
- constraint);
+/*!
+ * \internal
+ * \brief Add a bundle replica's actions to transition graph
+ *
+ * \param[in,out] replica Replica to add to graph
+ * \param[in] user_data Bundle that replica belongs to (for logging only)
+ *
+ * \return true (to indicate that any further replicas should be processed)
+ */
+static bool
+add_replica_actions_to_graph(pe__bundle_replica_t *replica, void *user_data)
+{
+ if ((replica->remote != NULL) && (replica->container != NULL)
+ && pe__bundle_needs_remote_name(replica->remote)) {
+
+ /* REMOTE_CONTAINER_HACK: Allow remote nodes to run containers that
+ * run pacemaker-remoted inside, without needing a separate IP for
+ * the container. This is done by configuring the inner remote's
+ * connection host as the magic string "#uname", then
+ * replacing it with the underlying host when needed.
+ */
+ xmlNode *nvpair = get_xpath_object(XPATH_REMOTE, replica->remote->xml,
+ LOG_ERR);
+ const char *calculated_addr = NULL;
+
+ // Replace the value in replica->remote->xml (if appropriate)
+ calculated_addr = pe__add_bundle_remote_name(replica->remote,
+ replica->remote->cluster,
+ nvpair, "value");
+ if (calculated_addr != NULL) {
+ /* Since this is for the bundle as a resource, and not any
+ * particular action, replace the value in the default
+ * parameters (not evaluated for node). create_graph_action()
+ * will grab it from there to replace it in node-evaluated
+ * parameters.
+ */
+ GHashTable *params = pe_rsc_params(replica->remote,
+ NULL, replica->remote->cluster);
+
+ g_hash_table_replace(params,
+ strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
+ strdup(calculated_addr));
+ } else {
+ pcmk_resource_t *bundle = user_data;
+
+ /* The only way to get here is if the remote connection is
+ * neither currently running nor scheduled to run. That means we
+ * won't be doing any operations that require addr (only start
+ * requires it; we additionally use it to compare digests when
+ * unpacking status, promote, and migrate_from history, but
+ * that's already happened by this point).
+ */
+ pe_rsc_info(bundle,
+ "Unable to determine address for bundle %s "
+ "remote connection", bundle->id);
+ }
+ }
+ if (replica->ip != NULL) {
+ replica->ip->cmds->add_actions_to_graph(replica->ip);
+ }
+ if (replica->container != NULL) {
+ replica->container->cmds->add_actions_to_graph(replica->container);
+ }
+ if (replica->remote != NULL) {
+ replica->remote->cmds->add_actions_to_graph(replica->remote);
}
+ return true;
}
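The REMOTE_CONTAINER_HACK comment above only describes the "#uname" substitution in prose. The sketch below shows the core idea; the helper name is hypothetical, the real replacement is performed by pe__add_bundle_remote_name() (which also rewrites the XML nvpair), and the snippet assumes the headers this file already includes.

static const char *
resolve_remote_addr_sketch(const char *configured_addr,
                           const pcmk_node_t *container_host)
{
    // "#uname" is a placeholder meaning "wherever the container lands"
    if ((configured_addr != NULL) && (container_host != NULL)
        && (strcmp(configured_addr, "#uname") == 0)) {
        return container_host->details->uname; // name of the underlying host
    }
    return configured_addr; // a real address was configured; keep it
}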
/*!
* \internal
- * \brief Add a resource's actions to the transition graph
+ * \brief Add a bundle resource's actions to the transition graph
*
- * \param[in,out] rsc Resource whose actions should be added
+ * \param[in,out] rsc Bundle resource whose actions should be added
*/
void
-pcmk__bundle_expand(pe_resource_t *rsc)
+pcmk__bundle_add_actions_to_graph(pcmk_resource_t *rsc)
{
- pe__bundle_variant_data_t *bundle_data = NULL;
-
- CRM_CHECK(rsc != NULL, return);
+ pcmk_resource_t *bundled_resource = NULL;
- get_bundle_variant_data(bundle_data, rsc);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle));
- if (bundle_data->child) {
- bundle_data->child->cmds->add_actions_to_graph(bundle_data->child);
+ bundled_resource = pe__bundled_resource(rsc);
+ if (bundled_resource != NULL) {
+ bundled_resource->cmds->add_actions_to_graph(bundled_resource);
}
+ pe__foreach_bundle_replica(rsc, add_replica_actions_to_graph, rsc);
+}
- for (GList *gIter = bundle_data->replicas; gIter != NULL;
- gIter = gIter->next) {
- pe__bundle_replica_t *replica = gIter->data;
+struct probe_data {
+ pcmk_resource_t *bundle; // Bundle being probed
+ pcmk_node_t *node; // Node to create probes on
+ bool any_created; // Whether any probes have been created
+};
- CRM_ASSERT(replica);
- if (replica->remote && replica->container
- && pe__bundle_needs_remote_name(replica->remote)) {
+/*!
+ * \internal
+ * \brief Order a bundle replica's start after another replica's probe
+ *
+ * \param[in,out] replica Replica to order start for
+ * \param[in,out] user_data Replica with probe to order after
+ *
+ * \return true (to indicate that any further replicas should be processed)
+ */
+static bool
+order_replica_start_after(pe__bundle_replica_t *replica, void *user_data)
+{
+ pe__bundle_replica_t *probed_replica = user_data;
- /* REMOTE_CONTAINER_HACK: Allow remote nodes to run containers that
- * run pacemaker-remoted inside, without needing a separate IP for
- * the container. This is done by configuring the inner remote's
- * connection host as the magic string "#uname", then
- * replacing it with the underlying host when needed.
- */
- xmlNode *nvpair = get_xpath_object("//nvpair[@name='" XML_RSC_ATTR_REMOTE_RA_ADDR "']",
- replica->remote->xml, LOG_ERR);
- const char *calculated_addr = NULL;
-
- // Replace the value in replica->remote->xml (if appropriate)
- calculated_addr = pe__add_bundle_remote_name(replica->remote,
- rsc->cluster,
- nvpair, "value");
- if (calculated_addr) {
- /* Since this is for the bundle as a resource, and not any
- * particular action, replace the value in the default
- * parameters (not evaluated for node). create_graph_action()
- * will grab it from there to replace it in node-evaluated
- * parameters.
- */
- GHashTable *params = pe_rsc_params(replica->remote,
- NULL, rsc->cluster);
-
- g_hash_table_replace(params,
- strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
- strdup(calculated_addr));
- } else {
- /* The only way to get here is if the remote connection is
- * neither currently running nor scheduled to run. That means we
- * won't be doing any operations that require addr (only start
- * requires it; we additionally use it to compare digests when
- * unpacking status, promote, and migrate_from history, but
- * that's already happened by this point).
- */
- crm_info("Unable to determine address for bundle %s remote connection",
- rsc->id);
- }
- }
- if (replica->ip) {
- replica->ip->cmds->add_actions_to_graph(replica->ip);
- }
- if (replica->container) {
- replica->container->cmds->add_actions_to_graph(replica->container);
+ if ((replica == probed_replica) || (replica->container == NULL)) {
+ return true;
+ }
+ pcmk__new_ordering(probed_replica->container,
+ pcmk__op_key(probed_replica->container->id,
+ PCMK_ACTION_MONITOR, 0),
+ NULL, replica->container,
+ pcmk__op_key(replica->container->id, PCMK_ACTION_START,
+ 0),
+ NULL, pcmk__ar_ordered|pcmk__ar_if_on_same_node,
+ replica->container->cluster);
+ return true;
+}
+
+/*!
+ * \internal
+ * \brief Create probes for a bundle replica's resources
+ *
+ * \param[in,out] replica Replica to create probes for
+ * \param[in,out] user_data struct probe_data
+ *
+ * \return true (to indicate that any further replicas should be processed)
+ */
+static bool
+create_replica_probes(pe__bundle_replica_t *replica, void *user_data)
+{
+ struct probe_data *probe_data = user_data;
+
+ if ((replica->ip != NULL)
+ && replica->ip->cmds->create_probe(replica->ip, probe_data->node)) {
+ probe_data->any_created = true;
+ }
+ if ((replica->child != NULL)
+ && pe__same_node(probe_data->node, replica->node)
+ && replica->child->cmds->create_probe(replica->child,
+ probe_data->node)) {
+ probe_data->any_created = true;
+ }
+ if ((replica->container != NULL)
+ && replica->container->cmds->create_probe(replica->container,
+ probe_data->node)) {
+ probe_data->any_created = true;
+
+ /* If we're limited to one replica per host (due to
+ * the lack of an IP range probably), then we don't
+ * want any of our peer containers starting until
+ * we've established that no other copies are already
+ * running.
+ *
+ * Partly this is to ensure that the maximum replicas per host is
+ * observed, but also to ensure that the containers
+ * don't fail to start because the necessary port
+ * mappings (which won't include an IP for uniqueness)
+ * are already taken
+ */
+ if (probe_data->bundle->fns->max_per_node(probe_data->bundle) == 1) {
+ pe__foreach_bundle_replica(probe_data->bundle,
+ order_replica_start_after, replica);
}
- if (replica->remote) {
- replica->remote->cmds->add_actions_to_graph(replica->remote);
+ }
+ if ((replica->container != NULL) && (replica->remote != NULL)
+ && replica->remote->cmds->create_probe(replica->remote,
+ probe_data->node)) {
+ /* Do not probe the remote resource until we know where the container is
+ * running. This is required for REMOTE_CONTAINER_HACK to correctly
+ * probe remote resources.
+ */
+ char *probe_uuid = pcmk__op_key(replica->remote->id,
+ PCMK_ACTION_MONITOR, 0);
+ pcmk_action_t *probe = find_first_action(replica->remote->actions,
+ probe_uuid, NULL,
+ probe_data->node);
+
+ free(probe_uuid);
+ if (probe != NULL) {
+ probe_data->any_created = true;
+ pe_rsc_trace(probe_data->bundle, "Ordering %s probe on %s",
+ replica->remote->id, pe__node_name(probe_data->node));
+ pcmk__new_ordering(replica->container,
+ pcmk__op_key(replica->container->id,
+ PCMK_ACTION_START, 0),
+ NULL, replica->remote, NULL, probe,
+ pcmk__ar_nested_remote_probe,
+ probe_data->bundle->cluster);
}
}
+ return true;
}
/*!
* \internal
*
- * \brief Schedule any probes needed for a resource on a node
+ * \brief Schedule any probes needed for a bundle resource on a node
*
- * \param[in,out] rsc Resource to create probe for
+ * \param[in,out] rsc Bundle resource to create probes for
* \param[in,out] node Node to create probe on
*
* \return true if any probe was created, otherwise false
*/
bool
-pcmk__bundle_create_probe(pe_resource_t *rsc, pe_node_t *node)
+pcmk__bundle_create_probe(pcmk_resource_t *rsc, pcmk_node_t *node)
{
- bool any_created = false;
- pe__bundle_variant_data_t *bundle_data = NULL;
-
- CRM_CHECK(rsc != NULL, return false);
-
- get_bundle_variant_data(bundle_data, rsc);
- for (GList *gIter = bundle_data->replicas; gIter != NULL;
- gIter = gIter->next) {
- pe__bundle_replica_t *replica = gIter->data;
-
- CRM_ASSERT(replica);
- if ((replica->ip != NULL)
- && replica->ip->cmds->create_probe(replica->ip, node)) {
- any_created = true;
- }
- if ((replica->child != NULL) && (node->details == replica->node->details)
- && replica->child->cmds->create_probe(replica->child, node)) {
- any_created = true;
- }
- if ((replica->container != NULL)
- && replica->container->cmds->create_probe(replica->container,
- node)) {
- any_created = true;
-
- /* If we're limited to one replica per host (due to
- * the lack of an IP range probably), then we don't
- * want any of our peer containers starting until
- * we've established that no other copies are already
- * running.
- *
- * Partly this is to ensure that nreplicas_per_host is
- * observed, but also to ensure that the containers
- * don't fail to start because the necessary port
- * mappings (which won't include an IP for uniqueness)
- * are already taken
- */
+ struct probe_data probe_data = { rsc, node, false };
- for (GList *tIter = bundle_data->replicas;
- tIter && (bundle_data->nreplicas_per_host == 1);
- tIter = tIter->next) {
- pe__bundle_replica_t *other = tIter->data;
-
- if ((other != replica) && (other != NULL)
- && (other->container != NULL)) {
-
- pcmk__new_ordering(replica->container,
- pcmk__op_key(replica->container->id, RSC_STATUS, 0),
- NULL, other->container,
- pcmk__op_key(other->container->id, RSC_START, 0),
- NULL,
- pe_order_optional|pe_order_same_node,
- rsc->cluster);
- }
- }
- }
- if ((replica->container != NULL) && (replica->remote != NULL)
- && replica->remote->cmds->create_probe(replica->remote, node)) {
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle));
+ pe__foreach_bundle_replica(rsc, create_replica_probes, &probe_data);
+ return probe_data.any_created;
+}
- /* Do not probe the remote resource until we know where the
- * container is running. This is required for REMOTE_CONTAINER_HACK
- * to correctly probe remote resources.
- */
- char *probe_uuid = pcmk__op_key(replica->remote->id, RSC_STATUS,
- 0);
- pe_action_t *probe = find_first_action(replica->remote->actions,
- probe_uuid, NULL, node);
-
- free(probe_uuid);
- if (probe != NULL) {
- any_created = true;
- crm_trace("Ordering %s probe on %s",
- replica->remote->id, pe__node_name(node));
- pcmk__new_ordering(replica->container,
- pcmk__op_key(replica->container->id, RSC_START, 0),
- NULL, replica->remote, NULL, probe,
- pe_order_probe, rsc->cluster);
- }
- }
+/*!
+ * \internal
+ * \brief Output actions for one bundle replica
+ *
+ * \param[in,out] replica Replica to output actions for
+ * \param[in] user_data Unused
+ *
+ * \return true (to indicate that any further replicas should be processed)
+ */
+static bool
+output_replica_actions(pe__bundle_replica_t *replica, void *user_data)
+{
+ if (replica->ip != NULL) {
+ replica->ip->cmds->output_actions(replica->ip);
+ }
+ if (replica->container != NULL) {
+ replica->container->cmds->output_actions(replica->container);
}
- return any_created;
+ if (replica->remote != NULL) {
+ replica->remote->cmds->output_actions(replica->remote);
+ }
+ if (replica->child != NULL) {
+ replica->child->cmds->output_actions(replica->child);
+ }
+ return true;
}
+/*!
+ * \internal
+ * \brief Output a summary of scheduled actions for a bundle resource
+ *
+ * \param[in,out] rsc Bundle resource to output actions for
+ */
void
-pcmk__output_bundle_actions(pe_resource_t *rsc)
+pcmk__output_bundle_actions(pcmk_resource_t *rsc)
{
- pe__bundle_variant_data_t *bundle_data = NULL;
-
- CRM_CHECK(rsc != NULL, return);
-
- get_bundle_variant_data(bundle_data, rsc);
- for (GList *gIter = bundle_data->replicas; gIter != NULL;
- gIter = gIter->next) {
- pe__bundle_replica_t *replica = gIter->data;
-
- CRM_ASSERT(replica);
- if (replica->ip != NULL) {
- replica->ip->cmds->output_actions(replica->ip);
- }
- if (replica->container != NULL) {
- replica->container->cmds->output_actions(replica->container);
- }
- if (replica->remote != NULL) {
- replica->remote->cmds->output_actions(replica->remote);
- }
- if (replica->child != NULL) {
- replica->child->cmds->output_actions(replica->child);
- }
- }
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle));
+ pe__foreach_bundle_replica(rsc, output_replica_actions, NULL);
}
-// Bundle implementation of resource_alloc_functions_t:add_utilization()
+// Bundle implementation of pcmk_assignment_methods_t:add_utilization()
void
-pcmk__bundle_add_utilization(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList *all_rscs,
+pcmk__bundle_add_utilization(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList *all_rscs,
GHashTable *utilization)
{
- pe__bundle_variant_data_t *bundle_data = NULL;
- pe__bundle_replica_t *replica = NULL;
+ pcmk_resource_t *container = NULL;
- if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
- return;
- }
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle));
- get_bundle_variant_data(bundle_data, rsc);
- if (bundle_data->replicas == NULL) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
return;
}
@@ -861,16 +1042,17 @@ pcmk__bundle_add_utilization(const pe_resource_t *rsc,
* is sufficient for any. Only the implicit container resource can have
* utilization values.
*/
- replica = (pe__bundle_replica_t *) bundle_data->replicas->data;
- if (replica->container != NULL) {
- replica->container->cmds->add_utilization(replica->container, orig_rsc,
- all_rscs, utilization);
+ container = pe__first_container(rsc);
+ if (container != NULL) {
+ container->cmds->add_utilization(container, orig_rsc, all_rscs,
+ utilization);
}
}
-// Bundle implementation of resource_alloc_functions_t:shutdown_lock()
+// Bundle implementation of pcmk_assignment_methods_t:shutdown_lock()
void
-pcmk__bundle_shutdown_lock(pe_resource_t *rsc)
+pcmk__bundle_shutdown_lock(pcmk_resource_t *rsc)
{
- return; // Bundles currently don't support shutdown locks
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_bundle));
+ // Bundles currently don't support shutdown locks
}
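Throughout the bundle changes above, per-replica work is funneled through pe__foreach_bundle_replica() (or its const variant) with a callback plus an opaque user_data pointer, and each callback returns true to let iteration continue with the next replica. A minimal sketch of the iterator shape those calls imply, assuming the replicas are kept in a GList (the helper name below is hypothetical; the real iterator lives in the scheduler library):

typedef bool (*replica_fn_t)(pe__bundle_replica_t *replica, void *user_data);

static void
foreach_replica_sketch(GList *replicas, replica_fn_t fn, void *user_data)
{
    for (GList *iter = replicas; iter != NULL; iter = iter->next) {
        pe__bundle_replica_t *replica = iter->data;

        if (!fn(replica, user_data)) {
            break; // a callback returning false stops the iteration early
        }
    }
}

Results flow back to the caller through user_data, as struct probe_data does for pcmk__bundle_create_probe() above.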
diff --git a/lib/pacemaker/pcmk_sched_clone.c b/lib/pacemaker/pcmk_sched_clone.c
index 934f512..7b422d8 100644
--- a/lib/pacemaker/pcmk_sched_clone.c
+++ b/lib/pacemaker/pcmk_sched_clone.c
@@ -18,200 +18,222 @@
* \internal
* \brief Assign a clone resource's instances to nodes
*
- * \param[in,out] rsc Clone resource to assign
- * \param[in] prefer Node to prefer, if all else is equal
+ * \param[in,out] rsc Clone resource to assign
+ * \param[in] prefer Node to prefer, if all else is equal
+ * \param[in] stop_if_fail If \c true and a primitive descendant of \p rsc
+ * can't be assigned to a node, set the
+ * descendant's next role to stopped and update
+ * existing actions
*
* \return NULL (clones are not assigned to a single node)
+ *
+ * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
+ * completely undo the assignment. A successful assignment can be either
+ * undone or left alone as final. A failed assignment has the same effect
+ * as calling pcmk__unassign_resource(); there are no side effects on
+ * roles or actions.
*/
-pe_node_t *
-pcmk__clone_assign(pe_resource_t *rsc, const pe_node_t *prefer)
+pcmk_node_t *
+pcmk__clone_assign(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
+ bool stop_if_fail)
{
+ GList *colocations = NULL;
+
CRM_ASSERT(pe_rsc_is_clone(rsc));
- if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
return NULL; // Assignment has already been done
}
// Detect assignment loops
- if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_assigning)) {
pe_rsc_debug(rsc, "Breaking assignment loop involving %s", rsc->id);
return NULL;
}
- pe__set_resource_flags(rsc, pe_rsc_allocating);
+ pe__set_resource_flags(rsc, pcmk_rsc_assigning);
// If this clone is promotable, consider nodes' promotion scores
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
pcmk__add_promotion_scores(rsc);
}
- /* If this clone is colocated with any other resources, assign those first.
- * Since the this_with_colocations() method boils down to a copy of rsc_cons
- * for clones, we can use that here directly for efficiency.
- */
- for (GList *iter = rsc->rsc_cons; iter != NULL; iter = iter->next) {
+ // If this clone is colocated with any other resources, assign those first
+ colocations = pcmk__this_with_colocations(rsc);
+ for (GList *iter = colocations; iter != NULL; iter = iter->next) {
pcmk__colocation_t *constraint = (pcmk__colocation_t *) iter->data;
pe_rsc_trace(rsc, "%s: Assigning colocation %s primary %s first",
rsc->id, constraint->id, constraint->primary->id);
- constraint->primary->cmds->assign(constraint->primary, prefer);
+ constraint->primary->cmds->assign(constraint->primary, prefer,
+ stop_if_fail);
}
+ g_list_free(colocations);
- /* If any resources are colocated with this one, consider their preferences.
- * Because the with_this_colocations() method boils down to a copy of
- * rsc_cons_lhs for clones, we can use that here directly for efficiency.
- */
- g_list_foreach(rsc->rsc_cons_lhs, pcmk__add_dependent_scores, rsc);
+ // If any resources are colocated with this one, consider their preferences
+ colocations = pcmk__with_this_colocations(rsc);
+ g_list_foreach(colocations, pcmk__add_dependent_scores, rsc);
+ g_list_free(colocations);
- pe__show_node_weights(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
- rsc, __func__, rsc->allowed_nodes, rsc->cluster);
+ pe__show_node_scores(!pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_output_scores),
+ rsc, __func__, rsc->allowed_nodes, rsc->cluster);
rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance);
pcmk__assign_instances(rsc, rsc->children, pe__clone_max(rsc),
pe__clone_node_max(rsc));
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
pcmk__set_instance_roles(rsc);
}
- pe__clear_resource_flags(rsc, pe_rsc_provisional|pe_rsc_allocating);
+ pe__clear_resource_flags(rsc, pcmk_rsc_unassigned|pcmk_rsc_assigning);
pe_rsc_trace(rsc, "Assigned clone %s", rsc->id);
return NULL;
}
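The \note above says that with stop_if_fail set to false the whole assignment can be treated as tentative. A hedged usage sketch (the caller code and the acceptance check are illustrative, not taken from the tree):

// Tentatively assign the clone; pcmk__clone_assign() always returns NULL
rsc->cmds->assign(rsc, NULL, false);

if (!assignment_is_acceptable(rsc)) { // hypothetical caller-side check
    pcmk__unassign_resource(rsc);     // completely undo the assignment
}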
-static pe_action_t *
-find_rsc_action(pe_resource_t *rsc, const char *task)
+/*!
+ * \internal
+ * \brief Create all actions needed for a given clone resource
+ *
+ * \param[in,out] rsc Clone resource to create actions for
+ */
+void
+pcmk__clone_create_actions(pcmk_resource_t *rsc)
{
- pe_action_t *match = NULL;
- GList *actions = pe__resource_actions(rsc, NULL, task, FALSE);
-
- for (GList *item = actions; item != NULL; item = item->next) {
- pe_action_t *op = (pe_action_t *) item->data;
+ CRM_ASSERT(pe_rsc_is_clone(rsc));
- if (!pcmk_is_set(op->flags, pe_action_optional)) {
- if (match != NULL) {
- // More than one match, don't return any
- match = NULL;
- break;
- }
- match = op;
- }
+ pe_rsc_trace(rsc, "Creating actions for clone %s", rsc->id);
+ pcmk__create_instance_actions(rsc, rsc->children);
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
+ pcmk__create_promotable_actions(rsc);
}
- g_list_free(actions);
- return match;
}
/*!
* \internal
- * \brief Order starts and stops of an ordered clone's instances
+ * \brief Create implicit constraints needed for a clone resource
*
- * \param[in,out] rsc Clone resource
+ * \param[in,out] rsc Clone resource to create implicit constraints for
*/
-static void
-order_instance_starts_stops(pe_resource_t *rsc)
+void
+pcmk__clone_internal_constraints(pcmk_resource_t *rsc)
{
- pe_action_t *last_stop = NULL;
- pe_action_t *last_start = NULL;
+ bool ordered = false;
- // Instances must be ordered by ascending instance number, so sort them
- rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
+ CRM_ASSERT(pe_rsc_is_clone(rsc));
+ pe_rsc_trace(rsc, "Creating internal constraints for clone %s", rsc->id);
+
+ // Restart ordering: Stop -> stopped -> start -> started
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_STOPPED,
+ rsc, PCMK_ACTION_START,
+ pcmk__ar_ordered);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_START,
+ rsc, PCMK_ACTION_RUNNING,
+ pcmk__ar_unrunnable_first_blocks);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_STOP,
+ rsc, PCMK_ACTION_STOPPED,
+ pcmk__ar_unrunnable_first_blocks);
+
+ // Demoted -> stop and started -> promote
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_DEMOTED,
+ rsc, PCMK_ACTION_STOP,
+ pcmk__ar_ordered);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_RUNNING,
+ rsc, PCMK_ACTION_PROMOTE,
+ pcmk__ar_unrunnable_first_blocks);
+ }
+
+ ordered = pe__clone_is_ordered(rsc);
+ if (ordered) {
+ /* Ordered clone instances must start and stop by instance number. The
+ * instances might have been previously shuffled for assignment or
+ * promotion purposes, so re-sort them.
+ */
+ rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
+ }
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *child = (pe_resource_t *) iter->data;
- pe_action_t *action = NULL;
-
- // Order this instance's stop after previous instance's stop
- // @TODO: Should instances be stopped in reverse order instead?
- action = find_rsc_action(child, RSC_STOP);
- if (action != NULL) {
- if (last_stop != NULL) {
- order_actions(action, last_stop, pe_order_optional);
- }
- last_stop = action;
- }
-
- // Order this instance's start after previous instance's start
- action = find_rsc_action(child, RSC_START);
- if (action != NULL) {
- if (last_start != NULL) {
- order_actions(last_start, action, pe_order_optional);
+ pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
+
+ instance->cmds->internal_constraints(instance);
+
+ // Start clone -> start instance -> clone started
+ pcmk__order_starts(rsc, instance, pcmk__ar_unrunnable_first_blocks
+ |pcmk__ar_then_implies_first_graphed);
+ pcmk__order_resource_actions(instance, PCMK_ACTION_START,
+ rsc, PCMK_ACTION_RUNNING,
+ pcmk__ar_first_implies_then_graphed);
+
+ // Stop clone -> stop instance -> clone stopped
+ pcmk__order_stops(rsc, instance, pcmk__ar_then_implies_first_graphed);
+ pcmk__order_resource_actions(instance, PCMK_ACTION_STOP,
+ rsc, PCMK_ACTION_STOPPED,
+ pcmk__ar_first_implies_then_graphed);
+
+ /* Instances of ordered clones must be started and stopped by instance
+ * number. Since only some instances may be starting or stopping, order
+ * each instance relative to every later instance.
+ */
+ if (ordered) {
+ for (GList *later = iter->next;
+ later != NULL; later = later->next) {
+ pcmk__order_starts(instance, (pcmk_resource_t *) later->data,
+ pcmk__ar_ordered);
+ pcmk__order_stops((pcmk_resource_t *) later->data, instance,
+ pcmk__ar_ordered);
}
- last_start = action;
}
}
-}
-
-void
-clone_create_actions(pe_resource_t *rsc)
-{
- pe_rsc_debug(rsc, "Creating actions for clone %s", rsc->id);
- pcmk__create_instance_actions(rsc, rsc->children);
- if (pe__clone_is_ordered(rsc)) {
- order_instance_starts_stops(rsc);
- }
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
- pcmk__create_promotable_actions(rsc);
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
+ pcmk__order_promotable_instances(rsc);
}
}
-void
-clone_internal_constraints(pe_resource_t *rsc)
+/*!
+ * \internal
+ * \brief Check whether colocated resources can be interleaved
+ *
+ * \param[in] colocation Colocation constraint with clone as primary
+ *
+ * \return true if colocated resources can be interleaved, otherwise false
+ */
+static bool
+can_interleave(const pcmk__colocation_t *colocation)
{
- pe_resource_t *last_rsc = NULL;
- GList *gIter;
- bool ordered = pe__clone_is_ordered(rsc);
-
- pe_rsc_trace(rsc, "Internal constraints for %s", rsc->id);
- pcmk__order_resource_actions(rsc, RSC_STOPPED, rsc, RSC_START,
- pe_order_optional);
- pcmk__order_resource_actions(rsc, RSC_START, rsc, RSC_STARTED,
- pe_order_runnable_left);
- pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_STOPPED,
- pe_order_runnable_left);
+ const pcmk_resource_t *dependent = colocation->dependent;
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
- pcmk__order_resource_actions(rsc, RSC_DEMOTED, rsc, RSC_STOP,
- pe_order_optional);
- pcmk__order_resource_actions(rsc, RSC_STARTED, rsc, RSC_PROMOTE,
- pe_order_runnable_left);
+ // Only colocations between clone or bundle resources use interleaving
+ if (dependent->variant <= pcmk_rsc_variant_group) {
+ return false;
}
- if (ordered) {
- /* we have to maintain a consistent sorted child list when building order constraints */
- rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
+ // Only the dependent needs to be marked for interleaving
+ if (!crm_is_true(g_hash_table_lookup(dependent->meta,
+ XML_RSC_ATTR_INTERLEAVE))) {
+ return false;
}
- for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
-
- child_rsc->cmds->internal_constraints(child_rsc);
-
- pcmk__order_starts(rsc, child_rsc,
- pe_order_runnable_left|pe_order_implies_first_printed);
- pcmk__order_resource_actions(child_rsc, RSC_START, rsc, RSC_STARTED,
- pe_order_implies_then_printed);
- if (ordered && (last_rsc != NULL)) {
- pcmk__order_starts(last_rsc, child_rsc, pe_order_optional);
- }
-
- pcmk__order_stops(rsc, child_rsc, pe_order_implies_first_printed);
- pcmk__order_resource_actions(child_rsc, RSC_STOP, rsc, RSC_STOPPED,
- pe_order_implies_then_printed);
- if (ordered && (last_rsc != NULL)) {
- pcmk__order_stops(child_rsc, last_rsc, pe_order_optional);
- }
- last_rsc = child_rsc;
- }
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
- pcmk__order_promotable_instances(rsc);
+ /* @TODO Do we actually care about multiple primary instances sharing a
+ * dependent instance?
+ */
+ if (dependent->fns->max_per_node(dependent)
+ != colocation->primary->fns->max_per_node(colocation->primary)) {
+ pcmk__config_err("Cannot interleave %s and %s because they do not "
+ "support the same number of instances per node",
+ dependent->id, colocation->primary->id);
+ return false;
}
+
+ return true;
}
/*!
* \internal
- * \brief Apply a colocation's score to node weights or resource priority
+ * \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint, apply its score to the dependent's
- * allowed node weights (if we are still placing resources) or priority (if
+ * allowed node scores (if we are still placing resources) or priority (if
* we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
@@ -220,289 +242,312 @@ clone_internal_constraints(pe_resource_t *rsc)
* \param[in] for_dependent true if called on behalf of dependent
*/
void
-pcmk__clone_apply_coloc_score(pe_resource_t *dependent,
- const pe_resource_t *primary,
+pcmk__clone_apply_coloc_score(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent)
{
- GList *gIter = NULL;
- gboolean do_interleave = FALSE;
- const char *interleave_s = NULL;
+ const GList *iter = NULL;
/* This should never be called for the clone itself as a dependent. Instead,
* we add its colocation constraints to its instances and call the
- * apply_coloc_score() for the instances as dependents.
+ * apply_coloc_score() method for the instances as dependents.
*/
CRM_ASSERT(!for_dependent);
- CRM_CHECK((colocation != NULL) && (dependent != NULL) && (primary != NULL),
- return);
- CRM_CHECK(dependent->variant == pe_native, return);
+ CRM_ASSERT((colocation != NULL) && pe_rsc_is_clone(primary)
+ && (dependent != NULL)
+ && (dependent->variant == pcmk_rsc_variant_primitive));
- pe_rsc_trace(primary, "Processing constraint %s: %s -> %s %d",
- colocation->id, dependent->id, primary->id, colocation->score);
+ if (pcmk_is_set(primary->flags, pcmk_rsc_unassigned)) {
+ pe_rsc_trace(primary,
+ "Delaying processing colocation %s "
+ "because cloned primary %s is still provisional",
+ colocation->id, primary->id);
+ return;
+ }
- if (pcmk_is_set(primary->flags, pe_rsc_promotable)) {
- if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
- // We haven't placed the primary yet, so we can't apply colocation
- pe_rsc_trace(primary, "%s is still provisional", primary->id);
- return;
+ pe_rsc_trace(primary, "Processing colocation %s (%s with clone %s @%s)",
+ colocation->id, dependent->id, primary->id,
+ pcmk_readable_score(colocation->score));
- } else if (colocation->primary_role == RSC_ROLE_UNKNOWN) {
- // This isn't a role-specfic colocation, so handle normally
- pe_rsc_trace(primary, "Handling %s as a clone colocation",
- colocation->id);
+ // Apply role-specific colocations
+ if (pcmk_is_set(primary->flags, pcmk_rsc_promotable)
+ && (colocation->primary_role != pcmk_role_unknown)) {
- } else if (pcmk_is_set(dependent->flags, pe_rsc_provisional)) {
- // We're placing the dependent
+ if (pcmk_is_set(dependent->flags, pcmk_rsc_unassigned)) {
+ // We're assigning the dependent to a node
pcmk__update_dependent_with_promotable(primary, dependent,
colocation);
return;
+ }
- } else if (colocation->dependent_role == RSC_ROLE_PROMOTED) {
- // We're choosing roles for the dependent
+ if (colocation->dependent_role == pcmk_role_promoted) {
+ // We're choosing a role for the dependent
pcmk__update_promotable_dependent_priority(primary, dependent,
colocation);
return;
}
}
- // Only the dependent needs to be marked for interleave
- interleave_s = g_hash_table_lookup(colocation->dependent->meta,
- XML_RSC_ATTR_INTERLEAVE);
- if (crm_is_true(interleave_s)
- && (colocation->dependent->variant > pe_group)) {
- /* @TODO Do we actually care about multiple primary copies sharing a
- * dependent copy anymore?
- */
- if (copies_per_node(colocation->dependent) != copies_per_node(colocation->primary)) {
- pcmk__config_err("Cannot interleave %s and %s because they do not "
- "support the same number of instances per node",
- colocation->dependent->id,
- colocation->primary->id);
-
- } else {
- do_interleave = TRUE;
- }
- }
-
- if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
- pe_rsc_trace(primary, "%s is still provisional", primary->id);
- return;
-
- } else if (do_interleave) {
- pe_resource_t *primary_instance = NULL;
+ // Apply interleaved colocations
+ if (can_interleave(colocation)) {
+ const pcmk_resource_t *primary_instance = NULL;
primary_instance = pcmk__find_compatible_instance(dependent, primary,
- RSC_ROLE_UNKNOWN,
+ pcmk_role_unknown,
false);
if (primary_instance != NULL) {
- pe_rsc_debug(primary, "Pairing %s with %s",
+ pe_rsc_debug(primary, "Interleaving %s with %s",
dependent->id, primary_instance->id);
dependent->cmds->apply_coloc_score(dependent, primary_instance,
colocation, true);
} else if (colocation->score >= INFINITY) {
- crm_notice("Cannot pair %s with instance of %s",
- dependent->id, primary->id);
- pcmk__assign_resource(dependent, NULL, true);
+ crm_notice("%s cannot run because it cannot interleave with "
+ "any instance of %s", dependent->id, primary->id);
+ pcmk__assign_resource(dependent, NULL, true, true);
} else {
- pe_rsc_debug(primary, "Cannot pair %s with instance of %s",
+ pe_rsc_debug(primary,
+ "%s will not colocate with %s "
+ "because no instance can interleave with it",
dependent->id, primary->id);
}
return;
+ }
- } else if (colocation->score >= INFINITY) {
- GList *affected_nodes = NULL;
+ // Apply mandatory colocations
+ if (colocation->score >= INFINITY) {
+ GList *primary_nodes = NULL;
- gIter = primary->children;
- for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
- pe_node_t *chosen = child_rsc->fns->location(child_rsc, NULL, FALSE);
+ // Dependent can run only where primary will have unblocked instances
+ for (iter = primary->children; iter != NULL; iter = iter->next) {
+ const pcmk_resource_t *instance = iter->data;
+ pcmk_node_t *chosen = instance->fns->location(instance, NULL, 0);
- if (chosen != NULL && is_set_recursive(child_rsc, pe_rsc_block, TRUE) == FALSE) {
+ if ((chosen != NULL)
+ && !is_set_recursive(instance, pcmk_rsc_blocked, TRUE)) {
pe_rsc_trace(primary, "Allowing %s: %s %d",
colocation->id, pe__node_name(chosen),
chosen->weight);
- affected_nodes = g_list_prepend(affected_nodes, chosen);
+ primary_nodes = g_list_prepend(primary_nodes, chosen);
}
}
-
- node_list_exclude(dependent->allowed_nodes, affected_nodes, FALSE);
- g_list_free(affected_nodes);
+ pcmk__colocation_intersect_nodes(dependent, primary, colocation,
+ primary_nodes, false);
+ g_list_free(primary_nodes);
return;
}
- gIter = primary->children;
- for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ // Apply optional colocations
+ for (iter = primary->children; iter != NULL; iter = iter->next) {
+ const pcmk_resource_t *instance = iter->data;
- child_rsc->cmds->apply_coloc_score(dependent, child_rsc, colocation,
- false);
+ instance->cmds->apply_coloc_score(dependent, instance, colocation,
+ false);
}
}
-// Clone implementation of resource_alloc_functions_t:with_this_colocations()
+// Clone implementation of pcmk_assignment_methods_t:with_this_colocations()
void
-pcmk__with_clone_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list)
+pcmk__with_clone_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList **list)
{
CRM_CHECK((rsc != NULL) && (orig_rsc != NULL) && (list != NULL), return);
- if (rsc == orig_rsc) { // Colocations are wanted for clone itself
- pcmk__add_with_this_list(list, rsc->rsc_cons_lhs);
- } else {
- pcmk__add_collective_constraints(list, orig_rsc, rsc, true);
+ pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
+
+ if (rsc->parent != NULL) {
+ rsc->parent->cmds->with_this_colocations(rsc->parent, orig_rsc, list);
}
}
-// Clone implementation of resource_alloc_functions_t:this_with_colocations()
+// Clone implementation of pcmk_assignment_methods_t:this_with_colocations()
void
-pcmk__clone_with_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list)
+pcmk__clone_with_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList **list)
{
CRM_CHECK((rsc != NULL) && (orig_rsc != NULL) && (list != NULL), return);
- if (rsc == orig_rsc) { // Colocations are wanted for clone itself
- pcmk__add_this_with_list(list, rsc->rsc_cons);
- } else {
- pcmk__add_collective_constraints(list, orig_rsc, rsc, false);
+ pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
+
+ if (rsc->parent != NULL) {
+ rsc->parent->cmds->this_with_colocations(rsc->parent, orig_rsc, list);
}
}
-enum pe_action_flags
-clone_action_flags(pe_action_t *action, const pe_node_t *node)
+/*!
+ * \internal
+ * \brief Return action flags for a given clone resource action
+ *
+ * \param[in,out] action Action to get flags for
+ * \param[in] node If not NULL, limit effects to this node
+ *
+ * \return Flags appropriate to \p action on \p node
+ */
+uint32_t
+pcmk__clone_action_flags(pcmk_action_t *action, const pcmk_node_t *node)
{
+ CRM_ASSERT((action != NULL) && pe_rsc_is_clone(action->rsc));
+
return pcmk__collective_action_flags(action, action->rsc->children, node);
}
+/*!
+ * \internal
+ * \brief Apply a location constraint to a clone resource's allowed node scores
+ *
+ * \param[in,out] rsc Clone resource to apply constraint to
+ * \param[in,out] location Location constraint to apply
+ */
void
-clone_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
+pcmk__clone_apply_location(pcmk_resource_t *rsc, pe__location_t *location)
{
- GList *gIter = rsc->children;
+ CRM_CHECK((location != NULL) && pe_rsc_is_clone(rsc), return);
- pe_rsc_trace(rsc, "Processing location constraint %s for %s", constraint->id, rsc->id);
+ pcmk__apply_location(rsc, location);
- pcmk__apply_location(rsc, constraint);
-
- for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
- child_rsc->cmds->apply_location(child_rsc, constraint);
+ instance->cmds->apply_location(instance, location);
}
}
+// GFunc wrapper for calling the action_flags() resource method
+static void
+call_action_flags(gpointer data, gpointer user_data)
+{
+ pcmk_resource_t *rsc = user_data;
+
+ rsc->cmds->action_flags((pcmk_action_t *) data, NULL);
+}
+
/*!
* \internal
- * \brief Add a resource's actions to the transition graph
+ * \brief Add a clone resource's actions to the transition graph
*
* \param[in,out] rsc Resource whose actions should be added
*/
void
-clone_expand(pe_resource_t *rsc)
+pcmk__clone_add_actions_to_graph(pcmk_resource_t *rsc)
{
- GList *gIter = NULL;
-
- g_list_foreach(rsc->actions, (GFunc) rsc->cmds->action_flags, NULL);
+ CRM_ASSERT(pe_rsc_is_clone(rsc));
+ g_list_foreach(rsc->actions, call_action_flags, rsc);
pe__create_clone_notifications(rsc);
- /* Now that the notifcations have been created we can expand the children */
-
- gIter = rsc->children;
- for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) iter->data;
child_rsc->cmds->add_actions_to_graph(child_rsc);
}
pcmk__add_rsc_actions_to_graph(rsc);
-
- /* The notifications are in the graph now, we can destroy the notify_data */
pe__free_clone_notification_data(rsc);
}
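call_action_flags() above exists only to adapt the action_flags() method to the callback type g_list_foreach() expects. GLib declares that type as

    typedef void (*GFunc)(gpointer data, gpointer user_data);

so the old (GFunc) cast of action_flags(), whose parameter and return types differ, called a function through an incompatible pointer type, which is undefined behavior in C even where it happens to work. The wrapper keeps the types honest:

g_list_foreach(rsc->actions, call_action_flags, rsc); // wrapper, no cast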
-// Check whether a resource or any of its children is known on node
+/*!
+ * \internal
+ * \brief Check whether a resource or any children have been probed on a node
+ *
+ * \param[in] rsc Resource to check
+ * \param[in] node Node to check
+ *
+ * \return true if \p node is in the known_on table of \p rsc or any of its
+ * children, otherwise false
+ */
static bool
-rsc_known_on(const pe_resource_t *rsc, const pe_node_t *node)
+rsc_probed_on(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
- if (rsc->children) {
+ if (rsc->children != NULL) {
for (GList *child_iter = rsc->children; child_iter != NULL;
child_iter = child_iter->next) {
- pe_resource_t *child = (pe_resource_t *) child_iter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) child_iter->data;
- if (rsc_known_on(child, node)) {
- return TRUE;
+ if (rsc_probed_on(child, node)) {
+ return true;
}
}
+ return false;
+ }
- } else if (rsc->known_on) {
+ if (rsc->known_on != NULL) {
GHashTableIter iter;
- pe_node_t *known_node = NULL;
+ pcmk_node_t *known_node = NULL;
g_hash_table_iter_init(&iter, rsc->known_on);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &known_node)) {
- if (node->details == known_node->details) {
- return TRUE;
+ if (pe__same_node(node, known_node)) {
+ return true;
}
}
}
- return FALSE;
+ return false;
}
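rsc_probed_on() now compares nodes with pe__same_node() instead of dereferencing ->details on both sides inline. A sketch of the comparison that helper is expected to perform (illustrative; the real function lives in the scheduler library and may differ in detail):

static bool
same_node_sketch(const pcmk_node_t *node1, const pcmk_node_t *node2)
{
    // Two node objects refer to the same cluster node when they share the
    // same underlying details structure; NULL never matches anything.
    return (node1 != NULL) && (node2 != NULL)
           && (node1->details == node2->details);
}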
-// Look for an instance of clone that is known on node
-static pe_resource_t *
-find_instance_on(const pe_resource_t *clone, const pe_node_t *node)
+/*!
+ * \internal
+ * \brief Find clone instance that has been probed on given node
+ *
+ * \param[in] clone Clone resource to check
+ * \param[in] node Node to check
+ *
+ * \return Instance of \p clone that has been probed on \p node if any,
+ * otherwise NULL
+ */
+static pcmk_resource_t *
+find_probed_instance_on(const pcmk_resource_t *clone, const pcmk_node_t *node)
{
- for (GList *gIter = clone->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child = (pe_resource_t *) gIter->data;
+ for (GList *iter = clone->children; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
- if (rsc_known_on(child, node)) {
- return child;
+ if (rsc_probed_on(instance, node)) {
+ return instance;
}
}
return NULL;
}
-// For anonymous clones, only a single instance needs to be probed
+/*!
+ * \internal
+ * \brief Probe an anonymous clone on a node
+ *
+ * \param[in,out] clone Anonymous clone to probe
+ * \param[in,out] node Node to probe \p clone on
+ */
static bool
-probe_anonymous_clone(pe_resource_t *rsc, pe_node_t *node,
- pe_working_set_t *data_set)
+probe_anonymous_clone(pcmk_resource_t *clone, pcmk_node_t *node)
{
- // First, check if we probed an instance on this node last time
- pe_resource_t *child = find_instance_on(rsc, node);
+ // Check whether we already probed an instance on this node
+ pcmk_resource_t *child = find_probed_instance_on(clone, node);
// Otherwise, check if we plan to start an instance on this node
- if (child == NULL) {
- for (GList *child_iter = rsc->children; child_iter && !child;
- child_iter = child_iter->next) {
-
- pe_node_t *local_node = NULL;
- pe_resource_t *child_rsc = (pe_resource_t *) child_iter->data;
-
- if (child_rsc) { /* make clang analyzer happy */
- local_node = child_rsc->fns->location(child_rsc, NULL, FALSE);
- if (local_node && (local_node->details == node->details)) {
- child = child_rsc;
- }
- }
+ for (GList *iter = clone->children; (iter != NULL) && (child == NULL);
+ iter = iter->next) {
+ pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
+ const pcmk_node_t *instance_node = NULL;
+
+ instance_node = instance->fns->location(instance, NULL, 0);
+ if (pe__same_node(instance_node, node)) {
+ child = instance;
}
}
// Otherwise, use the first clone instance
if (child == NULL) {
- child = rsc->children->data;
+ child = clone->children->data;
}
- CRM_ASSERT(child);
+
+ // Anonymous clones only need to probe a single instance
return child->cmds->create_probe(child, node);
}
/*!
* \internal
- *
* \brief Schedule any probes needed for a resource on a node
*
* \param[in,out] rsc Resource to create probe for
@@ -511,70 +556,87 @@ probe_anonymous_clone(pe_resource_t *rsc, pe_node_t *node,
* \return true if any probe was created, otherwise false
*/
bool
-clone_create_probe(pe_resource_t *rsc, pe_node_t *node)
+pcmk__clone_create_probe(pcmk_resource_t *rsc, pcmk_node_t *node)
{
- CRM_ASSERT(rsc);
-
- rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
- if (rsc->children == NULL) {
- pe_warn("Clone %s has no children", rsc->id);
- return false;
- }
+ CRM_ASSERT((node != NULL) && pe_rsc_is_clone(rsc));
if (rsc->exclusive_discover) {
- pe_node_t *allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
- if (allowed && allowed->rsc_discover_mode != pe_discover_exclusive) {
- /* exclusive discover is enabled and this node is not marked
- * as a node this resource should be discovered on
- *
- * remove the node from allowed_nodes so that the
- * notification contains only nodes that we might ever run
- * on
+ /* The clone is configured to be probed only where a location constraint
+ * exists with resource-discovery set to exclusive.
+ *
+ * This check is not strictly necessary here since the instance's
+ * create_probe() method would also check, but doing it here is more
+ * efficient (especially for unique clones with a large number of
+ * instances), and affects the CRM_meta_notify_available_uname variable
+ * passed with notify actions.
+ */
+ pcmk_node_t *allowed = g_hash_table_lookup(rsc->allowed_nodes,
+ node->details->id);
+
+ if ((allowed == NULL)
+ || (allowed->rsc_discover_mode != pcmk_probe_exclusive)) {
+ /* This node is not marked for resource discovery. Remove it from
+ * allowed_nodes so that notifications contain only nodes that the
+ * clone can possibly run on.
*/
+ pe_rsc_trace(rsc,
+ "Skipping probe for %s on %s because resource has "
+ "exclusive discovery but is not allowed on node",
+ rsc->id, pe__node_name(node));
g_hash_table_remove(rsc->allowed_nodes, node->details->id);
-
- /* Bit of a shortcut - might as well take it */
return false;
}
}
- if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
return pcmk__probe_resource_list(rsc->children, node);
} else {
- return probe_anonymous_clone(rsc, node, rsc->cluster);
+ return probe_anonymous_clone(rsc, node);
}
}
+/*!
+ * \internal
+ * \brief Add meta-attributes relevant to transition graph actions to XML
+ *
+ * Add clone-specific meta-attributes needed for transition graph actions.
+ *
+ * \param[in] rsc Clone resource whose meta-attributes should be added
+ * \param[in,out] xml Transition graph action attributes XML to add to
+ */
void
-clone_append_meta(const pe_resource_t *rsc, xmlNode *xml)
+pcmk__clone_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml)
{
char *name = NULL;
+ CRM_ASSERT(pe_rsc_is_clone(rsc) && (xml != NULL));
+
name = crm_meta_name(XML_RSC_ATTR_UNIQUE);
- crm_xml_add(xml, name, pe__rsc_bool_str(rsc, pe_rsc_unique));
+ crm_xml_add(xml, name, pe__rsc_bool_str(rsc, pcmk_rsc_unique));
free(name);
name = crm_meta_name(XML_RSC_ATTR_NOTIFY);
- crm_xml_add(xml, name, pe__rsc_bool_str(rsc, pe_rsc_notify));
+ crm_xml_add(xml, name, pe__rsc_bool_str(rsc, pcmk_rsc_notify));
free(name);
- name = crm_meta_name(XML_RSC_ATTR_INCARNATION_MAX);
+ name = crm_meta_name(PCMK_META_CLONE_MAX);
crm_xml_add_int(xml, name, pe__clone_max(rsc));
free(name);
- name = crm_meta_name(XML_RSC_ATTR_INCARNATION_NODEMAX);
+ name = crm_meta_name(PCMK_META_CLONE_NODE_MAX);
crm_xml_add_int(xml, name, pe__clone_node_max(rsc));
free(name);
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
int promoted_max = pe__clone_promoted_max(rsc);
int promoted_node_max = pe__clone_promoted_node_max(rsc);
- name = crm_meta_name(XML_RSC_ATTR_PROMOTED_MAX);
+ name = crm_meta_name(PCMK_META_PROMOTED_MAX);
crm_xml_add_int(xml, name, promoted_max);
free(name);
- name = crm_meta_name(XML_RSC_ATTR_PROMOTED_NODEMAX);
+ name = crm_meta_name(PCMK_META_PROMOTED_NODE_MAX);
crm_xml_add_int(xml, name, promoted_node_max);
free(name);
@@ -591,22 +653,25 @@ clone_append_meta(const pe_resource_t *rsc, xmlNode *xml)
}
}
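For readers unfamiliar with crm_meta_name(): it prefixes the given meta-attribute name with "CRM_meta_" and maps '-' to '_', which is also how these values surface to resource agents as OCF_RESKEY_CRM_meta_* environment variables. A small illustration (the value 3 is made up):

char *name = crm_meta_name(PCMK_META_CLONE_MAX); // "clone-max"

crm_xml_add_int(xml, name, 3); // the action XML gains CRM_meta_clone_max="3"
free(name);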
-// Clone implementation of resource_alloc_functions_t:add_utilization()
+// Clone implementation of pcmk_assignment_methods_t:add_utilization()
void
-pcmk__clone_add_utilization(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList *all_rscs,
+pcmk__clone_add_utilization(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList *all_rscs,
GHashTable *utilization)
{
bool existing = false;
- pe_resource_t *child = NULL;
+ pcmk_resource_t *child = NULL;
- if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
+ CRM_ASSERT(pe_rsc_is_clone(rsc) && (orig_rsc != NULL)
+ && (utilization != NULL));
+
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
return;
}
// Look for any child already existing in the list
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- child = (pe_resource_t *) iter->data;
+ child = (pcmk_resource_t *) iter->data;
if (g_list_find(all_rscs, child)) {
existing = true; // Keep checking remaining children
} else {
@@ -614,7 +679,7 @@ pcmk__clone_add_utilization(const pe_resource_t *rsc,
for (GList *member_iter = child->children; member_iter != NULL;
member_iter = member_iter->next) {
- pe_resource_t *member = (pe_resource_t *) member_iter->data;
+ pcmk_resource_t *member = (pcmk_resource_t *) member_iter->data;
if (g_list_find(all_rscs, member) != NULL) {
// Add *child's* utilization, not group member's
@@ -629,15 +694,16 @@ pcmk__clone_add_utilization(const pe_resource_t *rsc,
if (!existing && (rsc->children != NULL)) {
// If nothing was found, still add first child's utilization
- child = (pe_resource_t *) rsc->children->data;
+ child = (pcmk_resource_t *) rsc->children->data;
child->cmds->add_utilization(child, orig_rsc, all_rscs, utilization);
}
}
-// Clone implementation of resource_alloc_functions_t:shutdown_lock()
+// Clone implementation of pcmk_assignment_methods_t:shutdown_lock()
void
-pcmk__clone_shutdown_lock(pe_resource_t *rsc)
+pcmk__clone_shutdown_lock(pcmk_resource_t *rsc)
{
+ CRM_ASSERT(pe_rsc_is_clone(rsc));
return; // Clones currently don't support shutdown locks
}
diff --git a/lib/pacemaker/pcmk_sched_colocation.c b/lib/pacemaker/pcmk_sched_colocation.c
index eeef4f1..733d70a 100644
--- a/lib/pacemaker/pcmk_sched_colocation.c
+++ b/lib/pacemaker/pcmk_sched_colocation.c
@@ -13,6 +13,7 @@
#include <glib.h>
#include <crm/crm.h>
+#include <crm/common/scheduler_internal.h>
#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
@@ -21,46 +22,69 @@
#include "crm/msg_xml.h"
#include "libpacemaker_private.h"
-#define EXPAND_CONSTRAINT_IDREF(__set, __rsc, __name) do { \
- __rsc = pcmk__find_constraint_resource(data_set->resources, __name); \
- if (__rsc == NULL) { \
- pcmk__config_err("%s: No resource found for %s", __set, __name); \
- return; \
- } \
- } while(0)
-
// Used to temporarily mark a node as unusable
#define INFINITY_HACK (INFINITY * -100)
+/*!
+ * \internal
+ * \brief Compare two colocations according to priority
+ *
+ * Compare two colocations according to the order in which they should be
+ * considered, based on either their dependent resources or their primary
+ * resources -- preferring (in order):
+ * * Colocation that is not \c NULL
+ * * Colocation whose resource has higher priority
+ * * Colocation whose resource is of a higher-level variant
+ * (bundle > clone > group > primitive)
+ * * Colocation whose resource is promotable, if both are clones
+ * * Colocation whose resource has lower ID in lexicographic order
+ *
+ * \param[in] colocation1 First colocation to compare
+ * \param[in] colocation2 Second colocation to compare
+ * \param[in] dependent If \c true, compare colocations by dependent
+ * priority; otherwise compare them by primary priority
+ *
+ * \return A negative number if \p colocation1 should be considered first,
+ * a positive number if \p colocation2 should be considered first,
+ * or 0 if order doesn't matter
+ */
static gint
-cmp_dependent_priority(gconstpointer a, gconstpointer b)
+cmp_colocation_priority(const pcmk__colocation_t *colocation1,
+ const pcmk__colocation_t *colocation2, bool dependent)
{
- const pcmk__colocation_t *rsc_constraint1 = (const pcmk__colocation_t *) a;
- const pcmk__colocation_t *rsc_constraint2 = (const pcmk__colocation_t *) b;
+ const pcmk_resource_t *rsc1 = NULL;
+ const pcmk_resource_t *rsc2 = NULL;
- if (a == NULL) {
+ if (colocation1 == NULL) {
return 1;
}
- if (b == NULL) {
+ if (colocation2 == NULL) {
return -1;
}
- CRM_ASSERT(rsc_constraint1->dependent != NULL);
- CRM_ASSERT(rsc_constraint1->primary != NULL);
+ if (dependent) {
+ rsc1 = colocation1->dependent;
+ rsc2 = colocation2->dependent;
+ CRM_ASSERT(colocation1->primary != NULL);
+ } else {
+ rsc1 = colocation1->primary;
+ rsc2 = colocation2->primary;
+ CRM_ASSERT(colocation1->dependent != NULL);
+ }
+ CRM_ASSERT((rsc1 != NULL) && (rsc2 != NULL));
- if (rsc_constraint1->dependent->priority > rsc_constraint2->dependent->priority) {
+ if (rsc1->priority > rsc2->priority) {
return -1;
}
-
- if (rsc_constraint1->dependent->priority < rsc_constraint2->dependent->priority) {
+ if (rsc1->priority < rsc2->priority) {
return 1;
}
- /* Process clones before primitives and groups */
- if (rsc_constraint1->dependent->variant > rsc_constraint2->dependent->variant) {
+ // Process clones before primitives and groups
+ if (rsc1->variant > rsc2->variant) {
return -1;
}
- if (rsc_constraint1->dependent->variant < rsc_constraint2->dependent->variant) {
+ if (rsc1->variant < rsc2->variant) {
return 1;
}
@@ -68,66 +92,70 @@ cmp_dependent_priority(gconstpointer a, gconstpointer b)
* clones (probably unnecessary, but avoids having to update regression
* tests)
*/
- if (rsc_constraint1->dependent->variant == pe_clone) {
- if (pcmk_is_set(rsc_constraint1->dependent->flags, pe_rsc_promotable)
- && !pcmk_is_set(rsc_constraint2->dependent->flags, pe_rsc_promotable)) {
+ if (rsc1->variant == pcmk_rsc_variant_clone) {
+ if (pcmk_is_set(rsc1->flags, pcmk_rsc_promotable)
+ && !pcmk_is_set(rsc2->flags, pcmk_rsc_promotable)) {
return -1;
- } else if (!pcmk_is_set(rsc_constraint1->dependent->flags, pe_rsc_promotable)
- && pcmk_is_set(rsc_constraint2->dependent->flags, pe_rsc_promotable)) {
+ }
+ if (!pcmk_is_set(rsc1->flags, pcmk_rsc_promotable)
+ && pcmk_is_set(rsc2->flags, pcmk_rsc_promotable)) {
return 1;
}
}
- return strcmp(rsc_constraint1->dependent->id,
- rsc_constraint2->dependent->id);
+ return strcmp(rsc1->id, rsc2->id);
}
+/*!
+ * \internal
+ * \brief Compare two colocations according to priority based on dependents
+ *
+ * Compare two colocations according to the order in which they should be
+ * considered, based on their dependent resources -- preferring (in order):
+ * * Colocation that is not \c NULL
+ * * Colocation whose resource has higher priority
+ * * Colocation whose resource is of a higher-level variant
+ * (bundle > clone > group > primitive)
+ * * Colocation whose resource is promotable, if both are clones
+ * * Colocation whose resource has lower ID in lexicographic order
+ *
+ * \param[in] a First colocation to compare
+ * \param[in] b Second colocation to compare
+ *
+ * \return A negative number if \p a should be considered first,
+ * a positive number if \p b should be considered first,
+ * or 0 if order doesn't matter
+ */
static gint
-cmp_primary_priority(gconstpointer a, gconstpointer b)
+cmp_dependent_priority(gconstpointer a, gconstpointer b)
{
- const pcmk__colocation_t *rsc_constraint1 = (const pcmk__colocation_t *) a;
- const pcmk__colocation_t *rsc_constraint2 = (const pcmk__colocation_t *) b;
-
- if (a == NULL) {
- return 1;
- }
- if (b == NULL) {
- return -1;
- }
-
- CRM_ASSERT(rsc_constraint1->dependent != NULL);
- CRM_ASSERT(rsc_constraint1->primary != NULL);
-
- if (rsc_constraint1->primary->priority > rsc_constraint2->primary->priority) {
- return -1;
- }
-
- if (rsc_constraint1->primary->priority < rsc_constraint2->primary->priority) {
- return 1;
- }
-
- /* Process clones before primitives and groups */
- if (rsc_constraint1->primary->variant > rsc_constraint2->primary->variant) {
- return -1;
- } else if (rsc_constraint1->primary->variant < rsc_constraint2->primary->variant) {
- return 1;
- }
-
- /* @COMPAT scheduler <2.0.0: Process promotable clones before nonpromotable
- * clones (probably unnecessary, but avoids having to update regression
- * tests)
- */
- if (rsc_constraint1->primary->variant == pe_clone) {
- if (pcmk_is_set(rsc_constraint1->primary->flags, pe_rsc_promotable)
- && !pcmk_is_set(rsc_constraint2->primary->flags, pe_rsc_promotable)) {
- return -1;
- } else if (!pcmk_is_set(rsc_constraint1->primary->flags, pe_rsc_promotable)
- && pcmk_is_set(rsc_constraint2->primary->flags, pe_rsc_promotable)) {
- return 1;
- }
- }
+ return cmp_colocation_priority(a, b, true);
+}
- return strcmp(rsc_constraint1->primary->id, rsc_constraint2->primary->id);
+/*!
+ * \internal
+ * \brief Compare two colocations according to priority based on primaries
+ *
+ * Compare two colocations according to the order in which they should be
+ * considered, based on their primary resources -- preferring (in order):
+ * * Colocation that is not \c NULL
+ * * Colocation whose primary has higher priority
+ * * Colocation whose primary is of a higher-level variant
+ * (bundle > clone > group > primitive)
+ * * Colocation whose primary is promotable, if both are clones
+ * * Colocation whose primary has lower ID in lexicographic order
+ *
+ * \param[in] a First colocation to compare
+ * \param[in] b Second colocation to compare
+ *
+ * \return A negative number if \p a should be considered first,
+ * a positive number if \p b should be considered first,
+ * or 0 if order doesn't matter
+ */
+static gint
+cmp_primary_priority(gconstpointer a, gconstpointer b)
+{
+ return cmp_colocation_priority(a, b, false);
}
/*!
@@ -136,21 +164,23 @@ cmp_primary_priority(gconstpointer a, gconstpointer b)
*
* \param[in,out] list List of constraints to add \p colocation to
* \param[in] colocation Colocation constraint to add to \p list
+ * \param[in] rsc Resource whose colocations we're getting (for
+ * logging only)
*
* \note The list will be sorted using cmp_primary_priority().
*/
void
-pcmk__add_this_with(GList **list, const pcmk__colocation_t *colocation)
+pcmk__add_this_with(GList **list, const pcmk__colocation_t *colocation,
+ const pcmk_resource_t *rsc)
{
- CRM_ASSERT((list != NULL) && (colocation != NULL));
-
- crm_trace("Adding colocation %s (%s with %s%s%s @%d) "
- "to 'this with' list",
- colocation->id, colocation->dependent->id,
- colocation->primary->id,
- (colocation->node_attribute == NULL)? "" : " using ",
- pcmk__s(colocation->node_attribute, ""),
- colocation->score);
+ CRM_ASSERT((list != NULL) && (colocation != NULL) && (rsc != NULL));
+
+ pe_rsc_trace(rsc,
+ "Adding colocation %s (%s with %s using %s @%s) to "
+ "'this with' list for %s",
+ colocation->id, colocation->dependent->id,
+ colocation->primary->id, colocation->node_attribute,
+ pcmk_readable_score(colocation->score), rsc->id);
*list = g_list_insert_sorted(*list, (gpointer) colocation,
cmp_primary_priority);
}
@@ -161,23 +191,30 @@ pcmk__add_this_with(GList **list, const pcmk__colocation_t *colocation)
*
* \param[in,out] list List of constraints to add \p addition to
* \param[in] addition List of colocation constraints to add to \p list
+ * \param[in] rsc Resource whose colocations we're getting (for
+ * logging only)
*
* \note The lists must be pre-sorted by cmp_primary_priority().
*/
void
-pcmk__add_this_with_list(GList **list, GList *addition)
+pcmk__add_this_with_list(GList **list, GList *addition,
+ const pcmk_resource_t *rsc)
{
- CRM_CHECK((list != NULL), return);
-
- if (*list == NULL) { // Trivial case for efficiency
- crm_trace("Copying %u 'this with' colocations to new list",
- g_list_length(addition));
- *list = g_list_copy(addition);
- } else {
- while (addition != NULL) {
- pcmk__add_this_with(list, addition->data);
- addition = addition->next;
+ CRM_ASSERT((list != NULL) && (rsc != NULL));
+
+ pcmk__if_tracing(
+ {}, // Always add each colocation individually if tracing
+ {
+ if (*list == NULL) {
+ // Trivial case for efficiency if not tracing
+ *list = g_list_copy(addition);
+ return;
+ }
}
+ );
+
+ for (const GList *iter = addition; iter != NULL; iter = iter->next) {
+        pcmk__add_this_with(list, iter->data, rsc);
}
}
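
The fast path above can be illustrated with a standalone GLib sketch (a simplified stand-in for the pcmk__if_tracing() macro): when detailed tracing is off and the destination list is empty, the pre-sorted source list is copied wholesale; otherwise entries are inserted one at a time so each addition can be examined (or, in the real code, logged):

    #include <glib.h>
    #include <stdio.h>

    static gint
    cmp_str(gconstpointer a, gconstpointer b)
    {
        return g_strcmp0((const char *) a, (const char *) b);
    }

    /* Copy wholesale when possible; otherwise insert one entry at a time */
    static void
    add_list(GList **list, GList *addition, gboolean tracing)
    {
        if (!tracing && (*list == NULL)) {
            *list = g_list_copy(addition);      /* trivial case for efficiency */
            return;
        }
        for (GList *iter = addition; iter != NULL; iter = iter->next) {
            *list = g_list_insert_sorted(*list, iter->data, cmp_str);
        }
    }

    int
    main(void)
    {
        GList *src = NULL;
        GList *dst = NULL;

        src = g_list_append(src, (gpointer) "b");
        src = g_list_append(src, (gpointer) "a");
        add_list(&dst, src, TRUE);              /* inserts sorted: a, b */
        for (GList *iter = dst; iter != NULL; iter = iter->next) {
            printf("%s\n", (const char *) iter->data);
        }
        g_list_free(src);
        g_list_free(dst);
        return 0;
    }
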
@@ -187,21 +224,23 @@ pcmk__add_this_with_list(GList **list, GList *addition)
*
* \param[in,out] list List of constraints to add \p colocation to
* \param[in] colocation Colocation constraint to add to \p list
+ * \param[in] rsc Resource whose colocations we're getting (for
+ * logging only)
*
* \note The list will be sorted using cmp_dependent_priority().
*/
void
-pcmk__add_with_this(GList **list, const pcmk__colocation_t *colocation)
+pcmk__add_with_this(GList **list, const pcmk__colocation_t *colocation,
+ const pcmk_resource_t *rsc)
{
- CRM_ASSERT((list != NULL) && (colocation != NULL));
-
- crm_trace("Adding colocation %s (%s with %s%s%s @%d) "
- "to 'with this' list",
- colocation->id, colocation->dependent->id,
- colocation->primary->id,
- (colocation->node_attribute == NULL)? "" : " using ",
- pcmk__s(colocation->node_attribute, ""),
- colocation->score);
+ CRM_ASSERT((list != NULL) && (colocation != NULL) && (rsc != NULL));
+
+ pe_rsc_trace(rsc,
+ "Adding colocation %s (%s with %s using %s @%s) to "
+ "'with this' list for %s",
+ colocation->id, colocation->dependent->id,
+ colocation->primary->id, colocation->node_attribute,
+ pcmk_readable_score(colocation->score), rsc->id);
*list = g_list_insert_sorted(*list, (gpointer) colocation,
cmp_dependent_priority);
}
@@ -212,23 +251,30 @@ pcmk__add_with_this(GList **list, const pcmk__colocation_t *colocation)
*
* \param[in,out] list List of constraints to add \p addition to
* \param[in] addition List of colocation constraints to add to \p list
+ * \param[in] rsc Resource whose colocations we're getting (for
+ * logging only)
*
* \note The lists must be pre-sorted by cmp_dependent_priority().
*/
void
-pcmk__add_with_this_list(GList **list, GList *addition)
+pcmk__add_with_this_list(GList **list, GList *addition,
+ const pcmk_resource_t *rsc)
{
- CRM_CHECK((list != NULL), return);
-
- if (*list == NULL) { // Trivial case for efficiency
- crm_trace("Copying %u 'with this' colocations to new list",
- g_list_length(addition));
- *list = g_list_copy(addition);
- } else {
- while (addition != NULL) {
- pcmk__add_with_this(list, addition->data);
- addition = addition->next;
+ CRM_ASSERT((list != NULL) && (rsc != NULL));
+
+ pcmk__if_tracing(
+ {}, // Always add each colocation individually if tracing
+ {
+ if (*list == NULL) {
+ // Trivial case for efficiency if not tracing
+ *list = g_list_copy(addition);
+ return;
+ }
}
+ );
+
+ for (const GList *iter = addition; iter != NULL; iter = iter->next) {
+        pcmk__add_with_this(list, iter->data, rsc);
}
}
@@ -242,33 +288,33 @@ pcmk__add_with_this_list(GList **list, GList *addition)
* \param[in] then_role Anti-colocation role of \p then_rsc
*/
static void
-anti_colocation_order(pe_resource_t *first_rsc, int first_role,
- pe_resource_t *then_rsc, int then_role)
+anti_colocation_order(pcmk_resource_t *first_rsc, int first_role,
+ pcmk_resource_t *then_rsc, int then_role)
{
const char *first_tasks[] = { NULL, NULL };
const char *then_tasks[] = { NULL, NULL };
/* Actions to make first_rsc lose first_role */
- if (first_role == RSC_ROLE_PROMOTED) {
- first_tasks[0] = CRMD_ACTION_DEMOTE;
+ if (first_role == pcmk_role_promoted) {
+ first_tasks[0] = PCMK_ACTION_DEMOTE;
} else {
- first_tasks[0] = CRMD_ACTION_STOP;
+ first_tasks[0] = PCMK_ACTION_STOP;
- if (first_role == RSC_ROLE_UNPROMOTED) {
- first_tasks[1] = CRMD_ACTION_PROMOTE;
+ if (first_role == pcmk_role_unpromoted) {
+ first_tasks[1] = PCMK_ACTION_PROMOTE;
}
}
/* Actions to make then_rsc gain then_role */
- if (then_role == RSC_ROLE_PROMOTED) {
- then_tasks[0] = CRMD_ACTION_PROMOTE;
+ if (then_role == pcmk_role_promoted) {
+ then_tasks[0] = PCMK_ACTION_PROMOTE;
} else {
- then_tasks[0] = CRMD_ACTION_START;
+ then_tasks[0] = PCMK_ACTION_START;
- if (then_role == RSC_ROLE_UNPROMOTED) {
- then_tasks[1] = CRMD_ACTION_DEMOTE;
+ if (then_role == pcmk_role_unpromoted) {
+ then_tasks[1] = PCMK_ACTION_DEMOTE;
}
}
@@ -280,14 +326,14 @@ anti_colocation_order(pe_resource_t *first_rsc, int first_role,
pcmk__order_resource_actions(first_rsc, first_tasks[first_lpc],
then_rsc, then_tasks[then_lpc],
- pe_order_anti_colocation);
+ pcmk__ar_if_required_on_same_node);
}
}
}
/*!
* \internal
- * \brief Add a new colocation constraint to a cluster working set
+ * \brief Add a new colocation constraint to scheduler data
*
* \param[in] id XML ID for this constraint
* \param[in] node_attr Colocate by this attribute (NULL for #uname)
@@ -296,40 +342,42 @@ anti_colocation_order(pe_resource_t *first_rsc, int first_role,
* \param[in,out] primary Resource to colocate \p dependent with
* \param[in] dependent_role Current role of \p dependent
* \param[in] primary_role Current role of \p primary
- * \param[in] influence Whether colocation constraint has influence
- * \param[in,out] data_set Cluster working set to add constraint to
+ * \param[in] flags Group of enum pcmk__coloc_flags
*/
void
pcmk__new_colocation(const char *id, const char *node_attr, int score,
- pe_resource_t *dependent, pe_resource_t *primary,
+ pcmk_resource_t *dependent, pcmk_resource_t *primary,
const char *dependent_role, const char *primary_role,
- bool influence, pe_working_set_t *data_set)
+ uint32_t flags)
{
pcmk__colocation_t *new_con = NULL;
- if (score == 0) {
- crm_trace("Ignoring colocation '%s' because score is 0", id);
- return;
- }
+ CRM_CHECK(id != NULL, return);
+
if ((dependent == NULL) || (primary == NULL)) {
pcmk__config_err("Ignoring colocation '%s' because resource "
"does not exist", id);
return;
}
- new_con = calloc(1, sizeof(pcmk__colocation_t));
- if (new_con == NULL) {
+ if (score == 0) {
+ pe_rsc_trace(dependent,
+ "Ignoring colocation '%s' (%s with %s) because score is 0",
+ id, dependent->id, primary->id);
return;
}
- if (pcmk__str_eq(dependent_role, RSC_ROLE_STARTED_S,
+ new_con = calloc(1, sizeof(pcmk__colocation_t));
+ CRM_ASSERT(new_con != NULL);
+
+ if (pcmk__str_eq(dependent_role, PCMK__ROLE_STARTED,
pcmk__str_null_matches|pcmk__str_casei)) {
- dependent_role = RSC_ROLE_UNKNOWN_S;
+ dependent_role = PCMK__ROLE_UNKNOWN;
}
- if (pcmk__str_eq(primary_role, RSC_ROLE_STARTED_S,
+ if (pcmk__str_eq(primary_role, PCMK__ROLE_STARTED,
pcmk__str_null_matches|pcmk__str_casei)) {
- primary_role = RSC_ROLE_UNKNOWN_S;
+ primary_role = PCMK__ROLE_UNKNOWN;
}
new_con->id = id;
@@ -338,21 +386,14 @@ pcmk__new_colocation(const char *id, const char *node_attr, int score,
new_con->score = score;
new_con->dependent_role = text2role(dependent_role);
new_con->primary_role = text2role(primary_role);
- new_con->node_attribute = node_attr;
- new_con->influence = influence;
-
- if (node_attr == NULL) {
- node_attr = CRM_ATTR_UNAME;
- }
-
- pe_rsc_trace(dependent, "%s ==> %s (%s %d)",
- dependent->id, primary->id, node_attr, score);
+ new_con->node_attribute = pcmk__s(node_attr, CRM_ATTR_UNAME);
+ new_con->flags = flags;
- pcmk__add_this_with(&(dependent->rsc_cons), new_con);
- pcmk__add_with_this(&(primary->rsc_cons_lhs), new_con);
+ pcmk__add_this_with(&(dependent->rsc_cons), new_con, dependent);
+ pcmk__add_with_this(&(primary->rsc_cons_lhs), new_con, primary);
- data_set->colocation_constraints = g_list_append(data_set->colocation_constraints,
- new_con);
+ dependent->cluster->colocation_constraints = g_list_prepend(
+ dependent->cluster->colocation_constraints, new_con);
if (score <= -INFINITY) {
anti_colocation_order(dependent, new_con->dependent_role, primary,
@@ -370,11 +411,12 @@ pcmk__new_colocation(const char *id, const char *node_attr, int score,
* \param[in] rsc Resource involved in constraint (for default)
* \param[in] influence_s String value of influence option
*
- * \return true if string evaluates true, false if string evaluates false,
- * or value of resource's critical option if string is NULL or invalid
+ * \return pcmk__coloc_influence if string evaluates true, or string is NULL or
+ * invalid and resource's critical option evaluates true, otherwise
+ * pcmk__coloc_none
*/
-static bool
-unpack_influence(const char *coloc_id, const pe_resource_t *rsc,
+static uint32_t
+unpack_influence(const char *coloc_id, const pcmk_resource_t *rsc,
const char *influence_s)
{
if (influence_s != NULL) {
@@ -385,25 +427,29 @@ unpack_influence(const char *coloc_id, const pe_resource_t *rsc,
XML_COLOC_ATTR_INFLUENCE " (using default)",
coloc_id);
} else {
- return (influence_i != 0);
+ return (influence_i == 0)? pcmk__coloc_none : pcmk__coloc_influence;
}
}
- return pcmk_is_set(rsc->flags, pe_rsc_critical);
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_critical)) {
+ return pcmk__coloc_influence;
+ }
+ return pcmk__coloc_none;
}
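
A minimal standalone sketch of the same mapping (simplified: only literal "true"/"false"/"1"/"0" strings are recognized here, whereas the real code accepts any Pacemaker boolean spelling): an explicit "influence" value wins, and otherwise the resource's critical setting decides the flag:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define COLOC_NONE       0x0U
    #define COLOC_INFLUENCE  0x1U

    /* Map an "influence" string to a flag, defaulting to the critical setting */
    static uint32_t
    influence_flag(const char *influence_s, bool rsc_is_critical)
    {
        if (influence_s != NULL) {
            if ((strcmp(influence_s, "true") == 0) || (strcmp(influence_s, "1") == 0)) {
                return COLOC_INFLUENCE;
            }
            if ((strcmp(influence_s, "false") == 0) || (strcmp(influence_s, "0") == 0)) {
                return COLOC_NONE;
            }
            /* otherwise: invalid value, fall back to the resource default */
        }
        return rsc_is_critical ? COLOC_INFLUENCE : COLOC_NONE;
    }

    int
    main(void)
    {
        printf("%u %u %u\n",
               influence_flag("false", true),    /* 0: explicit value wins        */
               influence_flag(NULL, true),       /* 1: default from critical=true */
               influence_flag("bogus", false));  /* 0: invalid, default applies   */
        return 0;
    }
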
static void
unpack_colocation_set(xmlNode *set, int score, const char *coloc_id,
- const char *influence_s, pe_working_set_t *data_set)
+ const char *influence_s, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_rsc = NULL;
- pe_resource_t *with = NULL;
- pe_resource_t *resource = NULL;
+ pcmk_resource_t *other = NULL;
+ pcmk_resource_t *resource = NULL;
const char *set_id = ID(set);
const char *role = crm_element_value(set, "role");
- const char *ordering = crm_element_value(set, "ordering");
+ bool with_previous = false;
int local_score = score;
bool sequential = false;
-
+ uint32_t flags = pcmk__coloc_none;
+ const char *xml_rsc_id = NULL;
const char *score_s = crm_element_value(set, XML_RULE_ATTR_SCORE);
if (score_s) {
@@ -415,46 +461,53 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id,
return;
}
- if (ordering == NULL) {
- ordering = "group";
+ /* @COMPAT The deprecated "ordering" attribute specifies whether resources
+ * in a positive-score set are colocated with the previous or next resource.
+ */
+ if (pcmk__str_eq(crm_element_value(set, "ordering"), "group",
+ pcmk__str_null_matches|pcmk__str_casei)) {
+ with_previous = true;
+ } else {
+ pe_warn_once(pcmk__wo_set_ordering,
+ "Support for 'ordering' other than 'group' in "
+ XML_CONS_TAG_RSC_SET " (such as %s) is deprecated and "
+ "will be removed in a future release", set_id);
}
- if (pcmk__xe_get_bool_attr(set, "sequential", &sequential) == pcmk_rc_ok && !sequential) {
+ if ((pcmk__xe_get_bool_attr(set, "sequential", &sequential) == pcmk_rc_ok)
+ && !sequential) {
return;
+ }
- } else if ((local_score > 0)
- && pcmk__str_eq(ordering, "group", pcmk__str_casei)) {
+ if (local_score > 0) {
for (xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
- EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc));
- if (with != NULL) {
- pe_rsc_trace(resource, "Colocating %s with %s", resource->id, with->id);
- pcmk__new_colocation(set_id, NULL, local_score, resource,
- with, role, role,
- unpack_influence(coloc_id, resource,
- influence_s), data_set);
+ xml_rsc_id = ID(xml_rsc);
+ resource = pcmk__find_constraint_resource(scheduler->resources,
+ xml_rsc_id);
+ if (resource == NULL) {
+ // Should be possible only with validation disabled
+ pcmk__config_err("Ignoring %s and later resources in set %s: "
+ "No such resource", xml_rsc_id, set_id);
+ return;
}
- with = resource;
- }
-
- } else if (local_score > 0) {
- pe_resource_t *last = NULL;
-
- for (xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF);
- xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
-
- EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc));
- if (last != NULL) {
- pe_rsc_trace(resource, "Colocating %s with %s",
- last->id, resource->id);
- pcmk__new_colocation(set_id, NULL, local_score, last,
- resource, role, role,
- unpack_influence(coloc_id, last,
- influence_s), data_set);
+ if (other != NULL) {
+ flags = pcmk__coloc_explicit
+ | unpack_influence(coloc_id, resource, influence_s);
+ if (with_previous) {
+ pe_rsc_trace(resource, "Colocating %s with %s in set %s",
+ resource->id, other->id, set_id);
+ pcmk__new_colocation(set_id, NULL, local_score, resource,
+ other, role, role, flags);
+ } else {
+ pe_rsc_trace(resource, "Colocating %s with %s in set %s",
+ other->id, resource->id, set_id);
+ pcmk__new_colocation(set_id, NULL, local_score, other,
+ resource, role, role, flags);
+ }
}
-
- last = resource;
+ other = resource;
}
} else {
@@ -467,117 +520,187 @@ unpack_colocation_set(xmlNode *set, int score, const char *coloc_id,
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
xmlNode *xml_rsc_with = NULL;
- bool influence = true;
-
- EXPAND_CONSTRAINT_IDREF(set_id, resource, ID(xml_rsc));
- influence = unpack_influence(coloc_id, resource, influence_s);
+ xml_rsc_id = ID(xml_rsc);
+ resource = pcmk__find_constraint_resource(scheduler->resources,
+ xml_rsc_id);
+ if (resource == NULL) {
+ // Should be possible only with validation disabled
+ pcmk__config_err("Ignoring %s and later resources in set %s: "
+ "No such resource", xml_rsc_id, set_id);
+ return;
+ }
+ flags = pcmk__coloc_explicit
+ | unpack_influence(coloc_id, resource, influence_s);
for (xml_rsc_with = first_named_child(set, XML_TAG_RESOURCE_REF);
xml_rsc_with != NULL;
xml_rsc_with = crm_next_same_xml(xml_rsc_with)) {
- if (pcmk__str_eq(resource->id, ID(xml_rsc_with),
- pcmk__str_casei)) {
+ xml_rsc_id = ID(xml_rsc_with);
+ if (pcmk__str_eq(resource->id, xml_rsc_id, pcmk__str_none)) {
break;
}
- EXPAND_CONSTRAINT_IDREF(set_id, with, ID(xml_rsc_with));
- pe_rsc_trace(resource, "Anti-Colocating %s with %s", resource->id,
- with->id);
+ other = pcmk__find_constraint_resource(scheduler->resources,
+ xml_rsc_id);
+ CRM_ASSERT(other != NULL); // We already processed it
pcmk__new_colocation(set_id, NULL, local_score,
- resource, with, role, role,
- influence, data_set);
+ resource, other, role, role, flags);
}
}
}
}
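
The pairwise chaining in the positive-score branch above can be sketched standalone (simplified types, with a print statement standing in for pcmk__new_colocation()): each member of the set is colocated with the previously listed one, in the direction selected by the deprecated "ordering" attribute:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for creating a colocation: just report the pair */
    static void
    new_colocation(const char *dependent, const char *primary)
    {
        printf("colocate %s with %s\n", dependent, primary);
    }

    /* Colocate each member with the previously listed one */
    static void
    chain_set(const char *members[], int n, bool with_previous)
    {
        const char *other = NULL;   /* previously processed member */

        for (int i = 0; i < n; i++) {
            if (other != NULL) {
                if (with_previous) {
                    new_colocation(members[i], other);  /* ordering="group" (default) */
                } else {
                    new_colocation(other, members[i]);  /* any other ordering value */
                }
            }
            other = members[i];
        }
    }

    int
    main(void)
    {
        const char *set[] = { "A", "B", "C" };

        chain_set(set, 3, true);    /* prints: B with A, then C with B */
        return 0;
    }
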
+/*!
+ * \internal
+ * \brief Colocate two resource sets relative to each other
+ *
+ * \param[in] id Colocation XML ID
+ * \param[in] set1 Dependent set
+ * \param[in] set2 Primary set
+ * \param[in] score Colocation score
+ * \param[in] influence_s Value of colocation's "influence" attribute
+ * \param[in,out] scheduler Scheduler data
+ */
static void
-colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score,
- const char *influence_s, pe_working_set_t *data_set)
+colocate_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2,
+ int score, const char *influence_s,
+ pcmk_scheduler_t *scheduler)
{
xmlNode *xml_rsc = NULL;
- pe_resource_t *rsc_1 = NULL;
- pe_resource_t *rsc_2 = NULL;
+ pcmk_resource_t *rsc_1 = NULL;
+ pcmk_resource_t *rsc_2 = NULL;
+ const char *xml_rsc_id = NULL;
const char *role_1 = crm_element_value(set1, "role");
const char *role_2 = crm_element_value(set2, "role");
int rc = pcmk_rc_ok;
bool sequential = false;
+ uint32_t flags = pcmk__coloc_none;
if (score == 0) {
- crm_trace("Ignoring colocation '%s' between sets because score is 0",
- id);
+ crm_trace("Ignoring colocation '%s' between sets %s and %s "
+ "because score is 0", id, ID(set1), ID(set2));
return;
}
rc = pcmk__xe_get_bool_attr(set1, "sequential", &sequential);
- if (rc != pcmk_rc_ok || sequential) {
+ if ((rc != pcmk_rc_ok) || sequential) {
// Get the first one
xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF);
if (xml_rsc != NULL) {
- EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
+ xml_rsc_id = ID(xml_rsc);
+ rsc_1 = pcmk__find_constraint_resource(scheduler->resources,
+ xml_rsc_id);
+ if (rsc_1 == NULL) {
+ // Should be possible only with validation disabled
+ pcmk__config_err("Ignoring colocation of set %s with set %s "
+ "because first resource %s not found",
+ ID(set1), ID(set2), xml_rsc_id);
+ return;
+ }
}
}
rc = pcmk__xe_get_bool_attr(set2, "sequential", &sequential);
- if (rc != pcmk_rc_ok || sequential) {
+ if ((rc != pcmk_rc_ok) || sequential) {
// Get the last one
- const char *rid = NULL;
-
for (xml_rsc = first_named_child(set2, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
- rid = ID(xml_rsc);
+ xml_rsc_id = ID(xml_rsc);
+ }
+ rsc_2 = pcmk__find_constraint_resource(scheduler->resources,
+ xml_rsc_id);
+ if (rsc_2 == NULL) {
+ // Should be possible only with validation disabled
+ pcmk__config_err("Ignoring colocation of set %s with set %s "
+ "because last resource %s not found",
+ ID(set1), ID(set2), xml_rsc_id);
+ return;
}
- EXPAND_CONSTRAINT_IDREF(id, rsc_2, rid);
}
- if ((rsc_1 != NULL) && (rsc_2 != NULL)) {
+ if ((rsc_1 != NULL) && (rsc_2 != NULL)) { // Both sets are sequential
+ flags = pcmk__coloc_explicit | unpack_influence(id, rsc_1, influence_s);
pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, role_2,
- unpack_influence(id, rsc_1, influence_s),
- data_set);
-
- } else if (rsc_1 != NULL) {
- bool influence = unpack_influence(id, rsc_1, influence_s);
+ flags);
+ } else if (rsc_1 != NULL) { // Only set1 is sequential
+ flags = pcmk__coloc_explicit | unpack_influence(id, rsc_1, influence_s);
for (xml_rsc = first_named_child(set2, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
- EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc));
+ xml_rsc_id = ID(xml_rsc);
+ rsc_2 = pcmk__find_constraint_resource(scheduler->resources,
+ xml_rsc_id);
+ if (rsc_2 == NULL) {
+ // Should be possible only with validation disabled
+ pcmk__config_err("Ignoring set %s colocation with resource %s "
+ "in set %s: No such resource",
+ ID(set1), xml_rsc_id, ID(set2));
+ continue;
+ }
pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1,
- role_2, influence, data_set);
+ role_2, flags);
}
- } else if (rsc_2 != NULL) {
+ } else if (rsc_2 != NULL) { // Only set2 is sequential
for (xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
- EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
+ xml_rsc_id = ID(xml_rsc);
+ rsc_1 = pcmk__find_constraint_resource(scheduler->resources,
+ xml_rsc_id);
+ if (rsc_1 == NULL) {
+ // Should be possible only with validation disabled
+ pcmk__config_err("Ignoring colocation of set %s resource %s "
+ "with set %s: No such resource",
+ ID(set1), xml_rsc_id, ID(set2));
+ continue;
+ }
+ flags = pcmk__coloc_explicit
+ | unpack_influence(id, rsc_1, influence_s);
pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1,
- role_2,
- unpack_influence(id, rsc_1, influence_s),
- data_set);
+ role_2, flags);
}
- } else {
+ } else { // Neither set is sequential
for (xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
xmlNode *xml_rsc_2 = NULL;
- bool influence = true;
- EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
- influence = unpack_influence(id, rsc_1, influence_s);
+ xml_rsc_id = ID(xml_rsc);
+ rsc_1 = pcmk__find_constraint_resource(scheduler->resources,
+ xml_rsc_id);
+ if (rsc_1 == NULL) {
+ // Should be possible only with validation disabled
+ pcmk__config_err("Ignoring colocation of set %s resource %s "
+ "with set %s: No such resource",
+ ID(set1), xml_rsc_id, ID(set2));
+ continue;
+ }
+ flags = pcmk__coloc_explicit
+ | unpack_influence(id, rsc_1, influence_s);
for (xml_rsc_2 = first_named_child(set2, XML_TAG_RESOURCE_REF);
xml_rsc_2 != NULL;
xml_rsc_2 = crm_next_same_xml(xml_rsc_2)) {
- EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2));
+ xml_rsc_id = ID(xml_rsc_2);
+ rsc_2 = pcmk__find_constraint_resource(scheduler->resources,
+ xml_rsc_id);
+ if (rsc_2 == NULL) {
+ // Should be possible only with validation disabled
+ pcmk__config_err("Ignoring colocation of set %s resource "
+ "%s with set %s resource %s: No such "
+ "resource", ID(set1), ID(xml_rsc),
+ ID(set2), xml_rsc_id);
+ continue;
+ }
pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2,
- role_1, role_2, influence,
- data_set);
+ role_1, role_2, flags);
}
}
}
@@ -585,9 +708,10 @@ colocate_rsc_sets(const char *id, xmlNode *set1, xmlNode *set2, int score,
static void
unpack_simple_colocation(xmlNode *xml_obj, const char *id,
- const char *influence_s, pe_working_set_t *data_set)
+ const char *influence_s, pcmk_scheduler_t *scheduler)
{
int score_i = 0;
+ uint32_t flags = pcmk__coloc_none;
const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE);
const char *dependent_id = crm_element_value(xml_obj,
@@ -599,26 +723,27 @@ unpack_simple_colocation(xmlNode *xml_obj, const char *id,
XML_COLOC_ATTR_TARGET_ROLE);
const char *attr = crm_element_value(xml_obj, XML_COLOC_ATTR_NODE_ATTR);
- // @COMPAT: Deprecated since 2.1.5
- const char *dependent_instance = crm_element_value(xml_obj,
- XML_COLOC_ATTR_SOURCE_INSTANCE);
- // @COMPAT: Deprecated since 2.1.5
- const char *primary_instance = crm_element_value(xml_obj,
- XML_COLOC_ATTR_TARGET_INSTANCE);
+ const char *primary_instance = NULL;
+ const char *dependent_instance = NULL;
+ pcmk_resource_t *primary = NULL;
+ pcmk_resource_t *dependent = NULL;
- pe_resource_t *dependent = pcmk__find_constraint_resource(data_set->resources,
- dependent_id);
- pe_resource_t *primary = pcmk__find_constraint_resource(data_set->resources,
- primary_id);
+ primary = pcmk__find_constraint_resource(scheduler->resources, primary_id);
+ dependent = pcmk__find_constraint_resource(scheduler->resources,
+ dependent_id);
+ // @COMPAT: Deprecated since 2.1.5
+ primary_instance = crm_element_value(xml_obj,
+ XML_COLOC_ATTR_TARGET_INSTANCE);
+ dependent_instance = crm_element_value(xml_obj,
+ XML_COLOC_ATTR_SOURCE_INSTANCE);
if (dependent_instance != NULL) {
- pe_warn_once(pe_wo_coloc_inst,
+ pe_warn_once(pcmk__wo_coloc_inst,
"Support for " XML_COLOC_ATTR_SOURCE_INSTANCE " is "
"deprecated and will be removed in a future release.");
}
-
if (primary_instance != NULL) {
- pe_warn_once(pe_wo_coloc_inst,
+ pe_warn_once(pcmk__wo_coloc_inst,
"Support for " XML_COLOC_ATTR_TARGET_INSTANCE " is "
"deprecated and will be removed in a future release.");
}
@@ -676,15 +801,15 @@ unpack_simple_colocation(xmlNode *xml_obj, const char *id,
score_i = char2score(score);
}
+ flags = pcmk__coloc_explicit | unpack_influence(id, dependent, influence_s);
pcmk__new_colocation(id, attr, score_i, dependent, primary,
- dependent_role, primary_role,
- unpack_influence(id, dependent, influence_s), data_set);
+ dependent_role, primary_role, flags);
}
// \return Standard Pacemaker return code
static int
unpack_colocation_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
const char *id = NULL;
const char *dependent_id = NULL;
@@ -692,11 +817,11 @@ unpack_colocation_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
const char *dependent_role = NULL;
const char *primary_role = NULL;
- pe_resource_t *dependent = NULL;
- pe_resource_t *primary = NULL;
+ pcmk_resource_t *dependent = NULL;
+ pcmk_resource_t *primary = NULL;
- pe_tag_t *dependent_tag = NULL;
- pe_tag_t *primary_tag = NULL;
+ pcmk_tag_t *dependent_tag = NULL;
+ pcmk_tag_t *primary_tag = NULL;
xmlNode *dependent_set = NULL;
xmlNode *primary_set = NULL;
@@ -709,12 +834,12 @@ unpack_colocation_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
id = ID(xml_obj);
if (id == NULL) {
pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
- crm_element_name(xml_obj));
+ xml_obj->name);
return pcmk_rc_unpack_error;
}
// Check whether there are any resource sets with template or tag references
- *expanded_xml = pcmk__expand_tags_in_sets(xml_obj, data_set);
+ *expanded_xml = pcmk__expand_tags_in_sets(xml_obj, scheduler);
if (*expanded_xml != NULL) {
crm_log_xml_trace(*expanded_xml, "Expanded rsc_colocation");
return pcmk_rc_ok;
@@ -726,14 +851,14 @@ unpack_colocation_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
return pcmk_rc_ok;
}
- if (!pcmk__valid_resource_or_tag(data_set, dependent_id, &dependent,
+ if (!pcmk__valid_resource_or_tag(scheduler, dependent_id, &dependent,
&dependent_tag)) {
pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
"valid resource or tag", id, dependent_id);
return pcmk_rc_unpack_error;
}
- if (!pcmk__valid_resource_or_tag(data_set, primary_id, &primary,
+ if (!pcmk__valid_resource_or_tag(scheduler, primary_id, &primary,
&primary_tag)) {
pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
"valid resource or tag", id, primary_id);
@@ -757,9 +882,9 @@ unpack_colocation_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
*expanded_xml = copy_xml(xml_obj);
- // Convert template/tag reference in "rsc" into resource_set under constraint
+ // Convert dependent's template/tag reference into constraint resource_set
if (!pcmk__tag_to_set(*expanded_xml, &dependent_set, XML_COLOC_ATTR_SOURCE,
- true, data_set)) {
+ true, scheduler)) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
return pcmk_rc_unpack_error;
@@ -774,9 +899,9 @@ unpack_colocation_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
any_sets = true;
}
- // Convert template/tag reference in "with-rsc" into resource_set under constraint
+ // Convert primary's template/tag reference into constraint resource_set
if (!pcmk__tag_to_set(*expanded_xml, &primary_set, XML_COLOC_ATTR_TARGET,
- true, data_set)) {
+ true, scheduler)) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
return pcmk_rc_unpack_error;
@@ -803,13 +928,13 @@ unpack_colocation_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
/*!
* \internal
- * \brief Parse a colocation constraint from XML into a cluster working set
+ * \brief Parse a colocation constraint from XML into scheduler data
*
- * \param[in,out] xml_obj Colocation constraint XML to unpack
- * \param[in,out] data_set Cluster working set to add constraint to
+ * \param[in,out] xml_obj Colocation constraint XML to unpack
+ * \param[in,out] scheduler Scheduler data to add constraint to
*/
void
-pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set)
+pcmk__unpack_colocation(xmlNode *xml_obj, pcmk_scheduler_t *scheduler)
{
int score_i = 0;
xmlNode *set = NULL;
@@ -819,27 +944,34 @@ pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set)
xmlNode *expanded_xml = NULL;
const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
- const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE);
- const char *influence_s = crm_element_value(xml_obj,
- XML_COLOC_ATTR_INFLUENCE);
+ const char *score = NULL;
+ const char *influence_s = NULL;
- if (score) {
- score_i = char2score(score);
+ if (pcmk__str_empty(id)) {
+ pcmk__config_err("Ignoring " XML_CONS_TAG_RSC_DEPEND
+ " without " CRM_ATTR_ID);
+ return;
}
if (unpack_colocation_tags(xml_obj, &expanded_xml,
- data_set) != pcmk_rc_ok) {
+ scheduler) != pcmk_rc_ok) {
return;
}
- if (expanded_xml) {
+ if (expanded_xml != NULL) {
orig_xml = xml_obj;
xml_obj = expanded_xml;
}
+ score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE);
+ if (score != NULL) {
+ score_i = char2score(score);
+ }
+ influence_s = crm_element_value(xml_obj, XML_COLOC_ATTR_INFLUENCE);
+
for (set = first_named_child(xml_obj, XML_CONS_TAG_RSC_SET); set != NULL;
set = crm_next_same_xml(set)) {
- set = expand_idref(set, data_set->input);
+ set = expand_idref(set, scheduler->input);
if (set == NULL) { // Configuration error, message already logged
if (expanded_xml != NULL) {
free_xml(expanded_xml);
@@ -847,10 +979,15 @@ pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set)
return;
}
- unpack_colocation_set(set, score_i, id, influence_s, data_set);
+ if (pcmk__str_empty(ID(set))) {
+ pcmk__config_err("Ignoring " XML_CONS_TAG_RSC_SET
+ " without " CRM_ATTR_ID);
+ continue;
+ }
+ unpack_colocation_set(set, score_i, id, influence_s, scheduler);
if (last != NULL) {
- colocate_rsc_sets(id, last, set, score_i, influence_s, data_set);
+ colocate_rsc_sets(id, last, set, score_i, influence_s, scheduler);
}
last = set;
}
@@ -861,7 +998,7 @@ pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set)
}
if (last == NULL) {
- unpack_simple_colocation(xml_obj, id, influence_s, data_set);
+ unpack_simple_colocation(xml_obj, id, influence_s, scheduler);
}
}
@@ -874,27 +1011,28 @@ pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set)
* \param[in] reason Unrunnable start action causing the block
*/
static void
-mark_action_blocked(pe_resource_t *rsc, const char *task,
- const pe_resource_t *reason)
+mark_action_blocked(pcmk_resource_t *rsc, const char *task,
+ const pcmk_resource_t *reason)
{
+ GList *iter = NULL;
char *reason_text = crm_strdup_printf("colocation with %s", reason->id);
- for (GList *gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ for (iter = rsc->actions; iter != NULL; iter = iter->next) {
+ pcmk_action_t *action = iter->data;
- if (pcmk_is_set(action->flags, pe_action_runnable)
- && pcmk__str_eq(action->task, task, pcmk__str_casei)) {
+ if (pcmk_is_set(action->flags, pcmk_action_runnable)
+ && pcmk__str_eq(action->task, task, pcmk__str_none)) {
- pe__clear_action_flags(action, pe_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
pe_action_set_reason(action, reason_text, false);
- pcmk__block_colocation_dependents(action, rsc->cluster);
+ pcmk__block_colocation_dependents(action);
pcmk__update_action_for_orderings(action, rsc->cluster);
}
}
// If parent resource can't perform an action, neither can any children
- for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- mark_action_blocked((pe_resource_t *) (iter->data), task, reason);
+ for (iter = rsc->children; iter != NULL; iter = iter->next) {
+ mark_action_blocked((pcmk_resource_t *) (iter->data), task, reason);
}
free(reason_text);
}
@@ -907,24 +1045,23 @@ mark_action_blocked(pe_resource_t *rsc, const char *task,
* promote actions of resources colocated with it, as appropriate to the
* colocations' configured roles.
*
- * \param[in,out] action Action to check
- * \param[in] data_set Cluster working set (ignored)
+ * \param[in,out] action Action to check
*/
void
-pcmk__block_colocation_dependents(pe_action_t *action,
- pe_working_set_t *data_set)
+pcmk__block_colocation_dependents(pcmk_action_t *action)
{
- GList *gIter = NULL;
+ GList *iter = NULL;
GList *colocations = NULL;
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
bool is_start = false;
- if (pcmk_is_set(action->flags, pe_action_runnable)) {
+ if (pcmk_is_set(action->flags, pcmk_action_runnable)) {
return; // Only unrunnable actions block dependents
}
- is_start = pcmk__str_eq(action->task, RSC_START, pcmk__str_none);
- if (!is_start && !pcmk__str_eq(action->task, RSC_PROMOTE, pcmk__str_none)) {
+ is_start = pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_none);
+ if (!is_start
+ && !pcmk__str_eq(action->task, PCMK_ACTION_PROMOTE, pcmk__str_none)) {
return; // Only unrunnable starts and promotes block dependents
}
@@ -940,13 +1077,13 @@ pcmk__block_colocation_dependents(pe_action_t *action,
}
// Colocation fails only if entire primary can't reach desired role
- for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child = (pe_resource_t *) gIter->data;
- pe_action_t *child_action = find_first_action(child->actions, NULL,
- action->task, NULL);
+ for (iter = rsc->children; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *child = iter->data;
+ pcmk_action_t *child_action = find_first_action(child->actions, NULL,
+ action->task, NULL);
if ((child_action == NULL)
- || pcmk_is_set(child_action->flags, pe_action_runnable)) {
+ || pcmk_is_set(child_action->flags, pcmk_action_runnable)) {
crm_trace("Not blocking %s colocation dependents because "
"at least %s has runnable %s",
rsc->id, child->id, action->task);
@@ -959,8 +1096,8 @@ pcmk__block_colocation_dependents(pe_action_t *action,
// Check each colocation where this resource is primary
colocations = pcmk__with_this_colocations(rsc);
- for (gIter = colocations; gIter != NULL; gIter = gIter->next) {
- pcmk__colocation_t *colocation = (pcmk__colocation_t *) gIter->data;
+ for (iter = colocations; iter != NULL; iter = iter->next) {
+ pcmk__colocation_t *colocation = iter->data;
if (colocation->score < INFINITY) {
continue; // Only mandatory colocations block dependent
@@ -972,16 +1109,17 @@ pcmk__block_colocation_dependents(pe_action_t *action,
* If the primary can't be promoted, the dependent can't reach its
* colocated role if the primary's colocation role is promoted.
*/
- if (!is_start && (colocation->primary_role != RSC_ROLE_PROMOTED)) {
+ if (!is_start && (colocation->primary_role != pcmk_role_promoted)) {
continue;
}
// Block the dependent from reaching its colocated role
- if (colocation->dependent_role == RSC_ROLE_PROMOTED) {
- mark_action_blocked(colocation->dependent, RSC_PROMOTE,
+ if (colocation->dependent_role == pcmk_role_promoted) {
+ mark_action_blocked(colocation->dependent, PCMK_ACTION_PROMOTE,
action->rsc);
} else {
- mark_action_blocked(colocation->dependent, RSC_START, action->rsc);
+ mark_action_blocked(colocation->dependent, PCMK_ACTION_START,
+ action->rsc);
}
}
g_list_free(colocations);
@@ -989,6 +1127,37 @@ pcmk__block_colocation_dependents(pe_action_t *action,
/*!
* \internal
+ * \brief Get the resource to use for role comparisons
+ *
+ * A bundle replica includes a container and possibly an instance of the bundled
+ * resource. The dependent in a "with bundle" colocation is colocated with a
+ * particular bundle container. However, if the colocation includes a role, then
+ * the role must be checked on the bundled resource instance inside the
+ * container. The container itself will never be promoted; the bundled resource
+ * may be.
+ *
+ * If the given resource is a bundle replica container, return the resource
+ * inside it, if any. Otherwise, return the resource itself.
+ *
+ * \param[in] rsc Resource to check
+ *
+ * \return Resource to use for role comparisons
+ */
+static const pcmk_resource_t *
+get_resource_for_role(const pcmk_resource_t *rsc)
+{
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_replica_container)) {
+ const pcmk_resource_t *child = pe__get_rsc_in_container(rsc);
+
+ if (child != NULL) {
+ return child;
+ }
+ }
+ return rsc;
+}
+
+/*!
+ * \internal
* \brief Determine how a colocation constraint should affect a resource
*
* Colocation constraints have different effects at different points in the
@@ -1001,39 +1170,48 @@ pcmk__block_colocation_dependents(pe_action_t *action,
* \param[in] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint
- * \param[in] preview If true, pretend resources have already been allocated
+ * \param[in] preview If true, pretend resources have already been assigned
*
* \return How colocation constraint should be applied at this point
*/
enum pcmk__coloc_affects
-pcmk__colocation_affects(const pe_resource_t *dependent,
- const pe_resource_t *primary,
+pcmk__colocation_affects(const pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation, bool preview)
{
- if (!preview && pcmk_is_set(primary->flags, pe_rsc_provisional)) {
- // Primary resource has not been allocated yet, so we can't do anything
+ const pcmk_resource_t *dependent_role_rsc = NULL;
+ const pcmk_resource_t *primary_role_rsc = NULL;
+
+ CRM_ASSERT((dependent != NULL) && (primary != NULL)
+ && (colocation != NULL));
+
+ if (!preview && pcmk_is_set(primary->flags, pcmk_rsc_unassigned)) {
+ // Primary resource has not been assigned yet, so we can't do anything
return pcmk__coloc_affects_nothing;
}
- if ((colocation->dependent_role >= RSC_ROLE_UNPROMOTED)
- && (dependent->parent != NULL)
- && pcmk_is_set(dependent->parent->flags, pe_rsc_promotable)
- && !pcmk_is_set(dependent->flags, pe_rsc_provisional)) {
+ dependent_role_rsc = get_resource_for_role(dependent);
+ primary_role_rsc = get_resource_for_role(primary);
+
+ if ((colocation->dependent_role >= pcmk_role_unpromoted)
+ && (dependent_role_rsc->parent != NULL)
+ && pcmk_is_set(dependent_role_rsc->parent->flags, pcmk_rsc_promotable)
+ && !pcmk_is_set(dependent_role_rsc->flags, pcmk_rsc_unassigned)) {
/* This is a colocation by role, and the dependent is a promotable clone
- * that has already been allocated, so the colocation should now affect
+ * that has already been assigned, so the colocation should now affect
* the role.
*/
return pcmk__coloc_affects_role;
}
- if (!preview && !pcmk_is_set(dependent->flags, pe_rsc_provisional)) {
- /* The dependent resource has already been through allocation, so the
+ if (!preview && !pcmk_is_set(dependent->flags, pcmk_rsc_unassigned)) {
+ /* The dependent resource has already been through assignment, so the
* constraint no longer has any effect. Log an error if a mandatory
* colocation constraint has been violated.
*/
- const pe_node_t *primary_node = primary->allocated_to;
+ const pcmk_node_t *primary_node = primary->allocated_to;
if (dependent->allocated_to == NULL) {
crm_trace("Skipping colocation '%s': %s will not run anywhere",
@@ -1042,8 +1220,7 @@ pcmk__colocation_affects(const pe_resource_t *dependent,
} else if (colocation->score >= INFINITY) {
// Dependent resource must colocate with primary resource
- if ((primary_node == NULL) ||
- (primary_node->details != dependent->allocated_to->details)) {
+ if (!pe__same_node(primary_node, dependent->allocated_to)) {
crm_err("%s must be colocated with %s but is not (%s vs. %s)",
dependent->id, primary->id,
pe__node_name(dependent->allocated_to),
@@ -1053,51 +1230,35 @@ pcmk__colocation_affects(const pe_resource_t *dependent,
} else if (colocation->score <= -CRM_SCORE_INFINITY) {
// Dependent resource must anti-colocate with primary resource
- if ((primary_node != NULL) &&
- (dependent->allocated_to->details == primary_node->details)) {
- crm_err("%s and %s must be anti-colocated but are allocated "
+ if (pe__same_node(dependent->allocated_to, primary_node)) {
+ crm_err("%s and %s must be anti-colocated but are assigned "
"to the same node (%s)",
- dependent->id, primary->id, pe__node_name(primary_node));
+ dependent->id, primary->id,
+ pe__node_name(primary_node));
}
}
return pcmk__coloc_affects_nothing;
}
- if ((colocation->score > 0)
- && (colocation->dependent_role != RSC_ROLE_UNKNOWN)
- && (colocation->dependent_role != dependent->next_role)) {
+ if ((colocation->dependent_role != pcmk_role_unknown)
+ && (colocation->dependent_role != dependent_role_rsc->next_role)) {
+ crm_trace("Skipping %scolocation '%s': dependent limited to %s role "
- crm_trace("Skipping colocation '%s': dependent limited to %s role "
"but %s next role is %s",
+ ((colocation->score < 0)? "anti-" : ""),
colocation->id, role2text(colocation->dependent_role),
- dependent->id, role2text(dependent->next_role));
+ dependent_role_rsc->id,
+ role2text(dependent_role_rsc->next_role));
return pcmk__coloc_affects_nothing;
}
- if ((colocation->score > 0)
- && (colocation->primary_role != RSC_ROLE_UNKNOWN)
- && (colocation->primary_role != primary->next_role)) {
-
- crm_trace("Skipping colocation '%s': primary limited to %s role "
+ if ((colocation->primary_role != pcmk_role_unknown)
+ && (colocation->primary_role != primary_role_rsc->next_role)) {
+ crm_trace("Skipping %scolocation '%s': primary limited to %s role "
"but %s next role is %s",
+ ((colocation->score < 0)? "anti-" : ""),
colocation->id, role2text(colocation->primary_role),
- primary->id, role2text(primary->next_role));
- return pcmk__coloc_affects_nothing;
- }
-
- if ((colocation->score < 0)
- && (colocation->dependent_role != RSC_ROLE_UNKNOWN)
- && (colocation->dependent_role == dependent->next_role)) {
- crm_trace("Skipping anti-colocation '%s': dependent role %s matches",
- colocation->id, role2text(colocation->dependent_role));
- return pcmk__coloc_affects_nothing;
- }
-
- if ((colocation->score < 0)
- && (colocation->primary_role != RSC_ROLE_UNKNOWN)
- && (colocation->primary_role == primary->next_role)) {
- crm_trace("Skipping anti-colocation '%s': primary role %s matches",
- colocation->id, role2text(colocation->primary_role));
+ primary_role_rsc->id, role2text(primary_role_rsc->next_role));
return pcmk__coloc_affects_nothing;
}
@@ -1106,32 +1267,29 @@ pcmk__colocation_affects(const pe_resource_t *dependent,
/*!
* \internal
- * \brief Apply colocation to dependent for allocation purposes
+ * \brief Apply colocation to dependent for assignment purposes
*
- * Update the allowed node weights of the dependent resource in a colocation,
- * for the purposes of allocating it to a node
+ * Update the allowed node scores of the dependent resource in a colocation,
+ * for the purposes of assigning it to a node.
*
* \param[in,out] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint
*/
void
-pcmk__apply_coloc_to_weights(pe_resource_t *dependent,
- const pe_resource_t *primary,
- const pcmk__colocation_t *colocation)
+pcmk__apply_coloc_to_scores(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
+ const pcmk__colocation_t *colocation)
{
- const char *attribute = CRM_ATTR_ID;
+ const char *attr = colocation->node_attribute;
const char *value = NULL;
GHashTable *work = NULL;
GHashTableIter iter;
- pe_node_t *node = NULL;
-
- if (colocation->node_attribute != NULL) {
- attribute = colocation->node_attribute;
- }
+ pcmk_node_t *node = NULL;
if (primary->allocated_to != NULL) {
- value = pe_node_attribute_raw(primary->allocated_to, attribute);
+ value = pcmk__colocation_node_attr(primary->allocated_to, attr,
+ primary);
} else if (colocation->score < 0) {
// Nothing to do (anti-colocation with something that is not running)
@@ -1150,9 +1308,12 @@ pcmk__apply_coloc_to_weights(pe_resource_t *dependent,
colocation->id, dependent->id, pe__node_name(node),
pcmk_readable_score(node->weight),
pcmk_readable_score(colocation->score), primary->id);
+ continue;
+ }
+
+ if (pcmk__str_eq(pcmk__colocation_node_attr(node, attr, dependent),
+ value, pcmk__str_casei)) {
- } else if (pcmk__str_eq(pe_node_attribute_raw(node, attribute), value,
- pcmk__str_casei)) {
/* Add colocation score only if optional (or minus infinity). A
* mandatory colocation is a requirement rather than a preference,
* so we don't need to consider it for relative assignment purposes.
@@ -1169,8 +1330,10 @@ pcmk__apply_coloc_to_weights(pe_resource_t *dependent,
pcmk_readable_score(node->weight),
pcmk_readable_score(colocation->score));
}
+ continue;
+ }
- } else if (colocation->score >= CRM_SCORE_INFINITY) {
+ if (colocation->score >= CRM_SCORE_INFINITY) {
/* Only mandatory colocations are relevant when the colocation
* attribute doesn't match, because an attribute not matching is not
* a negative preference -- the colocation is simply relevant only
@@ -1181,7 +1344,7 @@ pcmk__apply_coloc_to_weights(pe_resource_t *dependent,
"Banned %s from %s because colocation %s attribute %s "
"does not match",
dependent->id, pe__node_name(node), colocation->id,
- attribute);
+ attr);
}
}
@@ -1215,40 +1378,45 @@ pcmk__apply_coloc_to_weights(pe_resource_t *dependent,
* \param[in] colocation Colocation constraint
*/
void
-pcmk__apply_coloc_to_priority(pe_resource_t *dependent,
- const pe_resource_t *primary,
+pcmk__apply_coloc_to_priority(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation)
{
const char *dependent_value = NULL;
const char *primary_value = NULL;
- const char *attribute = CRM_ATTR_ID;
+ const char *attr = colocation->node_attribute;
int score_multiplier = 1;
+ const pcmk_resource_t *primary_role_rsc = NULL;
+
+ CRM_ASSERT((dependent != NULL) && (primary != NULL) &&
+ (colocation != NULL));
+
if ((primary->allocated_to == NULL) || (dependent->allocated_to == NULL)) {
return;
}
- if (colocation->node_attribute != NULL) {
- attribute = colocation->node_attribute;
- }
+ dependent_value = pcmk__colocation_node_attr(dependent->allocated_to, attr,
+ dependent);
+ primary_value = pcmk__colocation_node_attr(primary->allocated_to, attr,
+ primary);
- dependent_value = pe_node_attribute_raw(dependent->allocated_to, attribute);
- primary_value = pe_node_attribute_raw(primary->allocated_to, attribute);
+ primary_role_rsc = get_resource_for_role(primary);
if (!pcmk__str_eq(dependent_value, primary_value, pcmk__str_casei)) {
if ((colocation->score == INFINITY)
- && (colocation->dependent_role == RSC_ROLE_PROMOTED)) {
+ && (colocation->dependent_role == pcmk_role_promoted)) {
dependent->priority = -INFINITY;
}
return;
}
- if ((colocation->primary_role != RSC_ROLE_UNKNOWN)
- && (colocation->primary_role != primary->next_role)) {
+ if ((colocation->primary_role != pcmk_role_unknown)
+ && (colocation->primary_role != primary_role_rsc->next_role)) {
return;
}
- if (colocation->dependent_role == RSC_ROLE_UNPROMOTED) {
+ if (colocation->dependent_role == pcmk_role_unpromoted) {
score_multiplier = -1;
}
@@ -1271,11 +1439,11 @@ pcmk__apply_coloc_to_priority(pe_resource_t *dependent,
* \param[in] value Colocation attribute value to require
*/
static int
-best_node_score_matching_attr(const pe_resource_t *rsc, const char *attr,
+best_node_score_matching_attr(const pcmk_resource_t *rsc, const char *attr,
const char *value)
{
GHashTableIter iter;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
int best_score = -INFINITY;
const char *best_node = NULL;
@@ -1283,15 +1451,17 @@ best_node_score_matching_attr(const pe_resource_t *rsc, const char *attr,
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
- if ((node->weight > best_score) && pcmk__node_available(node, false, false)
- && pcmk__str_eq(value, pe_node_attribute_raw(node, attr), pcmk__str_casei)) {
+ if ((node->weight > best_score)
+ && pcmk__node_available(node, false, false)
+ && pcmk__str_eq(value, pcmk__colocation_node_attr(node, attr, rsc),
+ pcmk__str_casei)) {
best_score = node->weight;
best_node = node->details->uname;
}
}
- if (!pcmk__str_eq(attr, CRM_ATTR_UNAME, pcmk__str_casei)) {
+ if (!pcmk__str_eq(attr, CRM_ATTR_UNAME, pcmk__str_none)) {
if (best_node == NULL) {
crm_info("No allowed node for %s matches node attribute %s=%s",
rsc->id, attr, value);
@@ -1306,50 +1476,113 @@ best_node_score_matching_attr(const pe_resource_t *rsc, const char *attr,
/*!
* \internal
- * \brief Add resource's colocation matches to current node allocation scores
+ * \brief Check whether a resource is allowed only on a single node
+ *
+ * \param[in] rsc Resource to check
+ *
+ * \return \c true if \p rsc is allowed only on one node, otherwise \c false
+ */
+static bool
+allowed_on_one(const pcmk_resource_t *rsc)
+{
+ GHashTableIter iter;
+ pcmk_node_t *allowed_node = NULL;
+ int allowed_nodes = 0;
+
+ g_hash_table_iter_init(&iter, rsc->allowed_nodes);
+ while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &allowed_node)) {
+ if ((allowed_node->weight >= 0) && (++allowed_nodes > 1)) {
+ pe_rsc_trace(rsc, "%s is allowed on multiple nodes", rsc->id);
+ return false;
+ }
+ }
+ pe_rsc_trace(rsc, "%s is allowed %s", rsc->id,
+ ((allowed_nodes == 1)? "on a single node" : "nowhere"));
+ return (allowed_nodes == 1);
+}
+
+/*!
+ * \internal
+ * \brief Add resource's colocation matches to current node assignment scores
*
* For each node in a given table, if any of a given resource's allowed nodes
* have a matching value for the colocation attribute, add the highest of those
* nodes' scores to the node's score.
*
- * \param[in,out] nodes Hash table of nodes with allocation scores so far
- * \param[in] rsc Resource whose allowed nodes should be compared
- * \param[in] attr Colocation attribute that must match (NULL for default)
- * \param[in] factor Factor by which to multiply scores being added
+ * \param[in,out] nodes Table of nodes with assignment scores so far
+ * \param[in] source_rsc Resource whose node scores to add
+ * \param[in] target_rsc Resource on whose behalf to update \p nodes
+ * \param[in] colocation Original colocation constraint (used to get
+ * configured primary resource's stickiness, and
+ * to get colocation node attribute; pass NULL to
+ * ignore stickiness and use default attribute)
+ * \param[in] factor Factor by which to multiply scores being added
* \param[in] only_positive Whether to add only positive scores
*/
static void
-add_node_scores_matching_attr(GHashTable *nodes, const pe_resource_t *rsc,
- const char *attr, float factor,
- bool only_positive)
+add_node_scores_matching_attr(GHashTable *nodes,
+ const pcmk_resource_t *source_rsc,
+ const pcmk_resource_t *target_rsc,
+ const pcmk__colocation_t *colocation,
+ float factor, bool only_positive)
{
GHashTableIter iter;
- pe_node_t *node = NULL;
-
- if (attr == NULL) {
- attr = CRM_ATTR_UNAME;
- }
+ pcmk_node_t *node = NULL;
+ const char *attr = colocation->node_attribute;
// Iterate through each node
g_hash_table_iter_init(&iter, nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
- float weight_f = 0;
- int weight = 0;
+ float delta_f = 0;
+ int delta = 0;
int score = 0;
int new_score = 0;
+ const char *value = pcmk__colocation_node_attr(node, attr, target_rsc);
- score = best_node_score_matching_attr(rsc, attr,
- pe_node_attribute_raw(node, attr));
+ score = best_node_score_matching_attr(source_rsc, attr, value);
if ((factor < 0) && (score < 0)) {
- /* Negative preference for a node with a negative score
- * should not become a positive preference.
+ /* If the dependent is anti-colocated, we generally don't want the
+ * primary to prefer nodes that the dependent avoids. That could
+ * lead to unnecessary shuffling of the primary when the dependent
+ * hits its migration threshold somewhere, for example.
+ *
+ * However, there are cases when it is desirable. If the dependent
+ * can't run anywhere but where the primary is, it would be
+ * worthwhile to move the primary for the sake of keeping the
+ * dependent active.
+ *
+ * We can't know that exactly at this point since we don't know
+ * where the primary will be assigned, but we can limit considering
+ * the preference to when the dependent is allowed only on one node.
+ * This is less than ideal for multiple reasons:
+ *
+ * - the dependent could be allowed on more than one node but have
+ * anti-colocation primaries on each;
+ * - the dependent could be a clone or bundle with multiple
+ * instances, and the dependent as a whole is allowed on multiple
+             *   nodes but some instance still can't run;
+ * - the dependent has considered node-specific criteria such as
+ * location constraints and stickiness by this point, but might
+             *   have other factors that end up disallowing a node;
+ *
+ * but the alternative is making the primary move when it doesn't
+ * need to.
*
- * @TODO Consider filtering only if weight is -INFINITY
+ * We also consider the primary's stickiness and influence, so the
+ * user has some say in the matter. (This is the configured primary,
+ * not a particular instance of the primary, but that doesn't matter
+ * unless stickiness uses a rule to vary by node, and that seems
+ * acceptable to ignore.)
*/
- crm_trace("%s: Filtering %d + %f * %d (double negative disallowed)",
- pe__node_name(node), node->weight, factor, score);
- continue;
+ if ((colocation->primary->stickiness >= -score)
+ || !pcmk__colocation_has_influence(colocation, NULL)
+ || !allowed_on_one(colocation->dependent)) {
+ crm_trace("%s: Filtering %d + %f * %d "
+ "(double negative disallowed)",
+ pe__node_name(node), node->weight, factor, score);
+ continue;
+ }
}
if (node->weight == INFINITY_HACK) {
@@ -1358,24 +1591,24 @@ add_node_scores_matching_attr(GHashTable *nodes, const pe_resource_t *rsc,
continue;
}
- weight_f = factor * score;
+ delta_f = factor * score;
// Round the number; see http://c-faq.com/fp/round.html
- weight = (int) ((weight_f < 0)? (weight_f - 0.5) : (weight_f + 0.5));
+ delta = (int) ((delta_f < 0)? (delta_f - 0.5) : (delta_f + 0.5));
/* Small factors can obliterate the small scores that are often actually
* used in configurations. If the score and factor are nonzero, ensure
* that the result is nonzero as well.
*/
- if ((weight == 0) && (score != 0)) {
+ if ((delta == 0) && (score != 0)) {
if (factor > 0.0) {
- weight = 1;
+ delta = 1;
} else if (factor < 0.0) {
- weight = -1;
+ delta = -1;
}
}
- new_score = pcmk__add_scores(weight, node->weight);
+ new_score = pcmk__add_scores(delta, node->weight);
if (only_positive && (new_score < 0) && (node->weight > 0)) {
crm_trace("%s: Filtering %d + %f * %d = %d "
@@ -1407,52 +1640,69 @@ add_node_scores_matching_attr(GHashTable *nodes, const pe_resource_t *rsc,
* scores of the best nodes matching the attribute used for each of the
* resource's relevant colocations.
*
- * \param[in,out] rsc Resource to check colocations for
- * \param[in] log_id Resource ID to use in logs (if NULL, use \p rsc ID)
- * \param[in,out] nodes Nodes to update
- * \param[in] attr Colocation attribute (NULL to use default)
- * \param[in] factor Incorporate scores multiplied by this factor
- * \param[in] flags Bitmask of enum pcmk__coloc_select values
+ * \param[in,out] source_rsc Resource whose node scores to add
+ * \param[in] target_rsc Resource on whose behalf to update \p *nodes
+ * \param[in] log_id Resource ID for logs (if \c NULL, use
+ * \p source_rsc ID)
+ * \param[in,out] nodes Nodes to update (set initial contents to \c NULL
+ * to copy allowed nodes from \p source_rsc)
+ * \param[in] colocation Original colocation constraint (used to get
+ * configured primary resource's stickiness, and
+ * to get colocation node attribute; if \c NULL,
+ * <tt>source_rsc</tt>'s own matching node scores
+ * will not be added, and \p *nodes must be \c NULL
+ * as well)
+ * \param[in] factor Incorporate scores multiplied by this factor
+ * \param[in] flags Bitmask of enum pcmk__coloc_select values
*
+ * \note \c NULL \p target_rsc, \c NULL \p *nodes, \c NULL \p colocation, and
+ * the \c pcmk__coloc_select_this_with flag are used together (and only by
+ * \c cmp_resources()).
* \note The caller remains responsible for freeing \p *nodes.
+ * \note This is the shared implementation of
+ * \c pcmk_assignment_methods_t:add_colocated_node_scores().
*/
void
-pcmk__add_colocated_node_scores(pe_resource_t *rsc, const char *log_id,
- GHashTable **nodes, const char *attr,
+pcmk__add_colocated_node_scores(pcmk_resource_t *source_rsc,
+ const pcmk_resource_t *target_rsc,
+ const char *log_id,
+ GHashTable **nodes,
+ const pcmk__colocation_t *colocation,
float factor, uint32_t flags)
{
GHashTable *work = NULL;
- CRM_CHECK((rsc != NULL) && (nodes != NULL), return);
+ CRM_ASSERT((source_rsc != NULL) && (nodes != NULL)
+ && ((colocation != NULL)
+ || ((target_rsc == NULL) && (*nodes == NULL))));
if (log_id == NULL) {
- log_id = rsc->id;
+ log_id = source_rsc->id;
}
// Avoid infinite recursion
- if (pcmk_is_set(rsc->flags, pe_rsc_merging)) {
- pe_rsc_info(rsc, "%s: Breaking dependency loop at %s",
- log_id, rsc->id);
+ if (pcmk_is_set(source_rsc->flags, pcmk_rsc_updating_nodes)) {
+ pe_rsc_info(source_rsc, "%s: Breaking dependency loop at %s",
+ log_id, source_rsc->id);
return;
}
- pe__set_resource_flags(rsc, pe_rsc_merging);
+ pe__set_resource_flags(source_rsc, pcmk_rsc_updating_nodes);
if (*nodes == NULL) {
- /* Only cmp_resources() passes a NULL nodes table, which indicates we
- * should initialize it with the resource's allowed node scores.
- */
- work = pcmk__copy_node_table(rsc->allowed_nodes);
+ work = pcmk__copy_node_table(source_rsc->allowed_nodes);
+ target_rsc = source_rsc;
} else {
- pe_rsc_trace(rsc, "%s: Merging scores from %s (at %.6f)",
- log_id, rsc->id, factor);
+ const bool pos = pcmk_is_set(flags, pcmk__coloc_select_nonnegative);
+
+ pe_rsc_trace(source_rsc, "%s: Merging %s scores from %s (at %.6f)",
+ log_id, (pos? "positive" : "all"), source_rsc->id, factor);
work = pcmk__copy_node_table(*nodes);
- add_node_scores_matching_attr(work, rsc, attr, factor,
- pcmk_is_set(flags,
- pcmk__coloc_select_nonnegative));
+ add_node_scores_matching_attr(work, source_rsc, target_rsc, colocation,
+ factor, pos);
}
if (work == NULL) {
- pe__clear_resource_flags(rsc, pe_rsc_merging);
+ pe__clear_resource_flags(source_rsc, pcmk_rsc_updating_nodes);
return;
}
@@ -1460,22 +1710,24 @@ pcmk__add_colocated_node_scores(pe_resource_t *rsc, const char *log_id,
GList *colocations = NULL;
if (pcmk_is_set(flags, pcmk__coloc_select_this_with)) {
- colocations = pcmk__this_with_colocations(rsc);
- pe_rsc_trace(rsc,
- "Checking additional %d optional '%s with' constraints",
- g_list_length(colocations), rsc->id);
+ colocations = pcmk__this_with_colocations(source_rsc);
+ pe_rsc_trace(source_rsc,
+ "Checking additional %d optional '%s with' "
+ "constraints",
+ g_list_length(colocations), source_rsc->id);
} else {
- colocations = pcmk__with_this_colocations(rsc);
- pe_rsc_trace(rsc,
- "Checking additional %d optional 'with %s' constraints",
- g_list_length(colocations), rsc->id);
+ colocations = pcmk__with_this_colocations(source_rsc);
+ pe_rsc_trace(source_rsc,
+ "Checking additional %d optional 'with %s' "
+ "constraints",
+ g_list_length(colocations), source_rsc->id);
}
flags |= pcmk__coloc_select_active;
for (GList *iter = colocations; iter != NULL; iter = iter->next) {
- pcmk__colocation_t *constraint = (pcmk__colocation_t *) iter->data;
+ pcmk__colocation_t *constraint = iter->data;
- pe_resource_t *other = NULL;
+ pcmk_resource_t *other = NULL;
float other_factor = factor * constraint->score / (float) INFINITY;
if (pcmk_is_set(flags, pcmk__coloc_select_this_with)) {
@@ -1486,27 +1738,29 @@ pcmk__add_colocated_node_scores(pe_resource_t *rsc, const char *log_id,
other = constraint->dependent;
}
- pe_rsc_trace(rsc, "Optionally merging score of '%s' constraint (%s with %s)",
+ pe_rsc_trace(source_rsc,
+ "Optionally merging score of '%s' constraint "
+ "(%s with %s)",
constraint->id, constraint->dependent->id,
constraint->primary->id);
- other->cmds->add_colocated_node_scores(other, log_id, &work,
- constraint->node_attribute,
+ other->cmds->add_colocated_node_scores(other, target_rsc, log_id,
+ &work, constraint,
other_factor, flags);
- pe__show_node_weights(true, NULL, log_id, work, rsc->cluster);
+ pe__show_node_scores(true, NULL, log_id, work, source_rsc->cluster);
}
g_list_free(colocations);
} else if (pcmk_is_set(flags, pcmk__coloc_select_active)) {
- pe_rsc_info(rsc, "%s: Rolling back optional scores from %s",
- log_id, rsc->id);
+ pe_rsc_info(source_rsc, "%s: Rolling back optional scores from %s",
+ log_id, source_rsc->id);
g_hash_table_destroy(work);
- pe__clear_resource_flags(rsc, pe_rsc_merging);
+ pe__clear_resource_flags(source_rsc, pcmk_rsc_updating_nodes);
return;
}
if (pcmk_is_set(flags, pcmk__coloc_select_nonnegative)) {
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, work);
@@ -1522,7 +1776,7 @@ pcmk__add_colocated_node_scores(pe_resource_t *rsc, const char *log_id,
}
*nodes = work;
- pe__clear_resource_flags(rsc, pe_rsc_merging);
+ pe__clear_resource_flags(source_rsc, pcmk_rsc_updating_nodes);
}
/*!
@@ -1535,25 +1789,83 @@ pcmk__add_colocated_node_scores(pe_resource_t *rsc, const char *log_id,
void
pcmk__add_dependent_scores(gpointer data, gpointer user_data)
{
- pcmk__colocation_t *colocation = (pcmk__colocation_t *) data;
- pe_resource_t *rsc = (pe_resource_t *) user_data;
+ pcmk__colocation_t *colocation = data;
+ pcmk_resource_t *target_rsc = user_data;
- pe_resource_t *other = colocation->dependent;
+ pcmk_resource_t *source_rsc = colocation->dependent;
const float factor = colocation->score / (float) INFINITY;
uint32_t flags = pcmk__coloc_select_active;
if (!pcmk__colocation_has_influence(colocation, NULL)) {
return;
}
- if (rsc->variant == pe_clone) {
+ if (target_rsc->variant == pcmk_rsc_variant_clone) {
flags |= pcmk__coloc_select_nonnegative;
}
- pe_rsc_trace(rsc,
+ pe_rsc_trace(target_rsc,
"%s: Incorporating attenuated %s assignment scores due "
- "to colocation %s", rsc->id, other->id, colocation->id);
- other->cmds->add_colocated_node_scores(other, rsc->id, &rsc->allowed_nodes,
- colocation->node_attribute, factor,
- flags);
+ "to colocation %s",
+ target_rsc->id, source_rsc->id, colocation->id);
+ source_rsc->cmds->add_colocated_node_scores(source_rsc, target_rsc,
+ source_rsc->id,
+ &target_rsc->allowed_nodes,
+ colocation, factor, flags);
+}
+
+/*!
+ * \internal
+ * \brief Exclude nodes from a dependent's node table if not in a given list
+ *
+ * Given a dependent resource in a colocation and a list of nodes where the
+ * primary resource will run, set a node's score to \c -INFINITY in the
+ * dependent's node table if not found in the primary nodes list.
+ *
+ * \param[in,out] dependent Dependent resource
+ * \param[in] primary Primary resource (for logging only)
+ * \param[in] colocation Colocation constraint (for logging only)
+ * \param[in] primary_nodes List of nodes where the primary will have
+ * unblocked instances in a suitable role
+ * \param[in]     merge_scores   If \c true and a node is found in both the
+ *                               dependent's table and \p primary_nodes, add
+ *                               the node's score in \p primary_nodes to the
+ *                               node's score in the dependent's table
+ */
+void
+pcmk__colocation_intersect_nodes(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
+ const pcmk__colocation_t *colocation,
+ const GList *primary_nodes, bool merge_scores)
+{
+ GHashTableIter iter;
+ pcmk_node_t *dependent_node = NULL;
+
+ CRM_ASSERT((dependent != NULL) && (primary != NULL)
+ && (colocation != NULL));
+
+ g_hash_table_iter_init(&iter, dependent->allowed_nodes);
+ while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &dependent_node)) {
+ const pcmk_node_t *primary_node = NULL;
+
+ primary_node = pe_find_node_id(primary_nodes,
+ dependent_node->details->id);
+ if (primary_node == NULL) {
+ dependent_node->weight = -INFINITY;
+ pe_rsc_trace(dependent,
+ "Banning %s from %s (no primary instance) for %s",
+ dependent->id, pe__node_name(dependent_node),
+ colocation->id);
+
+ } else if (merge_scores) {
+ dependent_node->weight = pcmk__add_scores(dependent_node->weight,
+ primary_node->weight);
+ pe_rsc_trace(dependent,
+ "Added %s's score %s to %s's score for %s (now %s) "
+ "for colocation %s",
+ primary->id, pcmk_readable_score(primary_node->weight),
+ dependent->id, pe__node_name(dependent_node),
+ pcmk_readable_score(dependent_node->weight),
+ colocation->id);
+ }
+ }
}
/*!
@@ -1567,7 +1879,7 @@ pcmk__add_dependent_scores(gpointer data, gpointer user_data)
* \note This is a convenience wrapper for the with_this_colocations() method.
*/
GList *
-pcmk__with_this_colocations(const pe_resource_t *rsc)
+pcmk__with_this_colocations(const pcmk_resource_t *rsc)
{
GList *list = NULL;
@@ -1586,7 +1898,7 @@ pcmk__with_this_colocations(const pe_resource_t *rsc)
* \note This is a convenience wrapper for the this_with_colocations() method.
*/
GList *
-pcmk__this_with_colocations(const pe_resource_t *rsc)
+pcmk__this_with_colocations(const pcmk_resource_t *rsc)
{
GList *list = NULL;
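
A minimal standalone sketch of the score-attenuation step used in add_node_scores_matching_attr() above, assuming hypothetical names (attenuate_score, EXAMPLE_INFINITY) and a simplified score range; only the rounding and the keep-nonzero rule mirror the patch.

#include <stdio.h>

#define EXAMPLE_INFINITY 1000000    /* stand-in for Pacemaker's INFINITY score */

/* Scale a matched node score by a colocation factor, rounding to the nearest
 * integer but never rounding a nonzero contribution all the way to zero.
 */
static int
attenuate_score(float factor, int score)
{
    float delta_f = factor * score;

    /* Round toward the nearest integer (see http://c-faq.com/fp/round.html) */
    int delta = (int) ((delta_f < 0)? (delta_f - 0.5) : (delta_f + 0.5));

    if ((delta == 0) && (score != 0)) {
        delta = (factor > 0.0)? 1 : ((factor < 0.0)? -1 : 0);
    }
    return delta;
}

int
main(void)
{
    /* A colocation score of 50 gives factor 50/INFINITY = 0.00005, so even a
     * node score of 100 would otherwise round to 0 and be lost.
     */
    float factor = 50 / (float) EXAMPLE_INFINITY;

    printf("%d\n", attenuate_score(factor, 100));   /* prints 1  */
    printf("%d\n", attenuate_score(-factor, 100));  /* prints -1 */
    return 0;
}
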
diff --git a/lib/pacemaker/pcmk_sched_constraints.c b/lib/pacemaker/pcmk_sched_constraints.c
index bae6827..0d1beb9 100644
--- a/lib/pacemaker/pcmk_sched_constraints.c
+++ b/lib/pacemaker/pcmk_sched_constraints.c
@@ -28,16 +28,16 @@
#include "libpacemaker_private.h"
static bool
-evaluate_lifetime(xmlNode *lifetime, pe_working_set_t *data_set)
+evaluate_lifetime(xmlNode *lifetime, pcmk_scheduler_t *scheduler)
{
bool result = FALSE;
crm_time_t *next_change = crm_time_new_undefined();
- result = pe_evaluate_rules(lifetime, NULL, data_set->now, next_change);
+ result = pe_evaluate_rules(lifetime, NULL, scheduler->now, next_change);
if (crm_time_is_defined(next_change)) {
time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change);
- pe__update_recheck_time(recheck, data_set);
+ pe__update_recheck_time(recheck, scheduler, "constraint lifetime");
}
crm_time_free(next_change);
return result;
@@ -47,15 +47,15 @@ evaluate_lifetime(xmlNode *lifetime, pe_working_set_t *data_set)
* \internal
* \brief Unpack constraints from XML
*
- * Given a cluster working set, unpack all constraints from its input XML into
+ * Given scheduler data, unpack all constraints from its input XML into
* data structures.
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__unpack_constraints(pe_working_set_t *data_set)
+pcmk__unpack_constraints(pcmk_scheduler_t *scheduler)
{
- xmlNode *xml_constraints = pcmk_find_cib_element(data_set->input,
+ xmlNode *xml_constraints = pcmk_find_cib_element(scheduler->input,
XML_CIB_TAG_CONSTRAINTS);
for (xmlNode *xml_obj = pcmk__xe_first_child(xml_constraints);
@@ -63,7 +63,7 @@ pcmk__unpack_constraints(pe_working_set_t *data_set)
xmlNode *lifetime = NULL;
const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
- const char *tag = crm_element_name(xml_obj);
+ const char *tag = (const char *) xml_obj->name;
if (id == NULL) {
pcmk__config_err("Ignoring <%s> constraint without "
@@ -81,20 +81,21 @@ pcmk__unpack_constraints(pe_working_set_t *data_set)
"constraint object)", id);
}
- if ((lifetime != NULL) && !evaluate_lifetime(lifetime, data_set)) {
+ if ((lifetime != NULL) && !evaluate_lifetime(lifetime, scheduler)) {
crm_info("Constraint %s %s is not active", tag, id);
- } else if (pcmk__str_eq(XML_CONS_TAG_RSC_ORDER, tag, pcmk__str_casei)) {
- pcmk__unpack_ordering(xml_obj, data_set);
+ } else if (pcmk__str_eq(XML_CONS_TAG_RSC_ORDER, tag, pcmk__str_none)) {
+ pcmk__unpack_ordering(xml_obj, scheduler);
- } else if (pcmk__str_eq(XML_CONS_TAG_RSC_DEPEND, tag, pcmk__str_casei)) {
- pcmk__unpack_colocation(xml_obj, data_set);
+ } else if (pcmk__str_eq(XML_CONS_TAG_RSC_DEPEND, tag, pcmk__str_none)) {
+ pcmk__unpack_colocation(xml_obj, scheduler);
- } else if (pcmk__str_eq(XML_CONS_TAG_RSC_LOCATION, tag, pcmk__str_casei)) {
- pcmk__unpack_location(xml_obj, data_set);
+ } else if (pcmk__str_eq(XML_CONS_TAG_RSC_LOCATION, tag,
+ pcmk__str_none)) {
+ pcmk__unpack_location(xml_obj, scheduler);
- } else if (pcmk__str_eq(XML_CONS_TAG_RSC_TICKET, tag, pcmk__str_casei)) {
- pcmk__unpack_rsc_ticket(xml_obj, data_set);
+ } else if (pcmk__str_eq(XML_CONS_TAG_RSC_TICKET, tag, pcmk__str_none)) {
+ pcmk__unpack_rsc_ticket(xml_obj, scheduler);
} else {
pe_err("Unsupported constraint type: %s", tag);
@@ -102,18 +103,19 @@ pcmk__unpack_constraints(pe_working_set_t *data_set)
}
}
-pe_resource_t *
+pcmk_resource_t *
pcmk__find_constraint_resource(GList *rsc_list, const char *id)
{
- GList *rIter = NULL;
-
- for (rIter = rsc_list; id && rIter; rIter = rIter->next) {
- pe_resource_t *parent = rIter->data;
- pe_resource_t *match = parent->fns->find_rsc(parent, id, NULL,
- pe_find_renamed);
+ if (id == NULL) {
+ return NULL;
+ }
+ for (GList *iter = rsc_list; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *parent = iter->data;
+ pcmk_resource_t *match = parent->fns->find_rsc(parent, id, NULL,
+ pcmk_rsc_match_history);
if (match != NULL) {
- if(!pcmk__str_eq(match->id, id, pcmk__str_casei)) {
+ if (!pcmk__str_eq(match->id, id, pcmk__str_none)) {
/* We found an instance of a clone instead */
match = uber_parent(match);
crm_debug("Found %s for %s", match->id, id);
@@ -129,21 +131,21 @@ pcmk__find_constraint_resource(GList *rsc_list, const char *id)
* \internal
* \brief Check whether an ID references a resource tag
*
- * \param[in] data_set Cluster working set
- * \param[in] id Tag ID to search for
- * \param[out] tag Where to store tag, if found
+ * \param[in] scheduler Scheduler data
+ * \param[in] id Tag ID to search for
+ * \param[out] tag Where to store tag, if found
*
* \return true if ID refers to a tagged resource or resource set template,
* otherwise false
*/
static bool
-find_constraint_tag(const pe_working_set_t *data_set, const char *id,
- pe_tag_t **tag)
+find_constraint_tag(const pcmk_scheduler_t *scheduler, const char *id,
+ pcmk_tag_t **tag)
{
*tag = NULL;
// Check whether id refers to a resource set template
- if (g_hash_table_lookup_extended(data_set->template_rsc_sets, id,
+ if (g_hash_table_lookup_extended(scheduler->template_rsc_sets, id,
NULL, (gpointer *) tag)) {
if (*tag == NULL) {
crm_warn("No resource is derived from template '%s'", id);
@@ -153,7 +155,7 @@ find_constraint_tag(const pe_working_set_t *data_set, const char *id,
}
// If not, check whether id refers to a tag
- if (g_hash_table_lookup_extended(data_set->tags, id,
+ if (g_hash_table_lookup_extended(scheduler->tags, id,
NULL, (gpointer *) tag)) {
if (*tag == NULL) {
crm_warn("No resource is tagged with '%s'", id);
@@ -170,27 +172,27 @@ find_constraint_tag(const pe_working_set_t *data_set, const char *id,
  * \internal
  * \brief Check whether an ID refers to a valid resource or tag
*
- * \param[in] data_set Cluster working set
- * \param[in] id ID to search for
- * \param[out] rsc Where to store resource, if found (or NULL to skip
- * searching resources)
- * \param[out] tag Where to store tag, if found (or NULL to skip searching
- * tags)
+ * \param[in] scheduler Scheduler data
+ * \param[in] id ID to search for
+ * \param[out] rsc Where to store resource, if found
+ * (or NULL to skip searching resources)
+ * \param[out] tag Where to store tag, if found
+ * (or NULL to skip searching tags)
*
* \return true if id refers to a resource (possibly indirectly via a tag)
*/
bool
-pcmk__valid_resource_or_tag(const pe_working_set_t *data_set, const char *id,
- pe_resource_t **rsc, pe_tag_t **tag)
+pcmk__valid_resource_or_tag(const pcmk_scheduler_t *scheduler, const char *id,
+ pcmk_resource_t **rsc, pcmk_tag_t **tag)
{
if (rsc != NULL) {
- *rsc = pcmk__find_constraint_resource(data_set->resources, id);
+ *rsc = pcmk__find_constraint_resource(scheduler->resources, id);
if (*rsc != NULL) {
return true;
}
}
- if ((tag != NULL) && find_constraint_tag(data_set, id, tag)) {
+ if ((tag != NULL) && find_constraint_tag(scheduler, id, tag)) {
return true;
}
@@ -205,14 +207,14 @@ pcmk__valid_resource_or_tag(const pe_working_set_t *data_set, const char *id,
* entries that list tags rather than resource IDs, and replace any found with
* resource_ref entries for the corresponding resource IDs.
*
- * \param[in,out] xml_obj Constraint XML
- * \param[in] data_set Cluster working set
+ * \param[in,out] xml_obj Constraint XML
+ * \param[in] scheduler Scheduler data
*
* \return Equivalent XML with resource tags replaced (or NULL if none)
* \note It is the caller's responsibility to free the result with free_xml().
*/
xmlNode *
-pcmk__expand_tags_in_sets(xmlNode *xml_obj, const pe_working_set_t *data_set)
+pcmk__expand_tags_in_sets(xmlNode *xml_obj, const pcmk_scheduler_t *scheduler)
{
xmlNode *new_xml = NULL;
bool any_refs = false;
@@ -228,15 +230,15 @@ pcmk__expand_tags_in_sets(xmlNode *xml_obj, const pe_working_set_t *data_set)
set != NULL; set = crm_next_same_xml(set)) {
GList *tag_refs = NULL;
- GList *gIter = NULL;
+ GList *iter = NULL;
for (xmlNode *xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
- pe_resource_t *rsc = NULL;
- pe_tag_t *tag = NULL;
+ pcmk_resource_t *rsc = NULL;
+ pcmk_tag_t *tag = NULL;
- if (!pcmk__valid_resource_or_tag(data_set, ID(xml_rsc), &rsc,
+ if (!pcmk__valid_resource_or_tag(scheduler, ID(xml_rsc), &rsc,
&tag)) {
pcmk__config_err("Ignoring resource sets for constraint '%s' "
"because '%s' is not a valid resource or tag",
@@ -248,37 +250,36 @@ pcmk__expand_tags_in_sets(xmlNode *xml_obj, const pe_working_set_t *data_set)
continue;
} else if (tag) {
- /* The resource_ref under the resource_set references a template/tag */
+ // resource_ref under resource_set references template or tag
xmlNode *last_ref = xml_rsc;
- /* A sample:
-
- Original XML:
-
- <resource_set id="tag1-colocation-0" sequential="true">
- <resource_ref id="rsc1"/>
- <resource_ref id="tag1"/>
- <resource_ref id="rsc4"/>
- </resource_set>
-
- Now we are appending rsc2 and rsc3 which are tagged with tag1 right after it:
-
- <resource_set id="tag1-colocation-0" sequential="true">
- <resource_ref id="rsc1"/>
- <resource_ref id="tag1"/>
- <resource_ref id="rsc2"/>
- <resource_ref id="rsc3"/>
- <resource_ref id="rsc4"/>
- </resource_set>
-
+ /* For example, given the original XML:
+ *
+ * <resource_set id="tag1-colocation-0" sequential="true">
+ * <resource_ref id="rsc1"/>
+ * <resource_ref id="tag1"/>
+ * <resource_ref id="rsc4"/>
+ * </resource_set>
+ *
+ * If rsc2 and rsc3 are tagged with tag1, we add them after it:
+ *
+ * <resource_set id="tag1-colocation-0" sequential="true">
+ * <resource_ref id="rsc1"/>
+ * <resource_ref id="tag1"/>
+ * <resource_ref id="rsc2"/>
+ * <resource_ref id="rsc3"/>
+ * <resource_ref id="rsc4"/>
+ * </resource_set>
*/
- for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) {
- const char *obj_ref = (const char *) gIter->data;
+ for (iter = tag->refs; iter != NULL; iter = iter->next) {
+ const char *obj_ref = iter->data;
xmlNode *new_rsc_ref = NULL;
- new_rsc_ref = xmlNewDocRawNode(getDocPtr(set), NULL,
- (pcmkXmlStr) XML_TAG_RESOURCE_REF, NULL);
+ new_rsc_ref = xmlNewDocRawNode(set->doc, NULL,
+ (pcmkXmlStr)
+ XML_TAG_RESOURCE_REF,
+ NULL);
crm_xml_add(new_rsc_ref, XML_ATTR_ID, obj_ref);
xmlAddNextSibling(last_ref, new_rsc_ref);
@@ -304,8 +305,8 @@ pcmk__expand_tags_in_sets(xmlNode *xml_obj, const pe_working_set_t *data_set)
</resource_set>
*/
- for (gIter = tag_refs; gIter != NULL; gIter = gIter->next) {
- xmlNode *tag_ref = gIter->data;
+ for (iter = tag_refs; iter != NULL; iter = iter->next) {
+ xmlNode *tag_ref = iter->data;
free_xml(tag_ref);
}
@@ -324,20 +325,21 @@ pcmk__expand_tags_in_sets(xmlNode *xml_obj, const pe_working_set_t *data_set)
* \brief Convert a tag into a resource set of tagged resources
*
* \param[in,out] xml_obj Constraint XML
- * \param[out] rsc_set Where to store resource set XML created based on tag
- * \param[in] attr Name of XML attribute containing resource or tag ID
- * \param[in] convert_rsc Convert to set even if \p attr references a resource
- * \param[in] data_set Cluster working set
+ * \param[out] rsc_set Where to store resource set XML
+ * \param[in] attr Name of XML attribute with resource or tag ID
+ * \param[in] convert_rsc If true, convert to set even if \p attr
+ * references a resource
+ * \param[in] scheduler Scheduler data
*/
bool
pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr,
- bool convert_rsc, const pe_working_set_t *data_set)
+ bool convert_rsc, const pcmk_scheduler_t *scheduler)
{
const char *cons_id = NULL;
const char *id = NULL;
- pe_resource_t *rsc = NULL;
- pe_tag_t *tag = NULL;
+ pcmk_resource_t *rsc = NULL;
+ pcmk_tag_t *tag = NULL;
*rsc_set = NULL;
@@ -346,7 +348,7 @@ pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr,
cons_id = ID(xml_obj);
if (cons_id == NULL) {
pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
- crm_element_name(xml_obj));
+ xml_obj->name);
return false;
}
@@ -355,22 +357,21 @@ pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr,
return true;
}
- if (!pcmk__valid_resource_or_tag(data_set, id, &rsc, &tag)) {
+ if (!pcmk__valid_resource_or_tag(scheduler, id, &rsc, &tag)) {
pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
"valid resource or tag", cons_id, id);
return false;
} else if (tag) {
- GList *gIter = NULL;
-
- /* A template/tag is referenced by the "attr" attribute (first, then, rsc or with-rsc).
- Add the template/tag's corresponding "resource_set" which contains the resources derived
- from it or tagged with it under the constraint. */
+ /* The "attr" attribute (for a resource in a constraint) specifies a
+ * template or tag. Add the corresponding resource_set containing the
+ * resources derived from or tagged with it.
+ */
*rsc_set = create_xml_node(xml_obj, XML_CONS_TAG_RSC_SET);
crm_xml_add(*rsc_set, XML_ATTR_ID, id);
- for (gIter = tag->refs; gIter != NULL; gIter = gIter->next) {
- const char *obj_ref = (const char *) gIter->data;
+ for (GList *iter = tag->refs; iter != NULL; iter = iter->next) {
+ const char *obj_ref = iter->data;
xmlNode *rsc_ref = NULL;
rsc_ref = create_xml_node(*rsc_set, XML_TAG_RESOURCE_REF);
@@ -381,8 +382,10 @@ pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr,
pcmk__xe_set_bool_attr(*rsc_set, "sequential", false);
} else if ((rsc != NULL) && convert_rsc) {
- /* Even a regular resource is referenced by "attr", convert it into a resource_set.
- Because the other side of the constraint could be a template/tag reference. */
+ /* Even if a regular resource is referenced by "attr", convert it into a
+ * resource_set, because the other resource reference in the constraint
+ * could be a template or tag.
+ */
xmlNode *rsc_ref = NULL;
*rsc_set = create_xml_node(xml_obj, XML_CONS_TAG_RSC_SET);
@@ -407,14 +410,14 @@ pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr,
* \internal
* \brief Create constraints inherent to resource types
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__create_internal_constraints(pe_working_set_t *data_set)
+pcmk__create_internal_constraints(pcmk_scheduler_t *scheduler)
{
crm_trace("Create internal constraints");
- for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
rsc->cmds->internal_constraints(rsc);
}
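
The resource_set expansion that pcmk__expand_tags_in_sets() performs on libxml2 nodes can be pictured with plain strings; the sketch below is hypothetical (fixed IDs, no XML handling) and only reproduces the ordering shown in the rsc1/tag1/rsc4 example in the comment above.

#include <stdio.h>
#include <string.h>

int
main(void)
{
    /* References in the original resource_set; "tag1" names a tag, not a resource */
    const char *refs[] = { "rsc1", "tag1", "rsc4" };
    /* Resources tagged with tag1 */
    const char *tagged[] = { "rsc2", "rsc3" };

    /* The expanded set keeps the surrounding references in order and replaces
     * the tag reference with the resources derived from it.
     */
    for (size_t i = 0; i < sizeof(refs) / sizeof(refs[0]); i++) {
        if (strcmp(refs[i], "tag1") == 0) {
            for (size_t j = 0; j < sizeof(tagged) / sizeof(tagged[0]); j++) {
                printf("<resource_ref id=\"%s\"/>\n", tagged[j]);
            }
        } else {
            printf("<resource_ref id=\"%s\"/>\n", refs[i]);
        }
    }
    return 0;
}
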
diff --git a/lib/pacemaker/pcmk_sched_fencing.c b/lib/pacemaker/pcmk_sched_fencing.c
index c912640..3fe9ebc 100644
--- a/lib/pacemaker/pcmk_sched_fencing.c
+++ b/lib/pacemaker/pcmk_sched_fencing.c
@@ -26,14 +26,15 @@
* \return TRUE if resource (or parent if an anonymous clone) is known
*/
static bool
-rsc_is_known_on(const pe_resource_t *rsc, const pe_node_t *node)
+rsc_is_known_on(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
- if (pe_hash_table_lookup(rsc->known_on, node->details->id)) {
+ if (g_hash_table_lookup(rsc->known_on, node->details->id) != NULL) {
return TRUE;
- } else if ((rsc->variant == pe_native)
+ } else if ((rsc->variant == pcmk_rsc_variant_primitive)
&& pe_rsc_is_anon_clone(rsc->parent)
- && pe_hash_table_lookup(rsc->parent->known_on, node->details->id)) {
+ && (g_hash_table_lookup(rsc->parent->known_on,
+ node->details->id) != NULL)) {
/* We check only the parent, not the uber-parent, because we cannot
* assume that the resource is known if it is in an anonymously cloned
* group (which may be only partially known).
@@ -51,29 +52,30 @@ rsc_is_known_on(const pe_resource_t *rsc, const pe_node_t *node)
* \param[in,out] stonith_op Fence action
*/
static void
-order_start_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op)
+order_start_vs_fencing(pcmk_resource_t *rsc, pcmk_action_t *stonith_op)
{
- pe_node_t *target;
- GList *gIter = NULL;
+ pcmk_node_t *target;
CRM_CHECK(stonith_op && stonith_op->node, return);
target = stonith_op->node;
- for (gIter = rsc->actions; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ for (GList *iter = rsc->actions; iter != NULL; iter = iter->next) {
+ pcmk_action_t *action = iter->data;
switch (action->needs) {
- case rsc_req_nothing:
+ case pcmk_requires_nothing:
// Anything other than start or promote requires nothing
break;
- case rsc_req_stonith:
- order_actions(stonith_op, action, pe_order_optional);
+ case pcmk_requires_fencing:
+ order_actions(stonith_op, action, pcmk__ar_ordered);
break;
- case rsc_req_quorum:
- if (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)
- && pe_hash_table_lookup(rsc->allowed_nodes, target->details->id)
+ case pcmk_requires_quorum:
+ if (pcmk__str_eq(action->task, PCMK_ACTION_START,
+ pcmk__str_none)
+ && (g_hash_table_lookup(rsc->allowed_nodes,
+ target->details->id) != NULL)
&& !rsc_is_known_on(rsc, target)) {
/* If we don't know the status of the resource on the node
@@ -85,10 +87,11 @@ order_start_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op)
* The most likely explanation is that the DC died and took
* its status with it.
*/
- pe_rsc_debug(rsc, "Ordering %s after %s recovery", action->uuid,
- pe__node_name(target));
+ pe_rsc_debug(rsc, "Ordering %s after %s recovery",
+ action->uuid, pe__node_name(target));
order_actions(stonith_op, action,
- pe_order_optional | pe_order_runnable_left);
+ pcmk__ar_ordered
+ |pcmk__ar_unrunnable_first_blocks);
}
break;
}
@@ -103,21 +106,21 @@ order_start_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op)
* \param[in,out] stonith_op Fence action
*/
static void
-order_stop_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op)
+order_stop_vs_fencing(pcmk_resource_t *rsc, pcmk_action_t *stonith_op)
{
- GList *gIter = NULL;
+ GList *iter = NULL;
GList *action_list = NULL;
bool order_implicit = false;
- pe_resource_t *top = uber_parent(rsc);
- pe_action_t *parent_stop = NULL;
- pe_node_t *target;
+ pcmk_resource_t *top = uber_parent(rsc);
+ pcmk_action_t *parent_stop = NULL;
+ pcmk_node_t *target;
CRM_CHECK(stonith_op && stonith_op->node, return);
target = stonith_op->node;
/* Get a list of stop actions potentially implied by the fencing */
- action_list = pe__resource_actions(rsc, target, RSC_STOP, FALSE);
+ action_list = pe__resource_actions(rsc, target, PCMK_ACTION_STOP, FALSE);
/* If resource requires fencing, implicit actions must occur after fencing.
*
@@ -125,25 +128,24 @@ order_stop_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op)
* ordered after fencing, even if the resource does not require fencing,
* because guest node "fencing" is actually just a resource stop.
*/
- if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)
|| pe__is_guest_node(target)) {
order_implicit = true;
}
if (action_list && order_implicit) {
- parent_stop = find_first_action(top->actions, NULL, RSC_STOP, NULL);
+ parent_stop = find_first_action(top->actions, NULL, PCMK_ACTION_STOP,
+ NULL);
}
- for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ for (iter = action_list; iter != NULL; iter = iter->next) {
+ pcmk_action_t *action = iter->data;
// The stop would never complete, so convert it into a pseudo-action.
- pe__set_action_flags(action, pe_action_pseudo|pe_action_runnable);
+ pe__set_action_flags(action, pcmk_action_pseudo|pcmk_action_runnable);
if (order_implicit) {
- pe__set_action_flags(action, pe_action_implied_by_stonith);
-
/* Order the stonith before the parent stop (if any).
*
* Also order the stonith before the resource stop, unless the
@@ -152,17 +154,17 @@ order_stop_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op)
*
* User constraints must not order a resource in a guest node
* relative to the guest node container resource. The
- * pe_order_preserve flag marks constraints as generated by the
+ * pcmk__ar_guest_allowed flag marks constraints as generated by the
* cluster and thus immune to that check (and is irrelevant if
* target is not a guest).
*/
if (!pe_rsc_is_bundled(rsc)) {
- order_actions(stonith_op, action, pe_order_preserve);
+ order_actions(stonith_op, action, pcmk__ar_guest_allowed);
}
- order_actions(stonith_op, parent_stop, pe_order_preserve);
+ order_actions(stonith_op, parent_stop, pcmk__ar_guest_allowed);
}
- if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
crm_notice("Stop of failed resource %s is implicit %s %s is fenced",
rsc->id, (order_implicit? "after" : "because"),
pe__node_name(target));
@@ -172,7 +174,7 @@ order_stop_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op)
pe__node_name(target));
}
- if (pcmk_is_set(rsc->flags, pe_rsc_notify)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_notify)) {
pe__order_notifs_after_fencing(action, rsc, stonith_op);
}
@@ -198,25 +200,26 @@ order_stop_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op)
crm_info("Moving healthy resource %s off %s before fencing",
rsc->id, pe__node_name(node));
pcmk__new_ordering(rsc, stop_key(rsc), NULL, NULL,
- strdup(CRM_OP_FENCE), stonith_op,
- pe_order_optional, rsc->cluster);
+ strdup(PCMK_ACTION_STONITH), stonith_op,
+ pcmk__ar_ordered, rsc->cluster);
#endif
}
g_list_free(action_list);
/* Get a list of demote actions potentially implied by the fencing */
- action_list = pe__resource_actions(rsc, target, RSC_DEMOTE, FALSE);
+ action_list = pe__resource_actions(rsc, target, PCMK_ACTION_DEMOTE, FALSE);
- for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ for (iter = action_list; iter != NULL; iter = iter->next) {
+ pcmk_action_t *action = iter->data;
if (!(action->node->details->online) || action->node->details->unclean
- || pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ || pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
- if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
pe_rsc_info(rsc,
- "Demote of failed resource %s is implicit after %s is fenced",
+ "Demote of failed resource %s is implicit "
+ "after %s is fenced",
rsc->id, pe__node_name(target));
} else {
pe_rsc_info(rsc, "%s is implicit after %s is fenced",
@@ -226,13 +229,15 @@ order_stop_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op)
/* The demote would never complete and is now implied by the
* fencing, so convert it into a pseudo-action.
*/
- pe__set_action_flags(action, pe_action_pseudo|pe_action_runnable);
+ pe__set_action_flags(action,
+ pcmk_action_pseudo|pcmk_action_runnable);
if (pe_rsc_is_bundled(rsc)) {
- // Do nothing, let recovery be ordered after parent's implied stop
+ // Recovery will be ordered as usual after parent's implied stop
} else if (order_implicit) {
- order_actions(stonith_op, action, pe_order_preserve|pe_order_optional);
+ order_actions(stonith_op, action,
+ pcmk__ar_guest_allowed|pcmk__ar_ordered);
}
}
}
@@ -248,18 +253,16 @@ order_stop_vs_fencing(pe_resource_t *rsc, pe_action_t *stonith_op)
* \param[in,out] stonith_op Fencing operation to be ordered against
*/
static void
-rsc_stonith_ordering(pe_resource_t *rsc, pe_action_t *stonith_op)
+rsc_stonith_ordering(pcmk_resource_t *rsc, pcmk_action_t *stonith_op)
{
if (rsc->children) {
- GList *gIter = NULL;
-
- for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *child_rsc = iter->data;
rsc_stonith_ordering(child_rsc, stonith_op);
}
- } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ } else if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pe_rsc_trace(rsc,
"Skipping fencing constraints for unmanaged resource: %s",
rsc->id);
@@ -279,14 +282,14 @@ rsc_stonith_ordering(pe_resource_t *rsc, pe_action_t *stonith_op)
* pseudo-actions, etc.
*
* \param[in,out] stonith_op Fencing operation
- * \param[in,out] data_set Working set of cluster
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__order_vs_fence(pe_action_t *stonith_op, pe_working_set_t *data_set)
+pcmk__order_vs_fence(pcmk_action_t *stonith_op, pcmk_scheduler_t *scheduler)
{
- CRM_CHECK(stonith_op && data_set, return);
- for (GList *r = data_set->resources; r != NULL; r = r->next) {
- rsc_stonith_ordering((pe_resource_t *) r->data, stonith_op);
+ CRM_CHECK(stonith_op && scheduler, return);
+ for (GList *r = scheduler->resources; r != NULL; r = r->next) {
+ rsc_stonith_ordering((pcmk_resource_t *) r->data, stonith_op);
}
}
@@ -300,8 +303,9 @@ pcmk__order_vs_fence(pe_action_t *stonith_op, pe_working_set_t *data_set)
* \param[in] order Ordering flags
*/
void
-pcmk__order_vs_unfence(const pe_resource_t *rsc, pe_node_t *node,
- pe_action_t *action, enum pe_ordering order)
+pcmk__order_vs_unfence(const pcmk_resource_t *rsc, pcmk_node_t *node,
+ pcmk_action_t *action,
+ enum pcmk__action_relation_flags order)
{
/* When unfencing is in use, we order unfence actions before any probe or
* start of resources that require unfencing, and also of fence devices.
@@ -310,16 +314,16 @@ pcmk__order_vs_unfence(const pe_resource_t *rsc, pe_node_t *node,
* only quorum. However, fence agents that unfence often don't have enough
* information to even probe or start unless the node is first unfenced.
*/
- if ((pcmk_is_set(rsc->flags, pe_rsc_fence_device)
- && pcmk_is_set(rsc->cluster->flags, pe_flag_enable_unfencing))
- || pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) {
+ if ((pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)
+ && pcmk_is_set(rsc->cluster->flags, pcmk_sched_enable_unfencing))
+ || pcmk_is_set(rsc->flags, pcmk_rsc_needs_unfencing)) {
/* Start with an optional ordering. Requiring unfencing would result in
* the node being unfenced, and all its resources being stopped,
* whenever a new resource is added -- which would be highly suboptimal.
*/
- pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE,
- node->details->data_set);
+ pcmk_action_t *unfence = pe_fence_op(node, PCMK_ACTION_ON, TRUE, NULL,
+ FALSE, node->details->data_set);
order_actions(unfence, action, order);
@@ -342,11 +346,11 @@ pcmk__order_vs_unfence(const pe_resource_t *rsc, pe_node_t *node,
* \param[in,out] node Guest node to fence
*/
void
-pcmk__fence_guest(pe_node_t *node)
+pcmk__fence_guest(pcmk_node_t *node)
{
- pe_resource_t *container = NULL;
- pe_action_t *stop = NULL;
- pe_action_t *stonith_op = NULL;
+ pcmk_resource_t *container = NULL;
+ pcmk_action_t *stop = NULL;
+ pcmk_action_t *stonith_op = NULL;
/* The fence action is just a label; we don't do anything differently for
* off vs. reboot. We specify it explicitly, rather than let it default to
@@ -354,7 +358,7 @@ pcmk__fence_guest(pe_node_t *node)
* are creating a pseudo-event to describe fencing that is already occurring
* by other means (container recovery).
*/
- const char *fence_action = "off";
+ const char *fence_action = PCMK_ACTION_OFF;
CRM_ASSERT(node != NULL);
@@ -363,12 +367,12 @@ pcmk__fence_guest(pe_node_t *node)
*/
container = node->details->remote_rsc->container;
if (container) {
- stop = find_first_action(container->actions, NULL, CRMD_ACTION_STOP,
+ stop = find_first_action(container->actions, NULL, PCMK_ACTION_STOP,
NULL);
- if (find_first_action(container->actions, NULL, CRMD_ACTION_START,
+ if (find_first_action(container->actions, NULL, PCMK_ACTION_START,
NULL)) {
- fence_action = "reboot";
+ fence_action = PCMK_ACTION_REBOOT;
}
}
@@ -377,14 +381,14 @@ pcmk__fence_guest(pe_node_t *node)
*/
stonith_op = pe_fence_op(node, fence_action, FALSE, "guest is unclean",
FALSE, node->details->data_set);
- pe__set_action_flags(stonith_op, pe_action_pseudo|pe_action_runnable);
+ pe__set_action_flags(stonith_op, pcmk_action_pseudo|pcmk_action_runnable);
/* We want to imply stops/demotes after the guest is stopped, not wait until
* it is restarted, so we always order pseudo-fencing after stop, not start
* (even though start might be closer to what is done for a real reboot).
*/
- if ((stop != NULL) && pcmk_is_set(stop->flags, pe_action_pseudo)) {
- pe_action_t *parent_stonith_op = pe_fence_op(stop->node, NULL, FALSE,
+ if ((stop != NULL) && pcmk_is_set(stop->flags, pcmk_action_pseudo)) {
+ pcmk_action_t *parent_stonith_op = pe_fence_op(stop->node, NULL, FALSE,
NULL, FALSE,
node->details->data_set);
@@ -392,11 +396,13 @@ pcmk__fence_guest(pe_node_t *node)
pe__node_name(node), stonith_op->id,
pe__node_name(stop->node));
order_actions(parent_stonith_op, stonith_op,
- pe_order_runnable_left|pe_order_implies_then);
+ pcmk__ar_unrunnable_first_blocks
+ |pcmk__ar_first_implies_then);
} else if (stop) {
order_actions(stop, stonith_op,
- pe_order_runnable_left|pe_order_implies_then);
+ pcmk__ar_unrunnable_first_blocks
+ |pcmk__ar_first_implies_then);
crm_info("Implying guest %s is down (action %d) "
"after container %s is stopped (action %d)",
pe__node_name(node), stonith_op->id,
@@ -410,10 +416,10 @@ pcmk__fence_guest(pe_node_t *node)
* which will be ordered after any container (re-)probe.
*/
stop = find_first_action(node->details->remote_rsc->actions, NULL,
- RSC_STOP, NULL);
+ PCMK_ACTION_STOP, NULL);
if (stop) {
- order_actions(stop, stonith_op, pe_order_optional);
+ order_actions(stop, stonith_op, pcmk__ar_ordered);
crm_info("Implying guest %s is down (action %d) "
"after connection is stopped (action %d)",
pe__node_name(node), stonith_op->id, stop->id);
@@ -440,7 +446,7 @@ pcmk__fence_guest(pe_node_t *node)
* otherwise false
*/
bool
-pcmk__node_unfenced(const pe_node_t *node)
+pcmk__node_unfenced(const pcmk_node_t *node)
{
const char *unfenced = pe_node_attribute_raw(node, CRM_ATTR_UNFENCED);
@@ -457,11 +463,11 @@ pcmk__node_unfenced(const pe_node_t *node)
void
pcmk__order_restart_vs_unfence(gpointer data, gpointer user_data)
{
- pe_node_t *node = (pe_node_t *) data;
- pe_resource_t *rsc = (pe_resource_t *) user_data;
+ pcmk_node_t *node = (pcmk_node_t *) data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) user_data;
- pe_action_t *unfence = pe_fence_op(node, "on", true, NULL, false,
- rsc->cluster);
+ pcmk_action_t *unfence = pe_fence_op(node, PCMK_ACTION_ON, true, NULL,
+ false, rsc->cluster);
crm_debug("Ordering any stops of %s before %s, and any starts after",
rsc->id, unfence->uuid);
@@ -483,11 +489,12 @@ pcmk__order_restart_vs_unfence(gpointer data, gpointer user_data)
*/
pcmk__new_ordering(rsc, stop_key(rsc), NULL,
NULL, strdup(unfence->uuid), unfence,
- pe_order_optional|pe_order_same_node,
+ pcmk__ar_ordered|pcmk__ar_if_on_same_node,
rsc->cluster);
pcmk__new_ordering(NULL, strdup(unfence->uuid), unfence,
rsc, start_key(rsc), NULL,
- pe_order_implies_then_on_node|pe_order_same_node,
+ pcmk__ar_first_implies_same_node_then
+ |pcmk__ar_if_on_same_node,
rsc->cluster);
}
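
For the guest-node fencing changes above, the choice of pseudo-fence action in pcmk__fence_guest() reduces to whether the container is scheduled to start again; this is a hypothetical reduction of that decision (the real code inspects container->actions with find_first_action() and uses the PCMK_ACTION_* constants).

#include <stdbool.h>
#include <stdio.h>

/* Guest "fencing" is a pseudo-event describing recovery already happening by
 * other means: a scheduled container restart maps to "reboot", otherwise "off".
 */
static const char *
guest_fence_action(bool container_start_scheduled)
{
    return container_start_scheduled? "reboot" : "off";
}

int
main(void)
{
    printf("%s\n", guest_fence_action(true));   /* prints reboot */
    printf("%s\n", guest_fence_action(false));  /* prints off    */
    return 0;
}
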
diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c
index cb139f7..9983c1f 100644
--- a/lib/pacemaker/pcmk_sched_group.c
+++ b/lib/pacemaker/pcmk_sched_group.c
@@ -20,23 +20,33 @@
* \internal
* \brief Assign a group resource to a node
*
- * \param[in,out] rsc Group resource to assign to a node
- * \param[in] prefer Node to prefer, if all else is equal
+ * \param[in,out] rsc Group resource to assign to a node
+ * \param[in] prefer Node to prefer, if all else is equal
+ * \param[in] stop_if_fail If \c true and a child of \p rsc can't be
+ * assigned to a node, set the child's next role to
+ * stopped and update existing actions
*
* \return Node that \p rsc is assigned to, if assigned entirely to one node
+ *
+ * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
+ * completely undo the assignment. A successful assignment can be either
+ * undone or left alone as final. A failed assignment has the same effect
+ * as calling pcmk__unassign_resource(); there are no side effects on
+ * roles or actions.
*/
-pe_node_t *
-pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer)
+pcmk_node_t *
+pcmk__group_assign(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
+ bool stop_if_fail)
{
- pe_node_t *first_assigned_node = NULL;
- pe_resource_t *first_member = NULL;
+ pcmk_node_t *first_assigned_node = NULL;
+ pcmk_resource_t *first_member = NULL;
- CRM_ASSERT(rsc != NULL);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
- if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
return rsc->allocated_to; // Assignment already done
}
- if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_assigning)) {
pe_rsc_debug(rsc, "Assignment dependency loop detected involving %s",
rsc->id);
return NULL;
@@ -44,33 +54,34 @@ pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer)
if (rsc->children == NULL) {
// No members to assign
- pe__clear_resource_flags(rsc, pe_rsc_provisional);
+ pe__clear_resource_flags(rsc, pcmk_rsc_unassigned);
return NULL;
}
- pe__set_resource_flags(rsc, pe_rsc_allocating);
- first_member = (pe_resource_t *) rsc->children->data;
+ pe__set_resource_flags(rsc, pcmk_rsc_assigning);
+ first_member = (pcmk_resource_t *) rsc->children->data;
rsc->role = first_member->role;
- pe__show_node_weights(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
- rsc, __func__, rsc->allowed_nodes, rsc->cluster);
+ pe__show_node_scores(!pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_output_scores),
+ rsc, __func__, rsc->allowed_nodes, rsc->cluster);
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *member = (pe_resource_t *) iter->data;
- pe_node_t *node = NULL;
+ pcmk_resource_t *member = (pcmk_resource_t *) iter->data;
+ pcmk_node_t *node = NULL;
pe_rsc_trace(rsc, "Assigning group %s member %s",
rsc->id, member->id);
- node = member->cmds->assign(member, prefer);
+ node = member->cmds->assign(member, prefer, stop_if_fail);
if (first_assigned_node == NULL) {
first_assigned_node = node;
}
}
pe__set_next_role(rsc, first_member->next_role, "first group member");
- pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional);
+ pe__clear_resource_flags(rsc, pcmk_rsc_assigning|pcmk_rsc_unassigned);
- if (!pe__group_flag_is_set(rsc, pe__group_colocated)) {
+ if (!pe__group_flag_is_set(rsc, pcmk__group_colocated)) {
return NULL;
}
return first_assigned_node;
@@ -85,12 +96,12 @@ pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer)
*
* \return Newly created pseudo-operation
*/
-static pe_action_t *
-create_group_pseudo_op(pe_resource_t *group, const char *action)
+static pcmk_action_t *
+create_group_pseudo_op(pcmk_resource_t *group, const char *action)
{
- pe_action_t *op = custom_action(group, pcmk__op_key(group->id, action, 0),
- action, NULL, TRUE, TRUE, group->cluster);
- pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
+ pcmk_action_t *op = custom_action(group, pcmk__op_key(group->id, action, 0),
+ action, NULL, TRUE, group->cluster);
+ pe__set_action_flags(op, pcmk_action_pseudo|pcmk_action_runnable);
return op;
}
@@ -101,29 +112,29 @@ create_group_pseudo_op(pe_resource_t *group, const char *action)
* \param[in,out] rsc Group resource to create actions for
*/
void
-pcmk__group_create_actions(pe_resource_t *rsc)
+pcmk__group_create_actions(pcmk_resource_t *rsc)
{
- CRM_ASSERT(rsc != NULL);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
pe_rsc_trace(rsc, "Creating actions for group %s", rsc->id);
// Create actions for individual group members
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *member = (pe_resource_t *) iter->data;
+ pcmk_resource_t *member = (pcmk_resource_t *) iter->data;
member->cmds->create_actions(member);
}
// Create pseudo-actions for group itself to serve as ordering points
- create_group_pseudo_op(rsc, RSC_START);
- create_group_pseudo_op(rsc, RSC_STARTED);
- create_group_pseudo_op(rsc, RSC_STOP);
- create_group_pseudo_op(rsc, RSC_STOPPED);
+ create_group_pseudo_op(rsc, PCMK_ACTION_START);
+ create_group_pseudo_op(rsc, PCMK_ACTION_RUNNING);
+ create_group_pseudo_op(rsc, PCMK_ACTION_STOP);
+ create_group_pseudo_op(rsc, PCMK_ACTION_STOPPED);
if (crm_is_true(g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_PROMOTABLE))) {
- create_group_pseudo_op(rsc, RSC_DEMOTE);
- create_group_pseudo_op(rsc, RSC_DEMOTED);
- create_group_pseudo_op(rsc, RSC_PROMOTE);
- create_group_pseudo_op(rsc, RSC_PROMOTED);
+ create_group_pseudo_op(rsc, PCMK_ACTION_DEMOTE);
+ create_group_pseudo_op(rsc, PCMK_ACTION_DEMOTED);
+ create_group_pseudo_op(rsc, PCMK_ACTION_PROMOTE);
+ create_group_pseudo_op(rsc, PCMK_ACTION_PROMOTED);
}
}
@@ -134,8 +145,8 @@ struct member_data {
bool colocated;
bool promotable;
- pe_resource_t *last_active;
- pe_resource_t *previous_member;
+ pcmk_resource_t *last_active;
+ pcmk_resource_t *previous_member;
};
/*!
@@ -148,14 +159,14 @@ struct member_data {
static void
member_internal_constraints(gpointer data, gpointer user_data)
{
- pe_resource_t *member = (pe_resource_t *) data;
+ pcmk_resource_t *member = (pcmk_resource_t *) data;
struct member_data *member_data = (struct member_data *) user_data;
// For ordering demote vs demote or stop vs stop
- uint32_t down_flags = pe_order_implies_first_printed;
+ uint32_t down_flags = pcmk__ar_then_implies_first_graphed;
// For ordering demote vs demoted or stop vs stopped
- uint32_t post_down_flags = pe_order_implies_then_printed;
+ uint32_t post_down_flags = pcmk__ar_first_implies_then_graphed;
// Create the individual member's implicit constraints
member->cmds->internal_constraints(member);
@@ -163,76 +174,85 @@ member_internal_constraints(gpointer data, gpointer user_data)
if (member_data->previous_member == NULL) {
// This is first member
if (member_data->ordered) {
- pe__set_order_flags(down_flags, pe_order_optional);
- post_down_flags = pe_order_implies_then;
+ pe__set_order_flags(down_flags, pcmk__ar_ordered);
+ post_down_flags = pcmk__ar_first_implies_then;
}
} else if (member_data->colocated) {
+ uint32_t flags = pcmk__coloc_none;
+
+ if (pcmk_is_set(member->flags, pcmk_rsc_critical)) {
+ flags |= pcmk__coloc_influence;
+ }
+
// Colocate this member with the previous one
- pcmk__new_colocation("group:internal_colocation", NULL, INFINITY,
- member, member_data->previous_member, NULL, NULL,
- pcmk_is_set(member->flags, pe_rsc_critical),
- member->cluster);
+ pcmk__new_colocation("#group-members", NULL, INFINITY, member,
+ member_data->previous_member, NULL, NULL, flags);
}
if (member_data->promotable) {
// Demote group -> demote member -> group is demoted
- pcmk__order_resource_actions(member->parent, RSC_DEMOTE,
- member, RSC_DEMOTE, down_flags);
- pcmk__order_resource_actions(member, RSC_DEMOTE,
- member->parent, RSC_DEMOTED,
+ pcmk__order_resource_actions(member->parent, PCMK_ACTION_DEMOTE,
+ member, PCMK_ACTION_DEMOTE, down_flags);
+ pcmk__order_resource_actions(member, PCMK_ACTION_DEMOTE,
+ member->parent, PCMK_ACTION_DEMOTED,
post_down_flags);
// Promote group -> promote member -> group is promoted
- pcmk__order_resource_actions(member, RSC_PROMOTE,
- member->parent, RSC_PROMOTED,
- pe_order_runnable_left
- |pe_order_implies_then
- |pe_order_implies_then_printed);
- pcmk__order_resource_actions(member->parent, RSC_PROMOTE,
- member, RSC_PROMOTE,
- pe_order_implies_first_printed);
+ pcmk__order_resource_actions(member, PCMK_ACTION_PROMOTE,
+ member->parent, PCMK_ACTION_PROMOTED,
+ pcmk__ar_unrunnable_first_blocks
+ |pcmk__ar_first_implies_then
+ |pcmk__ar_first_implies_then_graphed);
+ pcmk__order_resource_actions(member->parent, PCMK_ACTION_PROMOTE,
+ member, PCMK_ACTION_PROMOTE,
+ pcmk__ar_then_implies_first_graphed);
}
// Stop group -> stop member -> group is stopped
pcmk__order_stops(member->parent, member, down_flags);
- pcmk__order_resource_actions(member, RSC_STOP, member->parent, RSC_STOPPED,
+ pcmk__order_resource_actions(member, PCMK_ACTION_STOP,
+ member->parent, PCMK_ACTION_STOPPED,
post_down_flags);
// Start group -> start member -> group is started
- pcmk__order_starts(member->parent, member, pe_order_implies_first_printed);
- pcmk__order_resource_actions(member, RSC_START, member->parent, RSC_STARTED,
- pe_order_runnable_left
- |pe_order_implies_then
- |pe_order_implies_then_printed);
+ pcmk__order_starts(member->parent, member,
+ pcmk__ar_then_implies_first_graphed);
+ pcmk__order_resource_actions(member, PCMK_ACTION_START,
+ member->parent, PCMK_ACTION_RUNNING,
+ pcmk__ar_unrunnable_first_blocks
+ |pcmk__ar_first_implies_then
+ |pcmk__ar_first_implies_then_graphed);
if (!member_data->ordered) {
pcmk__order_starts(member->parent, member,
- pe_order_implies_then
- |pe_order_runnable_left
- |pe_order_implies_first_printed);
+ pcmk__ar_first_implies_then
+ |pcmk__ar_unrunnable_first_blocks
+ |pcmk__ar_then_implies_first_graphed);
if (member_data->promotable) {
- pcmk__order_resource_actions(member->parent, RSC_PROMOTE, member,
- RSC_PROMOTE,
- pe_order_implies_then
- |pe_order_runnable_left
- |pe_order_implies_first_printed);
+ pcmk__order_resource_actions(member->parent, PCMK_ACTION_PROMOTE,
+ member, PCMK_ACTION_PROMOTE,
+ pcmk__ar_first_implies_then
+ |pcmk__ar_unrunnable_first_blocks
+ |pcmk__ar_then_implies_first_graphed);
}
} else if (member_data->previous_member == NULL) {
- pcmk__order_starts(member->parent, member, pe_order_none);
+ pcmk__order_starts(member->parent, member, pcmk__ar_none);
if (member_data->promotable) {
- pcmk__order_resource_actions(member->parent, RSC_PROMOTE, member,
- RSC_PROMOTE, pe_order_none);
+ pcmk__order_resource_actions(member->parent, PCMK_ACTION_PROMOTE,
+ member, PCMK_ACTION_PROMOTE,
+ pcmk__ar_none);
}
} else {
// Order this member relative to the previous one
pcmk__order_starts(member_data->previous_member, member,
- pe_order_implies_then|pe_order_runnable_left);
+ pcmk__ar_first_implies_then
+ |pcmk__ar_unrunnable_first_blocks);
pcmk__order_stops(member, member_data->previous_member,
- pe_order_optional|pe_order_restart);
+ pcmk__ar_ordered|pcmk__ar_intermediate_stop);
/* In unusual circumstances (such as adding a new member to the middle
* of a group with unmanaged later members), this member may be active
@@ -242,20 +262,22 @@ member_internal_constraints(gpointer data, gpointer user_data)
*/
if ((member->running_on != NULL)
&& (member_data->previous_member->running_on == NULL)) {
- pcmk__order_resource_actions(member, RSC_STOP,
- member_data->previous_member, RSC_START,
- pe_order_implies_first
- |pe_order_runnable_left);
+ pcmk__order_resource_actions(member, PCMK_ACTION_STOP,
+ member_data->previous_member,
+ PCMK_ACTION_START,
+ pcmk__ar_then_implies_first
+ |pcmk__ar_unrunnable_first_blocks);
}
if (member_data->promotable) {
pcmk__order_resource_actions(member_data->previous_member,
- RSC_PROMOTE, member, RSC_PROMOTE,
- pe_order_implies_then
- |pe_order_runnable_left);
- pcmk__order_resource_actions(member, RSC_DEMOTE,
+ PCMK_ACTION_PROMOTE, member,
+ PCMK_ACTION_PROMOTE,
+ pcmk__ar_first_implies_then
+ |pcmk__ar_unrunnable_first_blocks);
+ pcmk__order_resource_actions(member, PCMK_ACTION_DEMOTE,
member_data->previous_member,
- RSC_DEMOTE, pe_order_optional);
+ PCMK_ACTION_DEMOTE, pcmk__ar_ordered);
}
}
@@ -265,7 +287,8 @@ member_internal_constraints(gpointer data, gpointer user_data)
&& (member_data->previous_member->running_on == NULL)
&& (member_data->last_active != NULL)
&& (member_data->last_active->running_on != NULL)) {
- pcmk__order_stops(member, member_data->last_active, pe_order_optional);
+ pcmk__order_stops(member, member_data->last_active,
+ pcmk__ar_ordered);
}
member_data->last_active = member;
}
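member_internal_constraints() also switches from the old pe_order_* ordering flags to the pcmk__ar_* ("action relation") flags; the call pattern is unchanged, only the flag names become more descriptive. A minimal usage sketch mirroring the promote ordering above (illustrative, using the same helper and flag names as the surrounding code):

    uint32_t flags = pcmk__ar_unrunnable_first_blocks
                     |pcmk__ar_first_implies_then
                     |pcmk__ar_first_implies_then_graphed;

    // Member must promote before the group can be considered promoted
    pcmk__order_resource_actions(member, PCMK_ACTION_PROMOTE,
                                 member->parent, PCMK_ACTION_PROMOTED, flags);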
@@ -280,35 +303,40 @@ member_internal_constraints(gpointer data, gpointer user_data)
* \param[in,out] rsc Group resource to create implicit constraints for
*/
void
-pcmk__group_internal_constraints(pe_resource_t *rsc)
+pcmk__group_internal_constraints(pcmk_resource_t *rsc)
{
struct member_data member_data = { false, };
+ const pcmk_resource_t *top = NULL;
- CRM_ASSERT(rsc != NULL);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
/* Order group pseudo-actions relative to each other for restarting:
* stop group -> group is stopped -> start group -> group is started
*/
- pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_STOPPED,
- pe_order_runnable_left);
- pcmk__order_resource_actions(rsc, RSC_STOPPED, rsc, RSC_START,
- pe_order_optional);
- pcmk__order_resource_actions(rsc, RSC_START, rsc, RSC_STARTED,
- pe_order_runnable_left);
-
- member_data.ordered = pe__group_flag_is_set(rsc, pe__group_ordered);
- member_data.colocated = pe__group_flag_is_set(rsc, pe__group_colocated);
- member_data.promotable = pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
- pe_rsc_promotable);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_STOP,
+ rsc, PCMK_ACTION_STOPPED,
+ pcmk__ar_unrunnable_first_blocks);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_STOPPED,
+ rsc, PCMK_ACTION_START,
+ pcmk__ar_ordered);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_START,
+ rsc, PCMK_ACTION_RUNNING,
+ pcmk__ar_unrunnable_first_blocks);
+
+ top = pe__const_top_resource(rsc, false);
+
+ member_data.ordered = pe__group_flag_is_set(rsc, pcmk__group_ordered);
+ member_data.colocated = pe__group_flag_is_set(rsc, pcmk__group_colocated);
+ member_data.promotable = pcmk_is_set(top->flags, pcmk_rsc_promotable);
g_list_foreach(rsc->children, member_internal_constraints, &member_data);
}
/*!
* \internal
- * \brief Apply a colocation's score to node weights or resource priority
+ * \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint for a group with some other resource, apply the
- * score to the dependent's allowed node weights (if we are still placing
+ * score to the dependent's allowed node scores (if we are still placing
* resources) or priority (if we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent group resource in colocation
@@ -316,10 +344,10 @@ pcmk__group_internal_constraints(pe_resource_t *rsc)
* \param[in] colocation Colocation constraint to apply
*/
static void
-colocate_group_with(pe_resource_t *dependent, const pe_resource_t *primary,
+colocate_group_with(pcmk_resource_t *dependent, const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation)
{
- pe_resource_t *member = NULL;
+ pcmk_resource_t *member = NULL;
if (dependent->children == NULL) {
return;
@@ -328,9 +356,9 @@ colocate_group_with(pe_resource_t *dependent, const pe_resource_t *primary,
pe_rsc_trace(primary, "Processing %s (group %s with %s) for dependent",
colocation->id, dependent->id, primary->id);
- if (pe__group_flag_is_set(dependent, pe__group_colocated)) {
+ if (pe__group_flag_is_set(dependent, pcmk__group_colocated)) {
// Colocate first member (internal colocations will handle the rest)
- member = (pe_resource_t *) dependent->children->data;
+ member = (pcmk_resource_t *) dependent->children->data;
member->cmds->apply_coloc_score(member, primary, colocation, true);
return;
}
@@ -344,17 +372,17 @@ colocate_group_with(pe_resource_t *dependent, const pe_resource_t *primary,
// Colocate each member individually
for (GList *iter = dependent->children; iter != NULL; iter = iter->next) {
- member = (pe_resource_t *) iter->data;
+ member = (pcmk_resource_t *) iter->data;
member->cmds->apply_coloc_score(member, primary, colocation, true);
}
}
/*!
* \internal
- * \brief Apply a colocation's score to node weights or resource priority
+ * \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint for some other resource with a group, apply the
- * score to the dependent's allowed node weights (if we are still placing
+ * score to the dependent's allowed node scores (if we are still placing
* resources) or priority (if we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
@@ -362,20 +390,20 @@ colocate_group_with(pe_resource_t *dependent, const pe_resource_t *primary,
* \param[in] colocation Colocation constraint to apply
*/
static void
-colocate_with_group(pe_resource_t *dependent, const pe_resource_t *primary,
+colocate_with_group(pcmk_resource_t *dependent, const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation)
{
- pe_resource_t *member = NULL;
+ const pcmk_resource_t *member = NULL;
pe_rsc_trace(primary,
"Processing colocation %s (%s with group %s) for primary",
colocation->id, dependent->id, primary->id);
- if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
+ if (pcmk_is_set(primary->flags, pcmk_rsc_unassigned)) {
return;
}
- if (pe__group_flag_is_set(primary, pe__group_colocated)) {
+ if (pe__group_flag_is_set(primary, pcmk__group_colocated)) {
if (colocation->score >= INFINITY) {
/* For mandatory colocations, the entire group must be assignable
@@ -388,7 +416,7 @@ colocate_with_group(pe_resource_t *dependent, const pe_resource_t *primary,
* up doesn't matter, so apply the colocation based on the first
* member.
*/
- member = (pe_resource_t *) primary->children->data;
+ member = (pcmk_resource_t *) primary->children->data;
}
if (member == NULL) {
return; // Nothing to colocate with
@@ -406,18 +434,19 @@ colocate_with_group(pe_resource_t *dependent, const pe_resource_t *primary,
}
// Colocate dependent with each member individually
- for (GList *iter = primary->children; iter != NULL; iter = iter->next) {
- member = (pe_resource_t *) iter->data;
+ for (const GList *iter = primary->children; iter != NULL;
+ iter = iter->next) {
+ member = iter->data;
member->cmds->apply_coloc_score(dependent, member, colocation, false);
}
}
/*!
* \internal
- * \brief Apply a colocation's score to node weights or resource priority
+ * \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint, apply its score to the dependent's
- * allowed node weights (if we are still placing resources) or priority (if
+ * allowed node scores (if we are still placing resources) or priority (if
* we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
@@ -426,8 +455,8 @@ colocate_with_group(pe_resource_t *dependent, const pe_resource_t *primary,
* \param[in] for_dependent true if called on behalf of dependent
*/
void
-pcmk__group_apply_coloc_score(pe_resource_t *dependent,
- const pe_resource_t *primary,
+pcmk__group_apply_coloc_score(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent)
{
@@ -439,7 +468,7 @@ pcmk__group_apply_coloc_score(pe_resource_t *dependent,
} else {
// Method should only be called for primitive dependents
- CRM_ASSERT(dependent->variant == pe_native);
+ CRM_ASSERT(dependent->variant == pcmk_rsc_variant_primitive);
colocate_with_group(dependent, primary, colocation);
}
@@ -454,62 +483,61 @@ pcmk__group_apply_coloc_score(pe_resource_t *dependent,
*
* \return Flags appropriate to \p action on \p node
*/
-enum pe_action_flags
-pcmk__group_action_flags(pe_action_t *action, const pe_node_t *node)
+uint32_t
+pcmk__group_action_flags(pcmk_action_t *action, const pcmk_node_t *node)
{
// Default flags for a group action
- enum pe_action_flags flags = pe_action_optional
- |pe_action_runnable
- |pe_action_pseudo;
+ uint32_t flags = pcmk_action_optional
+ |pcmk_action_runnable
+ |pcmk_action_pseudo;
CRM_ASSERT(action != NULL);
// Update flags considering each member's own flags for same action
for (GList *iter = action->rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *member = (pe_resource_t *) iter->data;
+ pcmk_resource_t *member = (pcmk_resource_t *) iter->data;
// Check whether member has the same action
enum action_tasks task = get_complex_task(member, action->task);
const char *task_s = task2text(task);
- pe_action_t *member_action = find_first_action(member->actions, NULL,
- task_s, node);
+ pcmk_action_t *member_action = find_first_action(member->actions, NULL,
+ task_s, node);
if (member_action != NULL) {
- enum pe_action_flags member_flags;
-
- member_flags = member->cmds->action_flags(member_action, node);
+ uint32_t member_flags = member->cmds->action_flags(member_action,
+ node);
// Group action is mandatory if any member action is
- if (pcmk_is_set(flags, pe_action_optional)
- && !pcmk_is_set(member_flags, pe_action_optional)) {
+ if (pcmk_is_set(flags, pcmk_action_optional)
+ && !pcmk_is_set(member_flags, pcmk_action_optional)) {
pe_rsc_trace(action->rsc, "%s is mandatory because %s is",
action->uuid, member_action->uuid);
pe__clear_raw_action_flags(flags, "group action",
- pe_action_optional);
- pe__clear_action_flags(action, pe_action_optional);
+ pcmk_action_optional);
+ pe__clear_action_flags(action, pcmk_action_optional);
}
// Group action is unrunnable if any member action is
if (!pcmk__str_eq(task_s, action->task, pcmk__str_none)
- && pcmk_is_set(flags, pe_action_runnable)
- && !pcmk_is_set(member_flags, pe_action_runnable)) {
+ && pcmk_is_set(flags, pcmk_action_runnable)
+ && !pcmk_is_set(member_flags, pcmk_action_runnable)) {
pe_rsc_trace(action->rsc, "%s is unrunnable because %s is",
action->uuid, member_action->uuid);
pe__clear_raw_action_flags(flags, "group action",
- pe_action_runnable);
- pe__clear_action_flags(action, pe_action_runnable);
+ pcmk_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
}
/* Group (pseudo-)actions other than stop or demote are unrunnable
* unless every member will do it.
*/
- } else if ((task != stop_rsc) && (task != action_demote)) {
+ } else if ((task != pcmk_action_stop) && (task != pcmk_action_demote)) {
pe_rsc_trace(action->rsc,
"%s is not runnable because %s will not %s",
action->uuid, member->id, task_s);
pe__clear_raw_action_flags(flags, "group action",
- pe_action_runnable);
+ pcmk_action_runnable);
}
}
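pcmk__group_action_flags() starts from fully permissive defaults (optional, runnable, pseudo) and clears bits as members are examined, so the group action is only as optional or runnable as its strictest member. A condensed sketch of the merge rule, using the same helpers as the loop above (tracing and the task-name check omitted):

    if (pcmk_is_set(flags, pcmk_action_optional)
        && !pcmk_is_set(member_flags, pcmk_action_optional)) {
        // Any mandatory member action makes the group action mandatory
        pe__clear_raw_action_flags(flags, "group action", pcmk_action_optional);
    }
    if (pcmk_is_set(flags, pcmk_action_runnable)
        && !pcmk_is_set(member_flags, pcmk_action_runnable)) {
        // Any unrunnable member action makes the group action unrunnable
        pe__clear_raw_action_flags(flags, "group action", pcmk_action_runnable);
    }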
@@ -524,49 +552,48 @@ pcmk__group_action_flags(pe_action_t *action, const pe_node_t *node)
* (and runnable_before members if appropriate) as appropriate for the ordering.
* Effects may cascade to other orderings involving the actions as well.
*
- * \param[in,out] first 'First' action in an ordering
- * \param[in,out] then 'Then' action in an ordering
- * \param[in] node If not NULL, limit scope of ordering to this node
- * (only used when interleaving instances)
- * \param[in] flags Action flags for \p first for ordering purposes
- * \param[in] filter Action flags to limit scope of certain updates (may
- * include pe_action_optional to affect only mandatory
- * actions, and pe_action_runnable to affect only
- * runnable actions)
- * \param[in] type Group of enum pe_ordering flags to apply
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] first 'First' action in an ordering
+ * \param[in,out] then 'Then' action in an ordering
+ * \param[in] node If not NULL, limit scope of ordering to this node
+ * (only used when interleaving instances)
+ * \param[in] flags Action flags for \p first for ordering purposes
+ * \param[in] filter Action flags to limit scope of certain updates (may
+ * include pcmk_action_optional to affect only
+ * mandatory actions, and pcmk_action_runnable to
+ * affect only runnable actions)
+ * \param[in] type Group of enum pcmk__action_relation_flags to apply
+ * \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
uint32_t
-pcmk__group_update_ordered_actions(pe_action_t *first, pe_action_t *then,
- const pe_node_t *node, uint32_t flags,
+pcmk__group_update_ordered_actions(pcmk_action_t *first, pcmk_action_t *then,
+ const pcmk_node_t *node, uint32_t flags,
uint32_t filter, uint32_t type,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
uint32_t changed = pcmk__updated_none;
- CRM_ASSERT((first != NULL) && (then != NULL) && (data_set != NULL));
-
- // Group method can be called only for group action as "then" action
- CRM_ASSERT(then->rsc != NULL);
+ // Group method can be called only on behalf of "then" action
+ CRM_ASSERT((first != NULL) && (then != NULL) && (then->rsc != NULL)
+ && (scheduler != NULL));
// Update the actions for the group itself
changed |= pcmk__update_ordered_actions(first, then, node, flags, filter,
- type, data_set);
+ type, scheduler);
// Update the actions for each group member
for (GList *iter = then->rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *member = (pe_resource_t *) iter->data;
+ pcmk_resource_t *member = (pcmk_resource_t *) iter->data;
- pe_action_t *member_action = find_first_action(member->actions, NULL,
- then->task, node);
+ pcmk_action_t *member_action = find_first_action(member->actions, NULL,
+ then->task, node);
if (member_action != NULL) {
changed |= member->cmds->update_ordered_actions(first,
member_action, node,
flags, filter, type,
- data_set);
+ scheduler);
}
}
return changed;
@@ -580,24 +607,25 @@ pcmk__group_update_ordered_actions(pe_action_t *first, pe_action_t *then,
* \param[in,out] location Location constraint to apply
*/
void
-pcmk__group_apply_location(pe_resource_t *rsc, pe__location_t *location)
+pcmk__group_apply_location(pcmk_resource_t *rsc, pe__location_t *location)
{
GList *node_list_orig = NULL;
GList *node_list_copy = NULL;
bool reset_scores = true;
- CRM_ASSERT((rsc != NULL) && (location != NULL));
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group)
+ && (location != NULL));
node_list_orig = location->node_list_rh;
node_list_copy = pcmk__copy_node_list(node_list_orig, true);
- reset_scores = pe__group_flag_is_set(rsc, pe__group_colocated);
+ reset_scores = pe__group_flag_is_set(rsc, pcmk__group_colocated);
// Apply the constraint for the group itself (updates node scores)
pcmk__apply_location(rsc, location);
// Apply the constraint for each member
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *member = (pe_resource_t *) iter->data;
+ pcmk_resource_t *member = (pcmk_resource_t *) iter->data;
member->cmds->apply_location(member, location);
@@ -615,21 +643,21 @@ pcmk__group_apply_location(pe_resource_t *rsc, pe__location_t *location)
g_list_free_full(node_list_copy, free);
}
-// Group implementation of resource_alloc_functions_t:colocated_resources()
+// Group implementation of pcmk_assignment_methods_t:colocated_resources()
GList *
-pcmk__group_colocated_resources(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc,
+pcmk__group_colocated_resources(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
GList *colocated_rscs)
{
- const pe_resource_t *member = NULL;
+ const pcmk_resource_t *member = NULL;
- CRM_ASSERT(rsc != NULL);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
if (orig_rsc == NULL) {
orig_rsc = rsc;
}
- if (pe__group_flag_is_set(rsc, pe__group_colocated)
+ if (pe__group_flag_is_set(rsc, pcmk__group_colocated)
|| pe_rsc_is_clone(rsc->parent)) {
/* This group has colocated members and/or is cloned -- either way,
* add every child's colocated resources to the list. The first and last
@@ -639,7 +667,7 @@ pcmk__group_colocated_resources(const pe_resource_t *rsc,
for (const GList *iter = rsc->children;
iter != NULL; iter = iter->next) {
- member = (const pe_resource_t *) iter->data;
+ member = (const pcmk_resource_t *) iter->data;
colocated_rscs = member->cmds->colocated_resources(member, orig_rsc,
colocated_rscs);
}
@@ -648,21 +676,21 @@ pcmk__group_colocated_resources(const pe_resource_t *rsc,
/* This group's members are not colocated, and the group is not cloned,
* so just add the group's own colocations to the list.
*/
- colocated_rscs = pcmk__colocated_resources(rsc, orig_rsc, colocated_rscs);
+ colocated_rscs = pcmk__colocated_resources(rsc, orig_rsc,
+ colocated_rscs);
}
return colocated_rscs;
}
-// Group implementation of resource_alloc_functions_t:with_this_colocations()
+// Group implementation of pcmk_assignment_methods_t:with_this_colocations()
void
-pcmk__with_group_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list)
+pcmk__with_group_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList **list)
{
- CRM_CHECK((rsc != NULL) && (rsc->variant == pe_group)
- && (orig_rsc != NULL) && (list != NULL),
- return);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group)
+ && (orig_rsc != NULL) && (list != NULL));
// Ignore empty groups
if (rsc->children == NULL) {
@@ -670,46 +698,85 @@ pcmk__with_group_colocations(const pe_resource_t *rsc,
}
/* "With this" colocations are needed only for the group itself and for its
- * last member. Add the group's colocations plus any relevant
- * parent colocations if cloned.
+ * last member. (Previous members will chain via the group internal
+ * colocations.)
*/
- if ((rsc == orig_rsc) || (orig_rsc == pe__last_group_member(rsc))) {
- crm_trace("Adding 'with %s' colocations to list for %s",
- rsc->id, orig_rsc->id);
- pcmk__add_with_this_list(list, rsc->rsc_cons_lhs);
- if (rsc->parent != NULL) { // Cloned group
- rsc->parent->cmds->with_this_colocations(rsc->parent, orig_rsc,
- list);
+ if ((orig_rsc != rsc) && (orig_rsc != pe__last_group_member(rsc))) {
+ return;
+ }
+
+ pe_rsc_trace(rsc, "Adding 'with %s' colocations to list for %s",
+ rsc->id, orig_rsc->id);
+
+ // Add the group's own colocations
+ pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
+
+ // If cloned, add any relevant colocations with the clone
+ if (rsc->parent != NULL) {
+ rsc->parent->cmds->with_this_colocations(rsc->parent, orig_rsc,
+ list);
+ }
+
+ if (!pe__group_flag_is_set(rsc, pcmk__group_colocated)) {
+ // @COMPAT Non-colocated groups are deprecated
+ return;
+ }
+
+ // Add explicit colocations with the group's (other) children
+ for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ const pcmk_resource_t *member = iter->data;
+
+ if (member != orig_rsc) {
+ member->cmds->with_this_colocations(member, orig_rsc, list);
}
}
}
-// Group implementation of resource_alloc_functions_t:this_with_colocations()
+// Group implementation of pcmk_assignment_methods_t:this_with_colocations()
void
-pcmk__group_with_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list)
+pcmk__group_with_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList **list)
{
- CRM_CHECK((rsc != NULL) && (rsc->variant == pe_group)
- && (orig_rsc != NULL) && (list != NULL),
- return);
+ const pcmk_resource_t *member = NULL;
+
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group)
+ && (orig_rsc != NULL) && (list != NULL));
// Ignore empty groups
if (rsc->children == NULL) {
return;
}
- /* Colocations for the group itself, or for its first member, consist of the
- * group's colocations plus any relevant parent colocations if cloned.
+ /* "This with" colocations are normally needed only for the group itself and
+ * for its first member.
*/
if ((rsc == orig_rsc)
- || (orig_rsc == (const pe_resource_t *) rsc->children->data)) {
- crm_trace("Adding '%s with' colocations to list for %s",
- rsc->id, orig_rsc->id);
- pcmk__add_this_with_list(list, rsc->rsc_cons);
- if (rsc->parent != NULL) { // Cloned group
+ || (orig_rsc == (const pcmk_resource_t *) rsc->children->data)) {
+ pe_rsc_trace(rsc, "Adding '%s with' colocations to list for %s",
+ rsc->id, orig_rsc->id);
+
+ // Add the group's own colocations
+ pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
+
+ // If cloned, add any relevant colocations involving the clone
+ if (rsc->parent != NULL) {
rsc->parent->cmds->this_with_colocations(rsc->parent, orig_rsc,
list);
}
+
+ if (!pe__group_flag_is_set(rsc, pcmk__group_colocated)) {
+ // @COMPAT Non-colocated groups are deprecated
+ return;
+ }
+
+ // Add explicit colocations involving the group's (other) children
+ for (const GList *iter = rsc->children;
+ iter != NULL; iter = iter->next) {
+ member = iter->data;
+ if (member != orig_rsc) {
+ member->cmds->this_with_colocations(member, orig_rsc, list);
+ }
+ }
return;
}
@@ -718,14 +785,13 @@ pcmk__group_with_colocations(const pe_resource_t *rsc,
* However, if an earlier group member is unmanaged, this chaining will not
* happen, so the group's mandatory colocations must be explicitly added.
*/
- for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- const pe_resource_t *member = (const pe_resource_t *) iter->data;
-
+ for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ member = iter->data;
if (orig_rsc == member) {
break; // We've seen all earlier members, and none are unmanaged
}
- if (!pcmk_is_set(member->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(member->flags, pcmk_rsc_managed)) {
crm_trace("Adding mandatory '%s with' colocations to list for "
"member %s because earlier member %s is unmanaged",
rsc->id, orig_rsc->id, member->id);
@@ -735,7 +801,7 @@ pcmk__group_with_colocations(const pe_resource_t *rsc,
colocation = (const pcmk__colocation_t *) cons_iter->data;
if (colocation->score == INFINITY) {
- pcmk__add_this_with(list, colocation);
+ pcmk__add_this_with(list, colocation, orig_rsc);
}
}
// @TODO Add mandatory (or all?) clone constraints if cloned
@@ -752,38 +818,57 @@ pcmk__group_with_colocations(const pe_resource_t *rsc,
* scores of the best nodes matching the attribute used for each of the
* resource's relevant colocations.
*
- * \param[in,out] rsc Resource to check colocations for
- * \param[in] log_id Resource ID to use in logs (if NULL, use \p rsc ID)
- * \param[in,out] nodes Nodes to update
- * \param[in] attr Colocation attribute (NULL to use default)
- * \param[in] factor Incorporate scores multiplied by this factor
- * \param[in] flags Bitmask of enum pcmk__coloc_select values
+ * \param[in,out] source_rsc Group resource whose node scores to add
+ * \param[in] target_rsc Resource on whose behalf to update \p *nodes
+ * \param[in] log_id Resource ID for logs (if \c NULL, use
+ * \p source_rsc ID)
+ * \param[in,out] nodes Nodes to update (set initial contents to \c NULL
+ * to copy allowed nodes from \p source_rsc)
+ * \param[in] colocation Original colocation constraint (used to get
+ * configured primary resource's stickiness, and
+ * to get colocation node attribute; if \c NULL,
+ * <tt>source_rsc</tt>'s own matching node scores will
+ * not be added, and \p *nodes must be \c NULL as
+ * well)
+ * \param[in] factor Incorporate scores multiplied by this factor
+ * \param[in] flags Bitmask of enum pcmk__coloc_select values
*
+ * \note \c NULL \p target_rsc, \c NULL \p *nodes, \c NULL \p colocation, and
+ * the \c pcmk__coloc_select_this_with flag are used together (and only by
+ * \c cmp_resources()).
* \note The caller remains responsible for freeing \p *nodes.
+ * \note This is the group implementation of
+ * \c pcmk_assignment_methods_t:add_colocated_node_scores().
*/
void
-pcmk__group_add_colocated_node_scores(pe_resource_t *rsc, const char *log_id,
- GHashTable **nodes, const char *attr,
+pcmk__group_add_colocated_node_scores(pcmk_resource_t *source_rsc,
+ const pcmk_resource_t *target_rsc,
+ const char *log_id, GHashTable **nodes,
+ const pcmk__colocation_t *colocation,
float factor, uint32_t flags)
{
- pe_resource_t *member = NULL;
+ pcmk_resource_t *member = NULL;
- CRM_CHECK((rsc != NULL) && (nodes != NULL), return);
+ CRM_ASSERT((source_rsc != NULL)
+ && (source_rsc->variant == pcmk_rsc_variant_group)
+ && (nodes != NULL)
+ && ((colocation != NULL)
+ || ((target_rsc == NULL) && (*nodes == NULL))));
if (log_id == NULL) {
- log_id = rsc->id;
+ log_id = source_rsc->id;
}
// Avoid infinite recursion
- if (pcmk_is_set(rsc->flags, pe_rsc_merging)) {
- pe_rsc_info(rsc, "%s: Breaking dependency loop at %s",
- log_id, rsc->id);
+ if (pcmk_is_set(source_rsc->flags, pcmk_rsc_updating_nodes)) {
+ pe_rsc_info(source_rsc, "%s: Breaking dependency loop at %s",
+ log_id, source_rsc->id);
return;
}
- pe__set_resource_flags(rsc, pe_rsc_merging);
+ pe__set_resource_flags(source_rsc, pcmk_rsc_updating_nodes);
// Ignore empty groups (only possible with schema validation disabled)
- if (rsc->children == NULL) {
+ if (source_rsc->children == NULL) {
return;
}
@@ -798,40 +883,41 @@ pcmk__group_add_colocated_node_scores(pe_resource_t *rsc, const char *log_id,
* For "with this" colocations, the first member works similarly.
*/
if (*nodes == NULL) {
- member = pe__last_group_member(rsc);
+ member = pe__last_group_member(source_rsc);
} else {
- member = rsc->children->data;
+ member = source_rsc->children->data;
}
- pe_rsc_trace(rsc, "%s: Merging scores from group %s using member %s "
- "(at %.6f)", log_id, rsc->id, member->id, factor);
- member->cmds->add_colocated_node_scores(member, log_id, nodes, attr, factor,
- flags);
- pe__clear_resource_flags(rsc, pe_rsc_merging);
+ pe_rsc_trace(source_rsc, "%s: Merging scores from group %s using member %s "
+ "(at %.6f)", log_id, source_rsc->id, member->id, factor);
+ member->cmds->add_colocated_node_scores(member, target_rsc, log_id, nodes,
+ colocation, factor, flags);
+ pe__clear_resource_flags(source_rsc, pcmk_rsc_updating_nodes);
}
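Callers now hand the whole colocation constraint to add_colocated_node_scores() instead of just its node attribute, and the group simply forwards it to the chosen member. A hedged caller sketch (variable names are illustrative; compare the apply_parent_colocations() hunk later in this patch):

    // Merge the primary's node scores into the dependent's table,
    // weighted by the colocation score relative to INFINITY
    float factor = colocation->score / (float) INFINITY;

    primary->cmds->add_colocated_node_scores(primary, dependent, dependent->id,
                                             &nodes, colocation, factor,
                                             pcmk__coloc_select_default);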
-// Group implementation of resource_alloc_functions_t:add_utilization()
+// Group implementation of pcmk_assignment_methods_t:add_utilization()
void
-pcmk__group_add_utilization(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList *all_rscs,
+pcmk__group_add_utilization(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList *all_rscs,
GHashTable *utilization)
{
- pe_resource_t *member = NULL;
+ pcmk_resource_t *member = NULL;
- CRM_ASSERT((rsc != NULL) && (orig_rsc != NULL) && (utilization != NULL));
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group)
+ && (orig_rsc != NULL) && (utilization != NULL));
- if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
return;
}
pe_rsc_trace(orig_rsc, "%s: Adding group %s as colocated utilization",
orig_rsc->id, rsc->id);
- if (pe__group_flag_is_set(rsc, pe__group_colocated)
+ if (pe__group_flag_is_set(rsc, pcmk__group_colocated)
|| pe_rsc_is_clone(rsc->parent)) {
// Every group member will be on same node, so sum all members
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- member = (pe_resource_t *) iter->data;
+ member = (pcmk_resource_t *) iter->data;
- if (pcmk_is_set(member->flags, pe_rsc_provisional)
+ if (pcmk_is_set(member->flags, pcmk_rsc_unassigned)
&& (g_list_find(all_rscs, member) == NULL)) {
member->cmds->add_utilization(member, orig_rsc, all_rscs,
utilization);
@@ -840,9 +926,9 @@ pcmk__group_add_utilization(const pe_resource_t *rsc,
} else if (rsc->children != NULL) {
// Just add first member's utilization
- member = (pe_resource_t *) rsc->children->data;
+ member = (pcmk_resource_t *) rsc->children->data;
if ((member != NULL)
- && pcmk_is_set(member->flags, pe_rsc_provisional)
+ && pcmk_is_set(member->flags, pcmk_rsc_unassigned)
&& (g_list_find(all_rscs, member) == NULL)) {
member->cmds->add_utilization(member, orig_rsc, all_rscs,
@@ -851,14 +937,13 @@ pcmk__group_add_utilization(const pe_resource_t *rsc,
}
}
-// Group implementation of resource_alloc_functions_t:shutdown_lock()
void
-pcmk__group_shutdown_lock(pe_resource_t *rsc)
+pcmk__group_shutdown_lock(pcmk_resource_t *rsc)
{
- CRM_ASSERT(rsc != NULL);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *member = (pe_resource_t *) iter->data;
+ pcmk_resource_t *member = (pcmk_resource_t *) iter->data;
member->cmds->shutdown_lock(member);
}
diff --git a/lib/pacemaker/pcmk_sched_instances.c b/lib/pacemaker/pcmk_sched_instances.c
index c880196..4667845 100644
--- a/lib/pacemaker/pcmk_sched_instances.c
+++ b/lib/pacemaker/pcmk_sched_instances.c
@@ -18,44 +18,6 @@
/*!
* \internal
- * \brief Check whether a clone or bundle has instances for all available nodes
- *
- * \param[in] collective Clone or bundle to check
- *
- * \return true if \p collective has enough instances for all of its available
- * allowed nodes, otherwise false
- */
-static bool
-can_run_everywhere(const pe_resource_t *collective)
-{
- GHashTableIter iter;
- pe_node_t *node = NULL;
- int available_nodes = 0;
- int max_instances = 0;
-
- switch (collective->variant) {
- case pe_clone:
- max_instances = pe__clone_max(collective);
- break;
- case pe_container:
- max_instances = pe__bundle_max(collective);
- break;
- default:
- return false; // Not actually possible
- }
-
- g_hash_table_iter_init(&iter, collective->allowed_nodes);
- while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
- if (pcmk__node_available(node, false, false)
- && (max_instances < ++available_nodes)) {
- return false;
- }
- }
- return true;
-}
-
-/*!
- * \internal
* \brief Check whether a node is allowed to run an instance
*
* \param[in] instance Clone instance or bundle container to check
@@ -65,12 +27,12 @@ can_run_everywhere(const pe_resource_t *collective)
* \return true if \p node is allowed to run \p instance, otherwise false
*/
static bool
-can_run_instance(const pe_resource_t *instance, const pe_node_t *node,
+can_run_instance(const pcmk_resource_t *instance, const pcmk_node_t *node,
int max_per_node)
{
- pe_node_t *allowed_node = NULL;
+ pcmk_node_t *allowed_node = NULL;
- if (pcmk_is_set(instance->flags, pe_rsc_orphan)) {
+ if (pcmk_is_set(instance->flags, pcmk_rsc_removed)) {
pe_rsc_trace(instance, "%s cannot run on %s: orphaned",
instance->id, pe__node_name(node));
return false;
@@ -118,11 +80,11 @@ can_run_instance(const pe_resource_t *instance, const pe_node_t *node,
* \param[in] max_per_node Maximum instances allowed to run on a node
*/
static void
-ban_unavailable_allowed_nodes(pe_resource_t *instance, int max_per_node)
+ban_unavailable_allowed_nodes(pcmk_resource_t *instance, int max_per_node)
{
if (instance->allowed_nodes != NULL) {
GHashTableIter iter;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
g_hash_table_iter_init(&iter, instance->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
@@ -132,11 +94,11 @@ ban_unavailable_allowed_nodes(pe_resource_t *instance, int max_per_node)
node->weight = -INFINITY;
for (GList *child_iter = instance->children;
child_iter != NULL; child_iter = child_iter->next) {
- pe_resource_t *child = (pe_resource_t *) child_iter->data;
- pe_node_t *child_node = NULL;
+ pcmk_resource_t *child = child_iter->data;
+ pcmk_node_t *child_node = NULL;
- child_node = pe_hash_table_lookup(child->allowed_nodes,
- node->details->id);
+ child_node = g_hash_table_lookup(child->allowed_nodes,
+ node->details->id);
if (child_node != NULL) {
pe_rsc_trace(instance,
"Banning %s child %s "
@@ -162,7 +124,7 @@ ban_unavailable_allowed_nodes(pe_resource_t *instance, int max_per_node)
* g_hash_table_destroy().
*/
static GHashTable *
-new_node_table(pe_node_t *node)
+new_node_table(pcmk_node_t *node)
{
GHashTable *table = pcmk__strkey_table(NULL, free);
@@ -179,38 +141,35 @@ new_node_table(pe_node_t *node)
* \param[in,out] nodes Node table to apply colocations to
*/
static void
-apply_parent_colocations(const pe_resource_t *rsc, GHashTable **nodes)
+apply_parent_colocations(const pcmk_resource_t *rsc, GHashTable **nodes)
{
- GList *iter = NULL;
- pcmk__colocation_t *colocation = NULL;
- pe_resource_t *other = NULL;
- float factor = 0.0;
+ GList *colocations = pcmk__this_with_colocations(rsc);
- /* Because the this_with_colocations() and with_this_colocations() methods
- * boil down to copies of rsc_cons and rsc_cons_lhs for clones and bundles,
- * we can use those here directly for efficiency.
- */
- for (iter = rsc->parent->rsc_cons; iter != NULL; iter = iter->next) {
- colocation = (pcmk__colocation_t *) iter->data;
- other = colocation->primary;
- factor = colocation->score / (float) INFINITY,
- other->cmds->add_colocated_node_scores(other, rsc->id, nodes,
- colocation->node_attribute,
- factor,
+ for (const GList *iter = colocations; iter != NULL; iter = iter->next) {
+ const pcmk__colocation_t *colocation = iter->data;
+ pcmk_resource_t *other = colocation->primary;
+ float factor = colocation->score / (float) INFINITY;
+
+ other->cmds->add_colocated_node_scores(other, rsc, rsc->id, nodes,
+ colocation, factor,
pcmk__coloc_select_default);
}
- for (iter = rsc->parent->rsc_cons_lhs; iter != NULL; iter = iter->next) {
- colocation = (pcmk__colocation_t *) iter->data;
+ g_list_free(colocations);
+ colocations = pcmk__with_this_colocations(rsc);
+
+ for (const GList *iter = colocations; iter != NULL; iter = iter->next) {
+ const pcmk__colocation_t *colocation = iter->data;
+ pcmk_resource_t *other = colocation->dependent;
+ float factor = colocation->score / (float) INFINITY;
+
if (!pcmk__colocation_has_influence(colocation, rsc)) {
continue;
}
- other = colocation->dependent;
- factor = colocation->score / (float) INFINITY,
- other->cmds->add_colocated_node_scores(other, rsc->id, nodes,
- colocation->node_attribute,
- factor,
+ other->cmds->add_colocated_node_scores(other, rsc, rsc->id, nodes,
+ colocation, factor,
pcmk__coloc_select_nonnegative);
}
+ g_list_free(colocations);
}
/*!
@@ -229,14 +188,14 @@ apply_parent_colocations(const pe_resource_t *rsc, GHashTable **nodes)
* or 0 if assignment order doesn't matter
*/
static int
-cmp_instance_by_colocation(const pe_resource_t *instance1,
- const pe_resource_t *instance2)
+cmp_instance_by_colocation(const pcmk_resource_t *instance1,
+ const pcmk_resource_t *instance2)
{
int rc = 0;
- pe_node_t *node1 = NULL;
- pe_node_t *node2 = NULL;
- pe_node_t *current_node1 = pe__current_node(instance1);
- pe_node_t *current_node2 = pe__current_node(instance2);
+ pcmk_node_t *node1 = NULL;
+ pcmk_node_t *node2 = NULL;
+ pcmk_node_t *current_node1 = pe__current_node(instance1);
+ pcmk_node_t *current_node2 = pe__current_node(instance2);
GHashTable *colocated_scores1 = NULL;
GHashTable *colocated_scores2 = NULL;
@@ -284,13 +243,13 @@ cmp_instance_by_colocation(const pe_resource_t *instance1,
* \return true if \p rsc or any of its children are failed, otherwise false
*/
static bool
-did_fail(const pe_resource_t *rsc)
+did_fail(const pcmk_resource_t *rsc)
{
- if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
return true;
}
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- if (did_fail((const pe_resource_t *) iter->data)) {
+ if (did_fail((const pcmk_resource_t *) iter->data)) {
return true;
}
}
@@ -307,11 +266,12 @@ did_fail(const pe_resource_t *rsc)
* \return true if *node is either NULL or allowed for \p rsc, otherwise false
*/
static bool
-node_is_allowed(const pe_resource_t *rsc, pe_node_t **node)
+node_is_allowed(const pcmk_resource_t *rsc, pcmk_node_t **node)
{
if (*node != NULL) {
- pe_node_t *allowed = pe_hash_table_lookup(rsc->allowed_nodes,
- (*node)->details->id);
+ pcmk_node_t *allowed = g_hash_table_lookup(rsc->allowed_nodes,
+ (*node)->details->id);
+
if ((allowed == NULL) || (allowed->weight < 0)) {
pe_rsc_trace(rsc, "%s: current location (%s) is unavailable",
rsc->id, pe__node_name(*node));
@@ -336,8 +296,8 @@ node_is_allowed(const pe_resource_t *rsc, pe_node_t **node)
gint
pcmk__cmp_instance_number(gconstpointer a, gconstpointer b)
{
- const pe_resource_t *instance1 = (const pe_resource_t *) a;
- const pe_resource_t *instance2 = (const pe_resource_t *) b;
+ const pcmk_resource_t *instance1 = (const pcmk_resource_t *) a;
+ const pcmk_resource_t *instance2 = (const pcmk_resource_t *) b;
char *div1 = NULL;
char *div2 = NULL;
@@ -386,16 +346,16 @@ gint
pcmk__cmp_instance(gconstpointer a, gconstpointer b)
{
int rc = 0;
- pe_node_t *node1 = NULL;
- pe_node_t *node2 = NULL;
+ pcmk_node_t *node1 = NULL;
+ pcmk_node_t *node2 = NULL;
unsigned int nnodes1 = 0;
unsigned int nnodes2 = 0;
bool can1 = true;
bool can2 = true;
- const pe_resource_t *instance1 = (const pe_resource_t *) a;
- const pe_resource_t *instance2 = (const pe_resource_t *) b;
+ const pcmk_resource_t *instance1 = (const pcmk_resource_t *) a;
+ const pcmk_resource_t *instance2 = (const pcmk_resource_t *) b;
CRM_ASSERT((instance1 != NULL) && (instance2 != NULL));
@@ -547,7 +507,41 @@ pcmk__cmp_instance(gconstpointer a, gconstpointer b)
/*!
* \internal
- * \brief Choose a node for an instance
+ * \brief Increment the parent's instance count after assigning an instance
+ *
+ * An instance's parent tracks how many instances have been assigned to each
+ * node via its pcmk_node_t:count member. After assigning an instance to a node,
+ * find the corresponding node in the parent's allowed table and increment it.
+ *
+ * \param[in,out] instance Instance whose parent to update
+ * \param[in] assigned_to Node to which the instance was assigned
+ */
+static void
+increment_parent_count(pcmk_resource_t *instance,
+ const pcmk_node_t *assigned_to)
+{
+ pcmk_node_t *allowed = NULL;
+
+ if (assigned_to == NULL) {
+ return;
+ }
+ allowed = pcmk__top_allowed_node(instance, assigned_to);
+
+ if (allowed == NULL) {
+ /* The instance is allowed on the node, but its parent isn't. This
+ * shouldn't be possible if the resource is managed, and we won't be
+ * able to limit the number of instances assigned to the node.
+ */
+ CRM_LOG_ASSERT(!pcmk_is_set(instance->flags, pcmk_rsc_managed));
+
+ } else {
+ allowed->count++;
+ }
+}
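The per-node count bumped here is what later caps assignments: once a node has reached its limit, it is treated as unavailable for further instances. A hedged sketch of the kind of check this feeds (the real logic lives in ban_unavailable_allowed_nodes() and pcmk__node_available()):

    /* Illustrative only: a node that already holds max_per_node instances
     * is banned for any further instance of the same collective.
     */
    if (node->count >= max_per_node) {
        node->weight = -INFINITY;
    }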
+
+/*!
+ * \internal
+ * \brief Assign an instance to a node
*
* \param[in,out] instance Clone instance or bundle replica container
* \param[in] prefer If not NULL, attempt early assignment to this
@@ -555,84 +549,153 @@ pcmk__cmp_instance(gconstpointer a, gconstpointer b)
* perform final assignment
* \param[in] max_per_node Assign at most this many instances to one node
*
- * \return true if \p instance could be assigned to a node, otherwise false
+ * \return Node to which \p instance is assigned
*/
-static bool
-assign_instance(pe_resource_t *instance, const pe_node_t *prefer,
+static const pcmk_node_t *
+assign_instance(pcmk_resource_t *instance, const pcmk_node_t *prefer,
int max_per_node)
{
- pe_node_t *chosen = NULL;
- pe_node_t *allowed = NULL;
+ pcmk_node_t *chosen = NULL;
- CRM_ASSERT(instance != NULL);
pe_rsc_trace(instance, "Assigning %s (preferring %s)", instance->id,
((prefer == NULL)? "no node" : prefer->details->uname));
- if (!pcmk_is_set(instance->flags, pe_rsc_provisional)) {
- // Instance is already assigned
- return instance->fns->location(instance, NULL, FALSE) != NULL;
- }
-
- if (pcmk_is_set(instance->flags, pe_rsc_allocating)) {
+ if (pcmk_is_set(instance->flags, pcmk_rsc_assigning)) {
pe_rsc_debug(instance,
"Assignment loop detected involving %s colocations",
instance->id);
+ return NULL;
+ }
+ ban_unavailable_allowed_nodes(instance, max_per_node);
+
+ // Failed early assignments are reversible (stop_if_fail=false)
+ chosen = instance->cmds->assign(instance, prefer, (prefer == NULL));
+ increment_parent_count(instance, chosen);
+ return chosen;
+}
+
+/*!
+ * \internal
+ * \brief Try to assign an instance to its current node early
+ *
+ * \param[in] rsc Clone or bundle being assigned (for logs only)
+ * \param[in] instance Clone instance or bundle replica container
+ * \param[in] current Instance's current node
+ * \param[in] max_per_node Maximum number of instances per node
+ * \param[in] available Number of instances still available for assignment
+ *
+ * \return \c true if \p instance was successfully assigned to its current node,
+ * or \c false otherwise
+ */
+static bool
+assign_instance_early(const pcmk_resource_t *rsc, pcmk_resource_t *instance,
+ const pcmk_node_t *current, int max_per_node,
+ int available)
+{
+ const pcmk_node_t *chosen = NULL;
+ int reserved = 0;
+
+ pcmk_resource_t *parent = instance->parent;
+ GHashTable *allowed_orig = NULL;
+ GHashTable *allowed_orig_parent = parent->allowed_nodes;
+ const pcmk_node_t *allowed_node = NULL;
+
+ pe_rsc_trace(instance, "Trying to assign %s to its current node %s",
+ instance->id, pe__node_name(current));
+
+ allowed_node = g_hash_table_lookup(instance->allowed_nodes,
+ current->details->id);
+ if (!pcmk__node_available(allowed_node, true, false)) {
+ pe_rsc_info(instance,
+ "Not assigning %s to current node %s: unavailable",
+ instance->id, pe__node_name(current));
return false;
}
- if (prefer != NULL) { // Possible early assignment to preferred node
+ /* On each iteration, if instance gets assigned to a node other than its
+ * current one, we reserve one instance for the chosen node, unassign
+ * instance, restore instance's original node tables, and try again. This
+ * way, instances are proportionally assigned to nodes based on preferences,
+ * but shuffling of specific instances is minimized. If a node will be
+ * assigned instances at all, it preferentially receives instances that are
+ * currently active there.
+ *
+ * parent->allowed_nodes tracks the number of instances assigned to each
+ * node. If a node already has max_per_node instances assigned,
+ * ban_unavailable_allowed_nodes() marks it as unavailable.
+ *
+ * In the end, we restore the original parent->allowed_nodes to undo the
+ * changes to counts during tentative assignments. If we successfully
+ * assigned instance to its current node, we increment that node's counter.
+ */
+
+ // Back up the allowed node tables of instance and its children recursively
+ pcmk__copy_node_tables(instance, &allowed_orig);
- // Get preferred node with instance's scores
- allowed = g_hash_table_lookup(instance->allowed_nodes,
- prefer->details->id);
+ // Update instances-per-node counts in a scratch table
+ parent->allowed_nodes = pcmk__copy_node_table(parent->allowed_nodes);
- if ((allowed == NULL) || (allowed->weight < 0)) {
- pe_rsc_trace(instance,
- "Not assigning %s to preferred node %s: unavailable",
- instance->id, pe__node_name(prefer));
- return false;
+ while (reserved < available) {
+ chosen = assign_instance(instance, current, max_per_node);
+
+ if (pe__same_node(chosen, current)) {
+ // Successfully assigned to current node
+ break;
}
- }
- ban_unavailable_allowed_nodes(instance, max_per_node);
+ // Assignment updates scores, so restore to original state
+ pe_rsc_debug(instance, "Rolling back node scores for %s", instance->id);
+ pcmk__restore_node_tables(instance, allowed_orig);
- if (prefer == NULL) { // Final assignment
- chosen = instance->cmds->assign(instance, NULL);
+ if (chosen == NULL) {
+ // Assignment failed, so give up
+ pe_rsc_info(instance,
+ "Not assigning %s to current node %s: unavailable",
+ instance->id, pe__node_name(current));
+ pe__set_resource_flags(instance, pcmk_rsc_unassigned);
+ break;
+ }
- } else { // Possible early assignment to preferred node
- GHashTable *backup = pcmk__copy_node_table(instance->allowed_nodes);
+ // We prefer more strongly to assign an instance to the chosen node
+ pe_rsc_debug(instance,
+ "Not assigning %s to current node %s: %s is better",
+ instance->id, pe__node_name(current),
+ pe__node_name(chosen));
- chosen = instance->cmds->assign(instance, prefer);
+ // Reserve one instance for the chosen node and try again
+ if (++reserved >= available) {
+ pe_rsc_info(instance,
+ "Not assigning %s to current node %s: "
+ "other assignments are more important",
+ instance->id, pe__node_name(current));
- // Revert nodes if preferred node won't be assigned
- if ((chosen != NULL) && (chosen->details != prefer->details)) {
- crm_info("Not assigning %s to preferred node %s: %s is better",
- instance->id, pe__node_name(prefer),
- pe__node_name(chosen));
- g_hash_table_destroy(instance->allowed_nodes);
- instance->allowed_nodes = backup;
- pcmk__unassign_resource(instance);
- chosen = NULL;
- } else if (backup != NULL) {
- g_hash_table_destroy(backup);
+ } else {
+ pe_rsc_debug(instance,
+ "Reserved an instance of %s for %s. Retrying "
+ "assignment of %s to %s",
+ rsc->id, pe__node_name(chosen), instance->id,
+ pe__node_name(current));
}
+
+ // Clear this assignment (frees chosen); leave instance counts in parent
+ pcmk__unassign_resource(instance);
+ chosen = NULL;
}
- // The parent tracks how many instances have been assigned to each node
- if (chosen != NULL) {
- allowed = pcmk__top_allowed_node(instance, chosen);
- if (allowed == NULL) {
- /* The instance is allowed on the node, but its parent isn't. This
- * shouldn't be possible if the resource is managed, and we won't be
- * able to limit the number of instances assigned to the node.
- */
- CRM_LOG_ASSERT(!pcmk_is_set(instance->flags, pe_rsc_managed));
+ g_hash_table_destroy(allowed_orig);
- } else {
- allowed->count++;
- }
+ // Restore original instances-per-node counts
+ g_hash_table_destroy(parent->allowed_nodes);
+ parent->allowed_nodes = allowed_orig_parent;
+
+ if (chosen == NULL) {
+ // Couldn't assign instance to current node
+ return false;
}
- return chosen != NULL;
+ pe_rsc_trace(instance, "Assigned %s to current node %s",
+ instance->id, pe__node_name(current));
+ increment_parent_count(instance, chosen);
+ return true;
}
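The reserve-and-retry loop above trades a little rescoring work for placement stability. For example (illustrative numbers only): with three assignments still available, if the tentative assignment lands on a better-scoring node twice in a row, two instances are reserved for that node and the loop retries; on the third pass the instance either finally lands on its current node or gives up with "other assignments are more important", leaving its placement to the regular assignment pass.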
/*!
@@ -644,10 +707,10 @@ assign_instance(pe_resource_t *instance, const pe_node_t *prefer,
* \return Number of nodes that are available to run resources
*/
static unsigned int
-reset_allowed_node_counts(pe_resource_t *rsc)
+reset_allowed_node_counts(pcmk_resource_t *rsc)
{
unsigned int available_nodes = 0;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
GHashTableIter iter;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
@@ -664,30 +727,28 @@ reset_allowed_node_counts(pe_resource_t *rsc)
* \internal
* \brief Check whether an instance has a preferred node
*
- * \param[in] rsc Clone or bundle being assigned (for logs only)
* \param[in] instance Clone instance or bundle replica container
* \param[in] optimal_per_node Optimal number of instances per node
*
* \return Instance's current node if still available, otherwise NULL
*/
-static const pe_node_t *
-preferred_node(const pe_resource_t *rsc, const pe_resource_t *instance,
- int optimal_per_node)
+static const pcmk_node_t *
+preferred_node(const pcmk_resource_t *instance, int optimal_per_node)
{
- const pe_node_t *node = NULL;
- const pe_node_t *parent_node = NULL;
+ const pcmk_node_t *node = NULL;
+ const pcmk_node_t *parent_node = NULL;
// Check whether instance is active, healthy, and not yet assigned
if ((instance->running_on == NULL)
- || !pcmk_is_set(instance->flags, pe_rsc_provisional)
- || pcmk_is_set(instance->flags, pe_rsc_failed)) {
+ || !pcmk_is_set(instance->flags, pcmk_rsc_unassigned)
+ || pcmk_is_set(instance->flags, pcmk_rsc_failed)) {
return NULL;
}
// Check whether instance's current node can run resources
node = pe__current_node(instance);
if (!pcmk__node_available(node, true, false)) {
- pe_rsc_trace(rsc, "Not assigning %s to %s early (unavailable)",
+ pe_rsc_trace(instance, "Not assigning %s to %s early (unavailable)",
instance->id, pe__node_name(node));
return NULL;
}
@@ -695,7 +756,7 @@ preferred_node(const pe_resource_t *rsc, const pe_resource_t *instance,
// Check whether node already has optimal number of instances assigned
parent_node = pcmk__top_allowed_node(instance, node);
if ((parent_node != NULL) && (parent_node->count >= optimal_per_node)) {
- pe_rsc_trace(rsc,
+ pe_rsc_trace(instance,
"Not assigning %s to %s early "
"(optimal instances already assigned)",
instance->id, pe__node_name(node));
@@ -715,7 +776,7 @@ preferred_node(const pe_resource_t *rsc, const pe_resource_t *instance,
* \param[in] max_per_node Maximum instances to assign to any one node
*/
void
-pcmk__assign_instances(pe_resource_t *collective, GList *instances,
+pcmk__assign_instances(pcmk_resource_t *collective, GList *instances,
int max_total, int max_per_node)
{
// Reuse node count to track number of assigned instances
@@ -724,8 +785,8 @@ pcmk__assign_instances(pe_resource_t *collective, GList *instances,
int optimal_per_node = 0;
int assigned = 0;
GList *iter = NULL;
- pe_resource_t *instance = NULL;
- const pe_node_t *current = NULL;
+ pcmk_resource_t *instance = NULL;
+ const pcmk_node_t *current = NULL;
if (available_nodes > 0) {
optimal_per_node = max_total / available_nodes;
@@ -744,13 +805,17 @@ pcmk__assign_instances(pe_resource_t *collective, GList *instances,
// Assign as many instances as possible to their current location
for (iter = instances; (iter != NULL) && (assigned < max_total);
iter = iter->next) {
- instance = (pe_resource_t *) iter->data;
+ int available = max_total - assigned;
+
+ instance = iter->data;
+ if (!pcmk_is_set(instance->flags, pcmk_rsc_unassigned)) {
+ continue; // Already assigned
+ }
- current = preferred_node(collective, instance, optimal_per_node);
+ current = preferred_node(instance, optimal_per_node);
if ((current != NULL)
- && assign_instance(instance, current, max_per_node)) {
- pe_rsc_trace(collective, "Assigned %s to current node %s",
- instance->id, pe__node_name(current));
+ && assign_instance_early(collective, instance, current,
+ max_per_node, available)) {
assigned++;
}
}
@@ -759,9 +824,9 @@ pcmk__assign_instances(pe_resource_t *collective, GList *instances,
assigned, max_total, pcmk__plural_s(max_total));
for (iter = instances; iter != NULL; iter = iter->next) {
- instance = (pe_resource_t *) iter->data;
+ instance = (pcmk_resource_t *) iter->data;
- if (!pcmk_is_set(instance->flags, pe_rsc_provisional)) {
+ if (!pcmk_is_set(instance->flags, pcmk_rsc_unassigned)) {
continue; // Already assigned
}
@@ -770,7 +835,7 @@ pcmk__assign_instances(pe_resource_t *collective, GList *instances,
if (pcmk__top_allowed_node(instance, current) == NULL) {
const char *unmanaged = "";
- if (!pcmk_is_set(instance->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(instance->flags, pcmk_rsc_managed)) {
unmanaged = "Unmanaged resource ";
}
crm_notice("%s%s is running on %s which is no longer allowed",
@@ -786,7 +851,7 @@ pcmk__assign_instances(pe_resource_t *collective, GList *instances,
resource_location(instance, NULL, -INFINITY,
"collective_limit_reached", collective->cluster);
- } else if (assign_instance(instance, NULL, max_per_node)) {
+ } else if (assign_instance(instance, NULL, max_per_node) != NULL) {
assigned++;
}
}
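pcmk__assign_instances() thus makes two passes: the first loop tries to keep instances where they already run (up to optimal_per_node per node, computed above as max_total divided by the number of available nodes), and the second loop assigns whatever remains, banning any instances beyond max_total. As a worked example (illustrative only): with max_total 8 and 3 available nodes, optimal_per_node is 2, so at most two instances per node are retained in place before the general pass runs.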
@@ -821,7 +886,7 @@ enum instance_state {
* \param[in,out] state Whether any instance is starting, stopping, etc.
*/
static void
-check_instance_state(const pe_resource_t *instance, uint32_t *state)
+check_instance_state(const pcmk_resource_t *instance, uint32_t *state)
{
const GList *iter = NULL;
uint32_t instance_state = 0; // State of just this instance
@@ -832,11 +897,11 @@ check_instance_state(const pe_resource_t *instance, uint32_t *state)
}
// If instance is a collective (a cloned group), check its children instead
- if (instance->variant > pe_native) {
+ if (instance->variant > pcmk_rsc_variant_primitive) {
for (iter = instance->children;
(iter != NULL) && !pcmk_all_flags_set(*state, instance_all);
iter = iter->next) {
- check_instance_state((const pe_resource_t *) iter->data, state);
+ check_instance_state((const pcmk_resource_t *) iter->data, state);
}
return;
}
@@ -854,11 +919,13 @@ check_instance_state(const pe_resource_t *instance, uint32_t *state)
|instance_stopping);
iter = iter->next) {
- const pe_action_t *action = (const pe_action_t *) iter->data;
- const bool optional = pcmk_is_set(action->flags, pe_action_optional);
+ const pcmk_action_t *action = (const pcmk_action_t *) iter->data;
+ const bool optional = pcmk_is_set(action->flags, pcmk_action_optional);
+
+ if (pcmk__str_eq(PCMK_ACTION_START, action->task, pcmk__str_none)) {
+ if (!optional
+ && pcmk_is_set(action->flags, pcmk_action_runnable)) {
- if (pcmk__str_eq(RSC_START, action->task, pcmk__str_none)) {
- if (!optional && pcmk_is_set(action->flags, pe_action_runnable)) {
pe_rsc_trace(instance, "Instance is starting due to %s",
action->uuid);
instance_state |= instance_starting;
@@ -868,14 +935,15 @@ check_instance_state(const pe_resource_t *instance, uint32_t *state)
(optional? "optional" : "unrunnable"));
}
- } else if (pcmk__str_eq(RSC_STOP, action->task, pcmk__str_none)) {
+ } else if (pcmk__str_eq(PCMK_ACTION_STOP, action->task,
+ pcmk__str_none)) {
/* Only stop actions can be pseudo-actions for primitives. That
* indicates that the node they are on is being fenced, so the stop
* is implied rather than actually executed.
*/
if (!optional
- && pcmk_any_flags_set(action->flags,
- pe_action_pseudo|pe_action_runnable)) {
+ && pcmk_any_flags_set(action->flags, pcmk_action_pseudo
+ |pcmk_action_runnable)) {
pe_rsc_trace(instance, "Instance is stopping due to %s",
action->uuid);
instance_state |= instance_stopping;
@@ -902,52 +970,52 @@ check_instance_state(const pe_resource_t *instance, uint32_t *state)
* \param[in,out] instances List of clone instances or bundle containers
*/
void
-pcmk__create_instance_actions(pe_resource_t *collective, GList *instances)
+pcmk__create_instance_actions(pcmk_resource_t *collective, GList *instances)
{
uint32_t state = 0;
- pe_action_t *stop = NULL;
- pe_action_t *stopped = NULL;
+ pcmk_action_t *stop = NULL;
+ pcmk_action_t *stopped = NULL;
- pe_action_t *start = NULL;
- pe_action_t *started = NULL;
+ pcmk_action_t *start = NULL;
+ pcmk_action_t *started = NULL;
pe_rsc_trace(collective, "Creating collective instance actions for %s",
collective->id);
// Create actions for each instance appropriate to its variant
for (GList *iter = instances; iter != NULL; iter = iter->next) {
- pe_resource_t *instance = (pe_resource_t *) iter->data;
+ pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
instance->cmds->create_actions(instance);
check_instance_state(instance, &state);
}
// Create pseudo-actions for rsc start and started
- start = pe__new_rsc_pseudo_action(collective, RSC_START,
+ start = pe__new_rsc_pseudo_action(collective, PCMK_ACTION_START,
!pcmk_is_set(state, instance_starting),
true);
- started = pe__new_rsc_pseudo_action(collective, RSC_STARTED,
+ started = pe__new_rsc_pseudo_action(collective, PCMK_ACTION_RUNNING,
!pcmk_is_set(state, instance_starting),
false);
started->priority = INFINITY;
if (pcmk_any_flags_set(state, instance_active|instance_starting)) {
- pe__set_action_flags(started, pe_action_runnable);
+ pe__set_action_flags(started, pcmk_action_runnable);
}
// Create pseudo-actions for rsc stop and stopped
- stop = pe__new_rsc_pseudo_action(collective, RSC_STOP,
+ stop = pe__new_rsc_pseudo_action(collective, PCMK_ACTION_STOP,
!pcmk_is_set(state, instance_stopping),
true);
- stopped = pe__new_rsc_pseudo_action(collective, RSC_STOPPED,
+ stopped = pe__new_rsc_pseudo_action(collective, PCMK_ACTION_STOPPED,
!pcmk_is_set(state, instance_stopping),
true);
stopped->priority = INFINITY;
if (!pcmk_is_set(state, instance_restarting)) {
- pe__set_action_flags(stop, pe_action_migrate_runnable);
+ pe__set_action_flags(stop, pcmk_action_migratable);
}
- if (collective->variant == pe_clone) {
+ if (collective->variant == pcmk_rsc_variant_clone) {
pe__create_clone_notif_pseudo_ops(collective, start, started, stop,
stopped);
}
@@ -965,9 +1033,9 @@ pcmk__create_instance_actions(pe_resource_t *collective, GList *instances)
* is no longer needed.
*/
static inline GList *
-get_instance_list(const pe_resource_t *rsc)
+get_instance_list(const pcmk_resource_t *rsc)
{
- if (rsc->variant == pe_container) {
+ if (rsc->variant == pcmk_rsc_variant_bundle) {
return pe__bundle_containers(rsc);
} else {
return rsc->children;
@@ -982,7 +1050,7 @@ get_instance_list(const pe_resource_t *rsc)
* \param[in,out] list Return value of get_instance_list() for \p rsc
*/
static inline void
-free_instance_list(const pe_resource_t *rsc, GList *list)
+free_instance_list(const pcmk_resource_t *rsc, GList *list)
{
if (list != rsc->children) {
g_list_free(list);
@@ -995,7 +1063,7 @@ free_instance_list(const pe_resource_t *rsc, GList *list)
*
* \param[in] instance Clone instance or bundle replica container
* \param[in] node Instance must match this node
- * \param[in] role If not RSC_ROLE_UNKNOWN, instance must match this role
+ * \param[in] role If not pcmk_role_unknown, instance must match this role
* \param[in] current If true, compare instance's original node and role,
* otherwise compare assigned next node and role
*
@@ -1003,14 +1071,14 @@ free_instance_list(const pe_resource_t *rsc, GList *list)
* otherwise false
*/
bool
-pcmk__instance_matches(const pe_resource_t *instance, const pe_node_t *node,
+pcmk__instance_matches(const pcmk_resource_t *instance, const pcmk_node_t *node,
enum rsc_role_e role, bool current)
{
- pe_node_t *instance_node = NULL;
+ pcmk_node_t *instance_node = NULL;
CRM_CHECK((instance != NULL) && (node != NULL), return false);
- if ((role != RSC_ROLE_UNKNOWN)
+ if ((role != pcmk_role_unknown)
&& (role != instance->fns->state(instance, current))) {
pe_rsc_trace(instance,
"%s is not a compatible instance (role is not %s)",
@@ -1018,7 +1086,7 @@ pcmk__instance_matches(const pe_resource_t *instance, const pe_node_t *node,
return false;
}
- if (!is_set_recursive(instance, pe_rsc_block, true)) {
+ if (!is_set_recursive(instance, pcmk_rsc_blocked, true)) {
// We only want instances that haven't failed
instance_node = instance->fns->location(instance, NULL, current);
}
@@ -1030,7 +1098,7 @@ pcmk__instance_matches(const pe_resource_t *instance, const pe_node_t *node,
return false;
}
- if (instance_node->details != node->details) {
+ if (!pe__same_node(instance_node, node)) {
pe_rsc_trace(instance,
"%s is not a compatible instance (assigned to %s not %s)",
instance->id, pe__node_name(instance_node),
@@ -1048,27 +1116,28 @@ pcmk__instance_matches(const pe_resource_t *instance, const pe_node_t *node,
* \param[in] match_rsc Resource that instance must match (for logging only)
* \param[in] rsc Clone or bundle resource to check for matching instance
* \param[in] node Instance must match this node
- * \param[in] role If not RSC_ROLE_UNKNOWN, instance must match this role
+ * \param[in] role If not pcmk_role_unknown, instance must match this role
* \param[in] current If true, compare instance's original node and role,
* otherwise compare assigned next node and role
*
* \return \p rsc instance matching \p node and \p role if any, otherwise NULL
*/
-static pe_resource_t *
-find_compatible_instance_on_node(const pe_resource_t *match_rsc,
- const pe_resource_t *rsc,
- const pe_node_t *node, enum rsc_role_e role,
+static pcmk_resource_t *
+find_compatible_instance_on_node(const pcmk_resource_t *match_rsc,
+ const pcmk_resource_t *rsc,
+ const pcmk_node_t *node, enum rsc_role_e role,
bool current)
{
GList *instances = NULL;
instances = get_instance_list(rsc);
for (GList *iter = instances; iter != NULL; iter = iter->next) {
- pe_resource_t *instance = (pe_resource_t *) iter->data;
+ pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
if (pcmk__instance_matches(instance, node, role, current)) {
- pe_rsc_trace(match_rsc, "Found %s %s instance %s compatible with %s on %s",
- role == RSC_ROLE_UNKNOWN? "matching" : role2text(role),
+ pe_rsc_trace(match_rsc,
+ "Found %s %s instance %s compatible with %s on %s",
+ role == pcmk_role_unknown? "matching" : role2text(role),
rsc->id, instance->id, match_rsc->id,
pe__node_name(node));
free_instance_list(rsc, instances); // Only frees list, not contents
@@ -1078,7 +1147,7 @@ find_compatible_instance_on_node(const pe_resource_t *match_rsc,
free_instance_list(rsc, instances);
pe_rsc_trace(match_rsc, "No %s %s instance found compatible with %s on %s",
- ((role == RSC_ROLE_UNKNOWN)? "matching" : role2text(role)),
+ ((role == pcmk_role_unknown)? "matching" : role2text(role)),
rsc->id, match_rsc->id, pe__node_name(node));
return NULL;
}
@@ -1089,23 +1158,24 @@ find_compatible_instance_on_node(const pe_resource_t *match_rsc,
*
* \param[in] match_rsc Resource that instance must match
* \param[in] rsc Clone or bundle resource to check for matching instance
- * \param[in] role If not RSC_ROLE_UNKNOWN, instance must match this role
+ * \param[in] role If not pcmk_role_unknown, instance must match this role
* \param[in] current If true, compare instance's original node and role,
* otherwise compare assigned next node and role
*
* \return Compatible (by \p role and \p match_rsc location) instance of \p rsc
* if any, otherwise NULL
*/
-pe_resource_t *
-pcmk__find_compatible_instance(const pe_resource_t *match_rsc,
- const pe_resource_t *rsc, enum rsc_role_e role,
+pcmk_resource_t *
+pcmk__find_compatible_instance(const pcmk_resource_t *match_rsc,
+ const pcmk_resource_t *rsc, enum rsc_role_e role,
bool current)
{
- pe_resource_t *instance = NULL;
+ pcmk_resource_t *instance = NULL;
GList *nodes = NULL;
- const pe_node_t *node = match_rsc->fns->location(match_rsc, NULL, current);
+ const pcmk_node_t *node = NULL;
// If match_rsc has a node, check only that node
+ node = match_rsc->fns->location(match_rsc, NULL, current);
if (node != NULL) {
return find_compatible_instance_on_node(match_rsc, rsc, node, role,
current);
@@ -1117,7 +1187,7 @@ pcmk__find_compatible_instance(const pe_resource_t *match_rsc,
for (GList *iter = nodes; (iter != NULL) && (instance == NULL);
iter = iter->next) {
instance = find_compatible_instance_on_node(match_rsc, rsc,
- (pe_node_t *) iter->data,
+ (pcmk_node_t *) iter->data,
role, current);
}
@@ -1136,14 +1206,15 @@ pcmk__find_compatible_instance(const pe_resource_t *match_rsc,
* \param[in] first 'First' action in an ordering
* \param[in] then 'Then' action in an ordering
* \param[in,out] then_instance 'Then' instance that has no interleave match
- * \param[in] type Group of enum pe_ordering flags to apply
+ * \param[in] type Group of enum pcmk__action_relation_flags
* \param[in] current If true, "then" action is stopped or demoted
*
* \return true if \p then_instance was unassigned, otherwise false
*/
static bool
-unassign_if_mandatory(const pe_action_t *first, const pe_action_t *then,
- pe_resource_t *then_instance, uint32_t type, bool current)
+unassign_if_mandatory(const pcmk_action_t *first, const pcmk_action_t *then,
+ pcmk_resource_t *then_instance, uint32_t type,
+ bool current)
{
// Allow "then" instance to go down even without an interleave match
if (current) {
@@ -1155,13 +1226,13 @@ unassign_if_mandatory(const pe_action_t *first, const pe_action_t *then,
/* If the "first" action must be runnable, but there is no "first"
* instance, the "then" instance must not be allowed to come up.
*/
- } else if (pcmk_any_flags_set(type, pe_order_runnable_left
- |pe_order_implies_then)) {
+ } else if (pcmk_any_flags_set(type, pcmk__ar_unrunnable_first_blocks
+ |pcmk__ar_first_implies_then)) {
pe_rsc_info(then->rsc,
"Inhibiting %s from being active "
"because there is no %s instance to interleave",
then_instance->id, first->rsc->id);
- return pcmk__assign_resource(then_instance, NULL, true);
+ return pcmk__assign_resource(then_instance, NULL, true, true);
}
return false;
}
@@ -1181,13 +1252,13 @@ unassign_if_mandatory(const pe_action_t *first, const pe_action_t *then,
* bundle container, its containerized resource) that matches
* \p action_name and \p node if any, otherwise NULL
*/
-static pe_action_t *
-find_instance_action(const pe_action_t *action, const pe_resource_t *instance,
- const char *action_name, const pe_node_t *node,
+static pcmk_action_t *
+find_instance_action(const pcmk_action_t *action, const pcmk_resource_t *instance,
+ const char *action_name, const pcmk_node_t *node,
bool for_first)
{
- const pe_resource_t *rsc = NULL;
- pe_action_t *matching_action = NULL;
+ const pcmk_resource_t *rsc = NULL;
+ pcmk_action_t *matching_action = NULL;
/* If instance is a bundle container, sometimes we should interleave the
* action for the container itself, and sometimes for the containerized
@@ -1204,15 +1275,15 @@ find_instance_action(const pe_action_t *action, const pe_resource_t *instance,
* everything except promote and demote (which can only be performed on the
* containerized resource).
*/
- if ((for_first && !pcmk__str_any_of(action->task, CRMD_ACTION_STOP,
- CRMD_ACTION_STOPPED, NULL))
+ if ((for_first && !pcmk__str_any_of(action->task, PCMK_ACTION_STOP,
+ PCMK_ACTION_STOPPED, NULL))
- || (!for_first && pcmk__str_any_of(action->task, CRMD_ACTION_PROMOTE,
- CRMD_ACTION_PROMOTED,
- CRMD_ACTION_DEMOTE,
- CRMD_ACTION_DEMOTED, NULL))) {
+ || (!for_first && pcmk__str_any_of(action->task, PCMK_ACTION_PROMOTE,
+ PCMK_ACTION_PROMOTED,
+ PCMK_ACTION_DEMOTE,
+ PCMK_ACTION_DEMOTED, NULL))) {
- rsc = pcmk__get_rsc_in_container(instance);
+ rsc = pe__get_rsc_in_container(instance);
}
if (rsc == NULL) {
rsc = instance; // No containerized resource, use instance itself
@@ -1225,11 +1296,12 @@ find_instance_action(const pe_action_t *action, const pe_resource_t *instance,
return matching_action;
}
- if (pcmk_is_set(instance->flags, pe_rsc_orphan)
- || pcmk__str_any_of(action_name, RSC_STOP, RSC_DEMOTE, NULL)) {
+ if (pcmk_is_set(instance->flags, pcmk_rsc_removed)
+ || pcmk__str_any_of(action_name, PCMK_ACTION_STOP, PCMK_ACTION_DEMOTE,
+ NULL)) {
crm_trace("No %s action found for %s%s",
action_name,
- pcmk_is_set(instance->flags, pe_rsc_orphan)? "orphan " : "",
+ pcmk_is_set(instance->flags, pcmk_rsc_removed)? "orphan " : "",
instance->id);
} else {
crm_err("No %s action found for %s to interleave (bug?)",
@@ -1252,20 +1324,23 @@ find_instance_action(const pe_action_t *action, const pe_resource_t *instance,
* \return Original action name for \p action
*/
static const char *
-orig_action_name(const pe_action_t *action)
+orig_action_name(const pcmk_action_t *action)
{
- const pe_resource_t *instance = action->rsc->children->data; // Any instance
+ // Any instance will do
+ const pcmk_resource_t *instance = action->rsc->children->data;
+
char *action_type = NULL;
const char *action_name = action->task;
- enum action_tasks orig_task = no_action;
+ enum action_tasks orig_task = pcmk_action_unspecified;
- if (pcmk__strcase_any_of(action->task, CRMD_ACTION_NOTIFY,
- CRMD_ACTION_NOTIFIED, NULL)) {
+ if (pcmk__strcase_any_of(action->task, PCMK_ACTION_NOTIFY,
+ PCMK_ACTION_NOTIFIED, NULL)) {
// action->uuid is RSC_(confirmed-){pre,post}_notify_ACTION_INTERVAL
CRM_CHECK(parse_op_key(action->uuid, NULL, &action_type, NULL),
- return task2text(no_action));
+ return task2text(pcmk_action_unspecified));
action_name = strstr(action_type, "_notify_");
- CRM_CHECK(action_name != NULL, return task2text(no_action));
+ CRM_CHECK(action_name != NULL,
+ return task2text(pcmk_action_unspecified));
action_name += strlen("_notify_");
}
orig_task = get_complex_task(instance, action_name);
@@ -1286,16 +1361,16 @@ orig_action_name(const pe_action_t *action)
* \param[in,out] then 'Then' action in an ordering
* \param[in] node If not NULL, limit scope of ordering to this node
* \param[in] filter Action flags to limit scope of certain updates (may
- * include pe_action_optional to affect only mandatory
- * actions, and pe_action_runnable to affect only
- * runnable actions)
- * \param[in] type Group of enum pe_ordering flags to apply
+ * include pcmk_action_optional to affect only
+ * mandatory actions, and pcmk_action_runnable to
+ * affect only runnable actions)
+ * \param[in] type Group of enum pcmk__action_relation_flags to apply
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
static uint32_t
-update_interleaved_actions(pe_action_t *first, pe_action_t *then,
- const pe_node_t *node, uint32_t filter,
+update_interleaved_actions(pcmk_action_t *first, pcmk_action_t *then,
+ const pcmk_node_t *node, uint32_t filter,
uint32_t type)
{
GList *instances = NULL;
@@ -1303,23 +1378,23 @@ update_interleaved_actions(pe_action_t *first, pe_action_t *then,
const char *orig_first_task = orig_action_name(first);
// Stops and demotes must be interleaved with instance on current node
- bool current = pcmk__ends_with(first->uuid, "_" CRMD_ACTION_STOPPED "_0")
+ bool current = pcmk__ends_with(first->uuid, "_" PCMK_ACTION_STOPPED "_0")
|| pcmk__ends_with(first->uuid,
- "_" CRMD_ACTION_DEMOTED "_0");
+ "_" PCMK_ACTION_DEMOTED "_0");
// Update the specified actions for each "then" instance individually
instances = get_instance_list(then->rsc);
for (GList *iter = instances; iter != NULL; iter = iter->next) {
- pe_resource_t *first_instance = NULL;
- pe_resource_t *then_instance = iter->data;
+ pcmk_resource_t *first_instance = NULL;
+ pcmk_resource_t *then_instance = iter->data;
- pe_action_t *first_action = NULL;
- pe_action_t *then_action = NULL;
+ pcmk_action_t *first_action = NULL;
+ pcmk_action_t *then_action = NULL;
// Find a "first" instance to interleave with this "then" instance
first_instance = pcmk__find_compatible_instance(then_instance,
first->rsc,
- RSC_ROLE_UNKNOWN,
+ pcmk_role_unknown,
current);
if (first_instance == NULL) { // No instance can be interleaved
@@ -1366,10 +1441,10 @@ update_interleaved_actions(pe_action_t *first, pe_action_t *then,
* \return true if \p first and \p then can be interleaved, otherwise false
*/
static bool
-can_interleave_actions(const pe_action_t *first, const pe_action_t *then)
+can_interleave_actions(const pcmk_action_t *first, const pcmk_action_t *then)
{
bool interleave = false;
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
if ((first->rsc == NULL) || (then->rsc == NULL)) {
crm_trace("Not interleaving %s with %s: not resource actions",
@@ -1383,7 +1458,8 @@ can_interleave_actions(const pe_action_t *first, const pe_action_t *then)
return false;
}
- if ((first->rsc->variant < pe_clone) || (then->rsc->variant < pe_clone)) {
+ if ((first->rsc->variant < pcmk_rsc_variant_clone)
+ || (then->rsc->variant < pcmk_rsc_variant_clone)) {
crm_trace("Not interleaving %s with %s: not clones or bundles",
first->uuid, then->uuid);
return false;
@@ -1418,19 +1494,19 @@ can_interleave_actions(const pe_action_t *first, const pe_action_t *then)
* \param[in] node If not NULL, limit scope of ordering to this node
* \param[in] flags Action flags for \p first for ordering purposes
* \param[in] filter Action flags to limit scope of certain updates (may
- * include pe_action_optional to affect only mandatory
- * actions, and pe_action_runnable to affect only
- * runnable actions)
- * \param[in] type Group of enum pe_ordering flags to apply
+ * include pcmk_action_optional to affect only
+ * mandatory actions, and pcmk_action_runnable to
+ * affect only runnable actions)
+ * \param[in] type Group of enum pcmk__action_relation_flags to apply
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
static uint32_t
-update_noninterleaved_actions(pe_resource_t *instance, pe_action_t *first,
- const pe_action_t *then, const pe_node_t *node,
+update_noninterleaved_actions(pcmk_resource_t *instance, pcmk_action_t *first,
+ const pcmk_action_t *then, const pcmk_node_t *node,
uint32_t flags, uint32_t filter, uint32_t type)
{
- pe_action_t *instance_action = NULL;
+ pcmk_action_t *instance_action = NULL;
uint32_t instance_flags = 0;
uint32_t changed = pcmk__updated_none;
@@ -1443,7 +1519,7 @@ update_noninterleaved_actions(pe_resource_t *instance, pe_action_t *first,
// Check whether action is runnable
instance_flags = instance->cmds->action_flags(instance_action, node);
- if (!pcmk_is_set(instance_flags, pe_action_runnable)) {
+ if (!pcmk_is_set(instance_flags, pcmk_action_runnable)) {
return changed;
}
@@ -1456,7 +1532,7 @@ update_noninterleaved_actions(pe_resource_t *instance, pe_action_t *first,
if (pcmk_is_set(changed, pcmk__updated_then)) {
for (GList *after_iter = instance_action->actions_after;
after_iter != NULL; after_iter = after_iter->next) {
- pe_action_wrapper_t *after = after_iter->data;
+ pcmk__related_action_t *after = after_iter->data;
pcmk__update_action_for_orderings(after->action, instance->cluster);
}
@@ -1474,26 +1550,28 @@ update_noninterleaved_actions(pe_resource_t *instance, pe_action_t *first,
* appropriate for the ordering. Effects may cascade to other orderings
* involving the actions as well.
*
- * \param[in,out] first 'First' action in an ordering
- * \param[in,out] then 'Then' action in an ordering
- * \param[in] node If not NULL, limit scope of ordering to this node
- * (only used when interleaving instances)
- * \param[in] flags Action flags for \p first for ordering purposes
- * \param[in] filter Action flags to limit scope of certain updates (may
- * include pe_action_optional to affect only mandatory
- * actions, and pe_action_runnable to affect only
- * runnable actions)
- * \param[in] type Group of enum pe_ordering flags to apply
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] first 'First' action in an ordering
+ * \param[in,out] then 'Then' action in an ordering
+ * \param[in] node If not NULL, limit scope of ordering to this node
+ * (only used when interleaving instances)
+ * \param[in] flags Action flags for \p first for ordering purposes
+ * \param[in] filter Action flags to limit scope of certain updates (may
+ * include pcmk_action_optional to affect only
+ * mandatory actions, and pcmk_action_runnable to
+ * affect only runnable actions)
+ * \param[in] type Group of enum pcmk__action_relation_flags to apply
+ * \param[in,out] scheduler Scheduler data
*
* \return Group of enum pcmk__updated flags indicating what was updated
*/
uint32_t
-pcmk__instance_update_ordered_actions(pe_action_t *first, pe_action_t *then,
- const pe_node_t *node, uint32_t flags,
+pcmk__instance_update_ordered_actions(pcmk_action_t *first, pcmk_action_t *then,
+ const pcmk_node_t *node, uint32_t flags,
uint32_t filter, uint32_t type,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
+ CRM_ASSERT((first != NULL) && (then != NULL) && (scheduler != NULL));
+
if (then->rsc == NULL) {
return pcmk__updated_none;
@@ -1506,11 +1584,11 @@ pcmk__instance_update_ordered_actions(pe_action_t *first, pe_action_t *then,
// Update actions for the clone or bundle resource itself
changed |= pcmk__update_ordered_actions(first, then, node, flags,
- filter, type, data_set);
+ filter, type, scheduler);
// Update the 'then' clone instances or bundle containers individually
for (GList *iter = instances; iter != NULL; iter = iter->next) {
- pe_resource_t *instance = iter->data;
+ pcmk_resource_t *instance = iter->data;
changed |= update_noninterleaved_actions(instance, first, then,
node, flags, filter, type);
@@ -1536,25 +1614,26 @@ pcmk__instance_update_ordered_actions(pe_action_t *first, pe_action_t *then,
*
* \return Flags appropriate to \p action on \p node
*/
-enum pe_action_flags
-pcmk__collective_action_flags(pe_action_t *action, const GList *instances,
- const pe_node_t *node)
+uint32_t
+pcmk__collective_action_flags(pcmk_action_t *action, const GList *instances,
+ const pcmk_node_t *node)
{
bool any_runnable = false;
- enum pe_action_flags flags;
const char *action_name = orig_action_name(action);
// Set original assumptions (optional and runnable may be cleared below)
- flags = pe_action_optional|pe_action_runnable|pe_action_pseudo;
+ uint32_t flags = pcmk_action_optional
+ |pcmk_action_runnable
+ |pcmk_action_pseudo;
for (const GList *iter = instances; iter != NULL; iter = iter->next) {
- const pe_resource_t *instance = iter->data;
- const pe_node_t *instance_node = NULL;
- pe_action_t *instance_action = NULL;
- enum pe_action_flags instance_flags;
+ const pcmk_resource_t *instance = iter->data;
+ const pcmk_node_t *instance_node = NULL;
+ pcmk_action_t *instance_action = NULL;
+ uint32_t instance_flags;
// Node is relevant only to primitive instances
- if (instance->variant == pe_native) {
+ if (instance->variant == pcmk_rsc_variant_primitive) {
instance_node = node;
}
@@ -1573,16 +1652,17 @@ pcmk__collective_action_flags(pe_action_t *action, const GList *instances,
instance_flags = instance->cmds->action_flags(instance_action, node);
// If any instance action is mandatory, so is the collective action
- if (pcmk_is_set(flags, pe_action_optional)
- && !pcmk_is_set(instance_flags, pe_action_optional)) {
+ if (pcmk_is_set(flags, pcmk_action_optional)
+ && !pcmk_is_set(instance_flags, pcmk_action_optional)) {
pe_rsc_trace(instance, "%s is mandatory because %s is",
action->uuid, instance_action->uuid);
- pe__clear_action_summary_flags(flags, action, pe_action_optional);
- pe__clear_action_flags(action, pe_action_optional);
+ pe__clear_action_summary_flags(flags, action,
+ pcmk_action_optional);
+ pe__clear_action_flags(action, pcmk_action_optional);
}
// If any instance action is runnable, so is the collective action
- if (pcmk_is_set(instance_flags, pe_action_runnable)) {
+ if (pcmk_is_set(instance_flags, pcmk_action_runnable)) {
any_runnable = true;
}
}
@@ -1591,69 +1671,11 @@ pcmk__collective_action_flags(pe_action_t *action, const GList *instances,
pe_rsc_trace(action->rsc,
"%s is not runnable because no instance can run %s",
action->uuid, action_name);
- pe__clear_action_summary_flags(flags, action, pe_action_runnable);
+ pe__clear_action_summary_flags(flags, action, pcmk_action_runnable);
if (node == NULL) {
- pe__clear_action_flags(action, pe_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
}
}
return flags;
}
-
-/*!
- * \internal
- * \brief Add a collective resource's colocations to a list for an instance
- *
- * \param[in,out] list Colocation list to add to
- * \param[in] instance Clone or bundle instance or instance group member
- * \param[in] collective Clone or bundle resource with colocations to add
- * \param[in] with_this If true, add collective's "with this" colocations,
- * otherwise add its "this with" colocations
- */
-void
-pcmk__add_collective_constraints(GList **list, const pe_resource_t *instance,
- const pe_resource_t *collective,
- bool with_this)
-{
- const GList *colocations = NULL;
- bool everywhere = false;
-
- CRM_CHECK((list != NULL) && (instance != NULL), return);
-
- if (collective == NULL) {
- return;
- }
- switch (collective->variant) {
- case pe_clone:
- case pe_container:
- break;
- default:
- return;
- }
-
- everywhere = can_run_everywhere(collective);
-
- if (with_this) {
- colocations = collective->rsc_cons_lhs;
- } else {
- colocations = collective->rsc_cons;
- }
-
- for (const GList *iter = colocations; iter != NULL; iter = iter->next) {
- const pcmk__colocation_t *colocation = iter->data;
-
- if (with_this
- && !pcmk__colocation_has_influence(colocation, instance)) {
- continue;
- }
- if (!everywhere || (colocation->score < 0)
- || (!with_this && (colocation->score == INFINITY))) {
-
- if (with_this) {
- pcmk__add_with_this(list, colocation);
- } else {
- pcmk__add_this_with(list, colocation);
- }
- }
- }
-}
diff --git a/lib/pacemaker/pcmk_sched_location.c b/lib/pacemaker/pcmk_sched_location.c
index b4ce4ff..eab9481 100644
--- a/lib/pacemaker/pcmk_sched_location.c
+++ b/lib/pacemaker/pcmk_sched_location.c
@@ -14,13 +14,14 @@
#include <crm/crm.h>
#include <crm/pengine/status.h>
+#include <crm/pengine/rules.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
static int
get_node_score(const char *rule, const char *score, bool raw,
- pe_node_t *node, pe_resource_t *rsc)
+ pcmk_node_t *node, pcmk_resource_t *rsc)
{
int score_f = 0;
@@ -31,7 +32,11 @@ get_node_score(const char *rule, const char *score, bool raw,
score_f = char2score(score);
} else {
- const char *attr_score = pe_node_attribute_calculated(node, score, rsc);
+ const char *attr_score = NULL;
+
+ attr_score = pe__node_attribute_calculated(node, score, rsc,
+ pcmk__rsc_node_current,
+ false);
if (attr_score == NULL) {
crm_debug("Rule %s: %s did not have a value for %s",
@@ -48,9 +53,8 @@ get_node_score(const char *rule, const char *score, bool raw,
}
static pe__location_t *
-generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml,
+generate_location_rule(pcmk_resource_t *rsc, xmlNode *rule_xml,
const char *discovery, crm_time_t *next_change,
- pe_working_set_t *data_set,
pe_re_match_data_t *re_match_data)
{
const char *rule_id = NULL;
@@ -58,8 +62,8 @@ generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml,
const char *boolean = NULL;
const char *role = NULL;
- GList *gIter = NULL;
- GList *match_L = NULL;
+ GList *iter = NULL;
+ GList *nodes = NULL;
bool do_and = true;
bool accept = true;
@@ -68,7 +72,7 @@ generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml,
pe__location_t *location_rule = NULL;
- rule_xml = expand_idref(rule_xml, data_set->input);
+ rule_xml = expand_idref(rule_xml, rsc->cluster->input);
if (rule_xml == NULL) {
return NULL;
}
@@ -79,7 +83,7 @@ generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml,
crm_trace("Processing rule: %s", rule_id);
- if ((role != NULL) && (text2role(role) == RSC_ROLE_UNKNOWN)) {
+ if ((role != NULL) && (text2role(role) == pcmk_role_unknown)) {
pe_err("Bad role specified for %s: %s", rule_id, role);
return NULL;
}
@@ -95,8 +99,7 @@ generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml,
do_and = false;
}
- location_rule = pcmk__new_location(rule_id, rsc, 0, discovery, NULL,
- data_set);
+ location_rule = pcmk__new_location(rule_id, rsc, 0, discovery, NULL);
if (location_rule == NULL) {
return NULL;
@@ -116,36 +119,34 @@ generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml,
if (role != NULL) {
crm_trace("Setting role filter: %s", role);
location_rule->role_filter = text2role(role);
- if (location_rule->role_filter == RSC_ROLE_UNPROMOTED) {
+ if (location_rule->role_filter == pcmk_role_unpromoted) {
/* Any promotable clone cannot be promoted without being in the
* unpromoted role first. Ergo, any constraint for the unpromoted
* role applies to every role.
*/
- location_rule->role_filter = RSC_ROLE_UNKNOWN;
+ location_rule->role_filter = pcmk_role_unknown;
}
}
if (do_and) {
- GList *gIter = NULL;
-
- match_L = pcmk__copy_node_list(data_set->nodes, true);
- for (gIter = match_L; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
+ nodes = pcmk__copy_node_list(rsc->cluster->nodes, true);
+ for (iter = nodes; iter != NULL; iter = iter->next) {
+ pcmk_node_t *node = iter->data;
node->weight = get_node_score(rule_id, score, raw_score, node, rsc);
}
}
- for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
+ for (iter = rsc->cluster->nodes; iter != NULL; iter = iter->next) {
int score_f = 0;
- pe_node_t *node = (pe_node_t *) gIter->data;
+ pcmk_node_t *node = iter->data;
pe_match_data_t match_data = {
.re = re_match_data,
- .params = pe_rsc_params(rsc, node, data_set),
+ .params = pe_rsc_params(rsc, node, rsc->cluster),
.meta = rsc->meta,
};
- accept = pe_test_rule(rule_xml, node->details->attrs, RSC_ROLE_UNKNOWN,
- data_set->now, next_change, &match_data);
+ accept = pe_test_rule(rule_xml, node->details->attrs, pcmk_role_unknown,
+ rsc->cluster->now, next_change, &match_data);
crm_trace("Rule %s %s on %s", ID(rule_xml), accept? "passed" : "failed",
pe__node_name(node));
@@ -153,14 +154,14 @@ generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml,
score_f = get_node_score(rule_id, score, raw_score, node, rsc);
if (accept) {
- pe_node_t *local = pe_find_node_id(match_L, node->details->id);
+ pcmk_node_t *local = pe_find_node_id(nodes, node->details->id);
if ((local == NULL) && do_and) {
continue;
} else if (local == NULL) {
local = pe__copy_node(node);
- match_L = g_list_append(match_L, local);
+ nodes = g_list_append(nodes, local);
}
if (!do_and) {
@@ -171,10 +172,10 @@ generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml,
} else if (do_and && !accept) {
// Remove it
- pe_node_t *delete = pe_find_node_id(match_L, node->details->id);
+ pcmk_node_t *delete = pe_find_node_id(nodes, node->details->id);
if (delete != NULL) {
- match_L = g_list_remove(match_L, delete);
+ nodes = g_list_remove(nodes, delete);
crm_trace("%s did not match", pe__node_name(node));
}
free(delete);
@@ -185,7 +186,7 @@ generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml,
free((char *)score);
}
- location_rule->node_list_rh = match_L;
+ location_rule->node_list_rh = nodes;
if (location_rule->node_list_rh == NULL) {
crm_trace("No matching nodes for rule %s", rule_id);
return NULL;
@@ -197,15 +198,15 @@ generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml,
}
static void
-unpack_rsc_location(xmlNode *xml_obj, pe_resource_t *rsc, const char *role,
- const char *score, pe_working_set_t *data_set,
- pe_re_match_data_t *re_match_data)
+unpack_rsc_location(xmlNode *xml_obj, pcmk_resource_t *rsc, const char *role,
+ const char *score, pe_re_match_data_t *re_match_data)
{
pe__location_t *location = NULL;
const char *rsc_id = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE);
const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
const char *node = crm_element_value(xml_obj, XML_CIB_TAG_NODE);
- const char *discovery = crm_element_value(xml_obj, XML_LOCATION_ATTR_DISCOVERY);
+ const char *discovery = crm_element_value(xml_obj,
+ XML_LOCATION_ATTR_DISCOVERY);
if (rsc == NULL) {
pcmk__config_warn("Ignoring constraint '%s' because resource '%s' "
@@ -219,13 +220,12 @@ unpack_rsc_location(xmlNode *xml_obj, pe_resource_t *rsc, const char *role,
if ((node != NULL) && (score != NULL)) {
int score_i = char2score(score);
- pe_node_t *match = pe_find_node(data_set->nodes, node);
+ pcmk_node_t *match = pe_find_node(rsc->cluster->nodes, node);
if (!match) {
return;
}
- location = pcmk__new_location(id, rsc, score_i, discovery, match,
- data_set);
+ location = pcmk__new_location(id, rsc, score_i, discovery, match);
} else {
bool empty = true;
@@ -240,7 +240,7 @@ unpack_rsc_location(xmlNode *xml_obj, pe_resource_t *rsc, const char *role,
empty = false;
crm_trace("Unpacking %s/%s", id, ID(rule_xml));
generate_location_rule(rsc, rule_xml, discovery, next_change,
- data_set, re_match_data);
+ re_match_data);
}
if (empty) {
@@ -254,7 +254,8 @@ unpack_rsc_location(xmlNode *xml_obj, pe_resource_t *rsc, const char *role,
if (crm_time_is_defined(next_change)) {
time_t t = (time_t) crm_time_get_seconds_since_epoch(next_change);
- pe__update_recheck_time(t, data_set);
+ pe__update_recheck_time(t, rsc->cluster,
+ "location rule evaluation");
}
crm_time_free(next_change);
return;
@@ -265,18 +266,18 @@ unpack_rsc_location(xmlNode *xml_obj, pe_resource_t *rsc, const char *role,
}
if ((location != NULL) && (role != NULL)) {
- if (text2role(role) == RSC_ROLE_UNKNOWN) {
+ if (text2role(role) == pcmk_role_unknown) {
pe_err("Invalid constraint %s: Bad role %s", id, role);
return;
} else {
enum rsc_role_e r = text2role(role);
- switch(r) {
- case RSC_ROLE_UNKNOWN:
- case RSC_ROLE_STARTED:
- case RSC_ROLE_UNPROMOTED:
+ switch (r) {
+ case pcmk_role_unknown:
+ case pcmk_role_started:
+ case pcmk_role_unpromoted:
/* Applies to all */
- location->role_filter = RSC_ROLE_UNKNOWN;
+ location->role_filter = pcmk_role_unknown;
break;
default:
location->role_filter = r;
@@ -287,23 +288,22 @@ unpack_rsc_location(xmlNode *xml_obj, pe_resource_t *rsc, const char *role,
}
static void
-unpack_simple_location(xmlNode *xml_obj, pe_working_set_t *data_set)
+unpack_simple_location(xmlNode *xml_obj, pcmk_scheduler_t *scheduler)
{
const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
const char *value = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE);
if (value) {
- pe_resource_t *rsc;
+ pcmk_resource_t *rsc;
- rsc = pcmk__find_constraint_resource(data_set->resources, value);
- unpack_rsc_location(xml_obj, rsc, NULL, NULL, data_set, NULL);
+ rsc = pcmk__find_constraint_resource(scheduler->resources, value);
+ unpack_rsc_location(xml_obj, rsc, NULL, NULL, NULL);
}
value = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE_PATTERN);
if (value) {
regex_t *r_patt = calloc(1, sizeof(regex_t));
bool invert = false;
- GList *rIter = NULL;
if (value[0] == '!') {
value++;
@@ -318,13 +318,15 @@ unpack_simple_location(xmlNode *xml_obj, pe_working_set_t *data_set)
return;
}
- for (rIter = data_set->resources; rIter; rIter = rIter->next) {
- pe_resource_t *r = rIter->data;
+ for (GList *iter = scheduler->resources; iter != NULL;
+ iter = iter->next) {
+
+ pcmk_resource_t *r = iter->data;
int nregs = 0;
regmatch_t *pmatch = NULL;
int status;
- if(r_patt->re_nsub > 0) {
+ if (r_patt->re_nsub > 0) {
nregs = r_patt->re_nsub + 1;
} else {
nregs = 1;
@@ -341,13 +343,12 @@ unpack_simple_location(xmlNode *xml_obj, pe_working_set_t *data_set)
};
crm_debug("'%s' matched '%s' for %s", r->id, value, id);
- unpack_rsc_location(xml_obj, r, NULL, NULL, data_set,
- &re_match_data);
+ unpack_rsc_location(xml_obj, r, NULL, NULL, &re_match_data);
} else if (invert && (status != 0)) {
crm_debug("'%s' is an inverted match of '%s' for %s",
r->id, value, id);
- unpack_rsc_location(xml_obj, r, NULL, NULL, data_set, NULL);
+ unpack_rsc_location(xml_obj, r, NULL, NULL, NULL);
} else {
crm_trace("'%s' does not match '%s' for %s", r->id, value, id);
@@ -364,13 +365,13 @@ unpack_simple_location(xmlNode *xml_obj, pe_working_set_t *data_set)
// \return Standard Pacemaker return code
static int
unpack_location_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
const char *id = NULL;
const char *rsc_id = NULL;
const char *state = NULL;
- pe_resource_t *rsc = NULL;
- pe_tag_t *tag = NULL;
+ pcmk_resource_t *rsc = NULL;
+ pcmk_tag_t *tag = NULL;
xmlNode *rsc_set = NULL;
*expanded_xml = NULL;
@@ -380,12 +381,12 @@ unpack_location_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
id = ID(xml_obj);
if (id == NULL) {
pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
- crm_element_name(xml_obj));
+ xml_obj->name);
return pcmk_rc_unpack_error;
}
// Check whether there are any resource sets with template or tag references
- *expanded_xml = pcmk__expand_tags_in_sets(xml_obj, data_set);
+ *expanded_xml = pcmk__expand_tags_in_sets(xml_obj, scheduler);
if (*expanded_xml != NULL) {
crm_log_xml_trace(*expanded_xml, "Expanded rsc_location");
return pcmk_rc_ok;
@@ -396,7 +397,7 @@ unpack_location_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
return pcmk_rc_ok;
}
- if (!pcmk__valid_resource_or_tag(data_set, rsc_id, &rsc, &tag)) {
+ if (!pcmk__valid_resource_or_tag(scheduler, rsc_id, &rsc, &tag)) {
pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
"valid resource or tag", id, rsc_id);
return pcmk_rc_unpack_error;
@@ -410,9 +411,9 @@ unpack_location_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
*expanded_xml = copy_xml(xml_obj);
- // Convert template/tag reference in "rsc" into resource_set under constraint
+ // Convert any template or tag reference into constraint resource_set
if (!pcmk__tag_to_set(*expanded_xml, &rsc_set, XML_LOC_ATTR_SOURCE,
- false, data_set)) {
+ false, scheduler)) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
return pcmk_rc_unpack_error;
@@ -437,10 +438,11 @@ unpack_location_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
// \return Standard Pacemaker return code
static int
-unpack_location_set(xmlNode *location, xmlNode *set, pe_working_set_t *data_set)
+unpack_location_set(xmlNode *location, xmlNode *set,
+ pcmk_scheduler_t *scheduler)
{
xmlNode *xml_rsc = NULL;
- pe_resource_t *resource = NULL;
+ pcmk_resource_t *resource = NULL;
const char *set_id;
const char *role;
const char *local_score;
@@ -461,7 +463,7 @@ unpack_location_set(xmlNode *location, xmlNode *set, pe_working_set_t *data_set)
for (xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
- resource = pcmk__find_constraint_resource(data_set->resources,
+ resource = pcmk__find_constraint_resource(scheduler->resources,
ID(xml_rsc));
if (resource == NULL) {
pcmk__config_err("%s: No resource found for %s",
@@ -469,15 +471,14 @@ unpack_location_set(xmlNode *location, xmlNode *set, pe_working_set_t *data_set)
return pcmk_rc_unpack_error;
}
- unpack_rsc_location(location, resource, role, local_score, data_set,
- NULL);
+ unpack_rsc_location(location, resource, role, local_score, NULL);
}
return pcmk_rc_ok;
}
void
-pcmk__unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set)
+pcmk__unpack_location(xmlNode *xml_obj, pcmk_scheduler_t *scheduler)
{
xmlNode *set = NULL;
bool any_sets = false;
@@ -485,7 +486,7 @@ pcmk__unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set)
xmlNode *orig_xml = NULL;
xmlNode *expanded_xml = NULL;
- if (unpack_location_tags(xml_obj, &expanded_xml, data_set) != pcmk_rc_ok) {
+ if (unpack_location_tags(xml_obj, &expanded_xml, scheduler) != pcmk_rc_ok) {
return;
}
@@ -498,9 +499,9 @@ pcmk__unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set)
set = crm_next_same_xml(set)) {
any_sets = true;
- set = expand_idref(set, data_set->input);
+ set = expand_idref(set, scheduler->input);
if ((set == NULL) // Configuration error, message already logged
- || (unpack_location_set(xml_obj, set, data_set) != pcmk_rc_ok)) {
+ || (unpack_location_set(xml_obj, set, scheduler) != pcmk_rc_ok)) {
if (expanded_xml) {
free_xml(expanded_xml);
@@ -515,29 +516,27 @@ pcmk__unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set)
}
if (!any_sets) {
- unpack_simple_location(xml_obj, data_set);
+ unpack_simple_location(xml_obj, scheduler);
}
}
/*!
* \internal
- * \brief Add a new location constraint to a cluster working set
+ * \brief Add a new location constraint to scheduler data
*
* \param[in] id XML ID of location constraint
* \param[in,out] rsc Resource in location constraint
- * \param[in] node_weight Constraint score
+ * \param[in] node_score Constraint score
* \param[in] discover_mode Resource discovery option for constraint
* \param[in] node Node in constraint (or NULL if rule-based)
- * \param[in,out] data_set Cluster working set to add constraint to
*
* \return Newly allocated location constraint
- * \note The result will be added to \p data_set and should not be freed
- * separately.
+ * \note The result will be added to the cluster (via \p rsc) and should not be
+ * freed separately.
*/
pe__location_t *
-pcmk__new_location(const char *id, pe_resource_t *rsc,
- int node_weight, const char *discover_mode,
- pe_node_t *node, pe_working_set_t *data_set)
+pcmk__new_location(const char *id, pcmk_resource_t *rsc,
+ int node_score, const char *discover_mode, pcmk_node_t *node)
{
pe__location_t *new_con = NULL;
@@ -550,7 +549,7 @@ pcmk__new_location(const char *id, pe_resource_t *rsc,
return NULL;
} else if (node == NULL) {
- CRM_CHECK(node_weight == 0, return NULL);
+ CRM_CHECK(node_score == 0, return NULL);
}
new_con = calloc(1, sizeof(pe__location_t));
@@ -558,17 +557,17 @@ pcmk__new_location(const char *id, pe_resource_t *rsc,
new_con->id = strdup(id);
new_con->rsc_lh = rsc;
new_con->node_list_rh = NULL;
- new_con->role_filter = RSC_ROLE_UNKNOWN;
+ new_con->role_filter = pcmk_role_unknown;
if (pcmk__str_eq(discover_mode, "always",
pcmk__str_null_matches|pcmk__str_casei)) {
- new_con->discover_mode = pe_discover_always;
+ new_con->discover_mode = pcmk_probe_always;
} else if (pcmk__str_eq(discover_mode, "never", pcmk__str_casei)) {
- new_con->discover_mode = pe_discover_never;
+ new_con->discover_mode = pcmk_probe_never;
} else if (pcmk__str_eq(discover_mode, "exclusive", pcmk__str_casei)) {
- new_con->discover_mode = pe_discover_exclusive;
+ new_con->discover_mode = pcmk_probe_exclusive;
rsc->exclusive_discover = TRUE;
} else {
@@ -577,14 +576,14 @@ pcmk__new_location(const char *id, pe_resource_t *rsc,
}
if (node != NULL) {
- pe_node_t *copy = pe__copy_node(node);
+ pcmk_node_t *copy = pe__copy_node(node);
- copy->weight = node_weight;
+ copy->weight = node_score;
new_con->node_list_rh = g_list_prepend(NULL, copy);
}
- data_set->placement_constraints = g_list_prepend(data_set->placement_constraints,
- new_con);
+ rsc->cluster->placement_constraints = g_list_prepend(
+ rsc->cluster->placement_constraints, new_con);
rsc->rsc_location = g_list_prepend(rsc->rsc_location, new_con);
}
@@ -595,12 +594,12 @@ pcmk__new_location(const char *id, pe_resource_t *rsc,
* \internal
* \brief Apply all location constraints
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__apply_locations(pe_working_set_t *data_set)
+pcmk__apply_locations(pcmk_scheduler_t *scheduler)
{
- for (GList *iter = data_set->placement_constraints;
+ for (GList *iter = scheduler->placement_constraints;
iter != NULL; iter = iter->next) {
pe__location_t *location = iter->data;
@@ -619,14 +618,14 @@ pcmk__apply_locations(pe_working_set_t *data_set)
* apply_location() method should be used instead in most cases.
*/
void
-pcmk__apply_location(pe_resource_t *rsc, pe__location_t *location)
+pcmk__apply_location(pcmk_resource_t *rsc, pe__location_t *location)
{
bool need_role = false;
- CRM_CHECK((rsc != NULL) && (location != NULL), return);
+ CRM_ASSERT((rsc != NULL) && (location != NULL));
// If a role was specified, ensure constraint is applicable
- need_role = (location->role_filter > RSC_ROLE_UNKNOWN);
+ need_role = (location->role_filter > pcmk_role_unknown);
if (need_role && (location->role_filter != rsc->next_role)) {
pe_rsc_trace(rsc,
"Not applying %s to %s because role will be %s not %s",
@@ -645,34 +644,33 @@ pcmk__apply_location(pe_resource_t *rsc, pe__location_t *location)
(need_role? " for role " : ""),
(need_role? role2text(location->role_filter) : ""), rsc->id);
- for (GList *gIter = location->node_list_rh; gIter != NULL;
- gIter = gIter->next) {
+ for (GList *iter = location->node_list_rh;
+ iter != NULL; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
- pe_node_t *weighted_node = NULL;
+ pcmk_node_t *node = iter->data;
+ pcmk_node_t *allowed_node = g_hash_table_lookup(rsc->allowed_nodes,
+ node->details->id);
- weighted_node = (pe_node_t *) pe_hash_table_lookup(rsc->allowed_nodes,
- node->details->id);
- if (weighted_node == NULL) {
+ if (allowed_node == NULL) {
pe_rsc_trace(rsc, "* = %d on %s",
node->weight, pe__node_name(node));
- weighted_node = pe__copy_node(node);
+ allowed_node = pe__copy_node(node);
g_hash_table_insert(rsc->allowed_nodes,
- (gpointer) weighted_node->details->id,
- weighted_node);
+ (gpointer) allowed_node->details->id,
+ allowed_node);
} else {
pe_rsc_trace(rsc, "* + %d on %s",
node->weight, pe__node_name(node));
- weighted_node->weight = pcmk__add_scores(weighted_node->weight,
- node->weight);
+ allowed_node->weight = pcmk__add_scores(allowed_node->weight,
+ node->weight);
}
- if (weighted_node->rsc_discover_mode < location->discover_mode) {
- if (location->discover_mode == pe_discover_exclusive) {
+ if (allowed_node->rsc_discover_mode < location->discover_mode) {
+ if (location->discover_mode == pcmk_probe_exclusive) {
rsc->exclusive_discover = TRUE;
}
/* exclusive > never > always... always is default */
- weighted_node->rsc_discover_mode = location->discover_mode;
+ allowed_node->rsc_discover_mode = location->discover_mode;
}
}
}
diff --git a/lib/pacemaker/pcmk_sched_migration.c b/lib/pacemaker/pcmk_sched_migration.c
index 7e6ba8e..5231bf7 100644
--- a/lib/pacemaker/pcmk_sched_migration.c
+++ b/lib/pacemaker/pcmk_sched_migration.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -25,8 +25,8 @@
* \param[in] target Node to add as migration target
*/
static void
-add_migration_meta(pe_action_t *action, const pe_node_t *source,
- const pe_node_t *target)
+add_migration_meta(pcmk_action_t *action, const pcmk_node_t *source,
+ const pcmk_node_t *target)
{
add_hash_param(action->meta, XML_LRM_ATTR_MIGRATE_SOURCE,
source->details->uname);
@@ -43,12 +43,12 @@ add_migration_meta(pe_action_t *action, const pe_node_t *source,
* \param[in] current Node that resource is originally active on
*/
void
-pcmk__create_migration_actions(pe_resource_t *rsc, const pe_node_t *current)
+pcmk__create_migration_actions(pcmk_resource_t *rsc, const pcmk_node_t *current)
{
- pe_action_t *migrate_to = NULL;
- pe_action_t *migrate_from = NULL;
- pe_action_t *start = NULL;
- pe_action_t *stop = NULL;
+ pcmk_action_t *migrate_to = NULL;
+ pcmk_action_t *migrate_from = NULL;
+ pcmk_action_t *start = NULL;
+ pcmk_action_t *stop = NULL;
pe_rsc_trace(rsc, "Creating actions to %smigrate %s from %s to %s",
((rsc->partial_migration_target == NULL)? "" : "partially "),
@@ -58,61 +58,68 @@ pcmk__create_migration_actions(pe_resource_t *rsc, const pe_node_t *current)
stop = stop_action(rsc, current, TRUE);
if (rsc->partial_migration_target == NULL) {
- migrate_to = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
- RSC_MIGRATE, current, TRUE, TRUE,
+ migrate_to = custom_action(rsc, pcmk__op_key(rsc->id,
+ PCMK_ACTION_MIGRATE_TO, 0),
+ PCMK_ACTION_MIGRATE_TO, current, TRUE,
rsc->cluster);
}
- migrate_from = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
- RSC_MIGRATED, rsc->allocated_to, TRUE, TRUE,
- rsc->cluster);
+ migrate_from = custom_action(rsc, pcmk__op_key(rsc->id,
+ PCMK_ACTION_MIGRATE_FROM, 0),
+ PCMK_ACTION_MIGRATE_FROM, rsc->allocated_to,
+ TRUE, rsc->cluster);
- if ((migrate_from != NULL)
- && ((migrate_to != NULL) || (rsc->partial_migration_target != NULL))) {
+ pe__set_action_flags(start, pcmk_action_migratable);
+ pe__set_action_flags(stop, pcmk_action_migratable);
- pe__set_action_flags(start, pe_action_migrate_runnable);
- pe__set_action_flags(stop, pe_action_migrate_runnable);
+ // This is easier than trying to delete it from the graph
+ pe__set_action_flags(start, pcmk_action_pseudo);
- // This is easier than trying to delete it from the graph
- pe__set_action_flags(start, pe_action_pseudo);
-
- if (rsc->partial_migration_target == NULL) {
- pe__set_action_flags(migrate_from, pe_action_migrate_runnable);
-
- if (migrate_to != NULL) {
- pe__set_action_flags(migrate_to, pe_action_migrate_runnable);
- migrate_to->needs = start->needs;
- }
-
- // Probe -> migrate_to -> migrate_from
- pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
- rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
- NULL, pe_order_optional, rsc->cluster);
- pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0), NULL,
- rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
- NULL,
- pe_order_optional|pe_order_implies_first_migratable,
- rsc->cluster);
- } else {
- pe__set_action_flags(migrate_from, pe_action_migrate_runnable);
- migrate_from->needs = start->needs;
-
- // Probe -> migrate_from (migrate_to already completed)
- pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
- rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
- NULL, pe_order_optional, rsc->cluster);
- }
-
- // migrate_from before stop or start
- pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
- rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
- pe_order_optional|pe_order_implies_first_migratable,
- rsc->cluster);
- pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
- rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
- pe_order_optional|pe_order_implies_first_migratable|pe_order_pseudo_left,
+ if (rsc->partial_migration_target == NULL) {
+ pe__set_action_flags(migrate_from, pcmk_action_migratable);
+ pe__set_action_flags(migrate_to, pcmk_action_migratable);
+ migrate_to->needs = start->needs;
+
+ // Probe -> migrate_to -> migrate_from
+ pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_MONITOR, 0),
+ NULL,
+ rsc,
+ pcmk__op_key(rsc->id, PCMK_ACTION_MIGRATE_TO, 0),
+ NULL, pcmk__ar_ordered, rsc->cluster);
+ pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_MIGRATE_TO, 0),
+ NULL,
+ rsc,
+ pcmk__op_key(rsc->id, PCMK_ACTION_MIGRATE_FROM, 0),
+ NULL,
+ pcmk__ar_ordered|pcmk__ar_unmigratable_then_blocks,
rsc->cluster);
+ } else {
+ pe__set_action_flags(migrate_from, pcmk_action_migratable);
+ migrate_from->needs = start->needs;
+
+ // Probe -> migrate_from (migrate_to already completed)
+ pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_MONITOR, 0),
+ NULL,
+ rsc,
+ pcmk__op_key(rsc->id, PCMK_ACTION_MIGRATE_FROM, 0),
+ NULL, pcmk__ar_ordered, rsc->cluster);
}
+ // migrate_from before stop or start
+ pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_MIGRATE_FROM, 0),
+ NULL,
+ rsc, pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0),
+ NULL,
+ pcmk__ar_ordered|pcmk__ar_unmigratable_then_blocks,
+ rsc->cluster);
+ pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_MIGRATE_FROM, 0),
+ NULL,
+ rsc, pcmk__op_key(rsc->id, PCMK_ACTION_START, 0),
+ NULL,
+ pcmk__ar_ordered
+ |pcmk__ar_unmigratable_then_blocks
+ |pcmk__ar_first_else_then,
+ rsc->cluster);
+
if (migrate_to != NULL) {
add_migration_meta(migrate_to, current, rsc->allocated_to);
@@ -132,9 +139,7 @@ pcmk__create_migration_actions(pe_resource_t *rsc, const pe_node_t *current)
}
}
- if (migrate_from != NULL) {
- add_migration_meta(migrate_from, current, rsc->allocated_to);
- }
+ add_migration_meta(migrate_from, current, rsc->allocated_to);
}
/*!
@@ -147,18 +152,19 @@ pcmk__create_migration_actions(pe_resource_t *rsc, const pe_node_t *current)
void
pcmk__abort_dangling_migration(void *data, void *user_data)
{
- const pe_node_t *dangling_source = (const pe_node_t *) data;
- pe_resource_t *rsc = (pe_resource_t *) user_data;
+ const pcmk_node_t *dangling_source = (const pcmk_node_t *) data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) user_data;
- pe_action_t *stop = NULL;
- bool cleanup = pcmk_is_set(rsc->cluster->flags, pe_flag_remove_after_stop);
+ pcmk_action_t *stop = NULL;
+ bool cleanup = pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_remove_after_stop);
pe_rsc_trace(rsc,
"Scheduling stop%s for %s on %s due to dangling migration",
(cleanup? " and cleanup" : ""), rsc->id,
pe__node_name(dangling_source));
stop = stop_action(rsc, dangling_source, FALSE);
- pe__set_action_flags(stop, pe_action_dangle);
+ pe__set_action_flags(stop, pcmk_action_migration_abort);
if (cleanup) {
pcmk__schedule_cleanup(rsc, dangling_source, false);
}
@@ -174,30 +180,30 @@ pcmk__abort_dangling_migration(void *data, void *user_data)
* \return true if \p rsc can migrate, otherwise false
*/
bool
-pcmk__rsc_can_migrate(const pe_resource_t *rsc, const pe_node_t *current)
+pcmk__rsc_can_migrate(const pcmk_resource_t *rsc, const pcmk_node_t *current)
{
CRM_CHECK(rsc != NULL, return false);
- if (!pcmk_is_set(rsc->flags, pe_rsc_allow_migrate)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_migratable)) {
pe_rsc_trace(rsc, "%s cannot migrate because "
"the configuration does not allow it",
rsc->id);
return false;
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pe_rsc_trace(rsc, "%s cannot migrate because it is not managed",
rsc->id);
return false;
}
- if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
pe_rsc_trace(rsc, "%s cannot migrate because it is failed",
rsc->id);
return false;
}
- if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_start_pending)) {
pe_rsc_trace(rsc, "%s cannot migrate because it has a start pending",
rsc->id);
return false;
@@ -230,7 +236,7 @@ pcmk__rsc_can_migrate(const pe_resource_t *rsc, const pe_node_t *current)
* \return Newly allocated copy of action name (or NULL if none available)
*/
static char *
-task_from_action_or_key(const pe_action_t *action, const char *key)
+task_from_action_or_key(const pcmk_action_t *action, const char *key)
{
char *res = NULL;
@@ -270,8 +276,8 @@ pcmk__order_migration_equivalents(pe__ordering_t *order)
}
// Only orderings involving at least one migratable resource are relevant
- first_migratable = pcmk_is_set(order->lh_rsc->flags, pe_rsc_allow_migrate);
- then_migratable = pcmk_is_set(order->rh_rsc->flags, pe_rsc_allow_migrate);
+ first_migratable = pcmk_is_set(order->lh_rsc->flags, pcmk_rsc_migratable);
+ then_migratable = pcmk_is_set(order->rh_rsc->flags, pcmk_rsc_migratable);
if (!first_migratable && !then_migratable) {
return;
}
@@ -282,24 +288,26 @@ pcmk__order_migration_equivalents(pe__ordering_t *order)
then_task = task_from_action_or_key(order->rh_action,
order->rh_action_task);
- if (pcmk__str_eq(first_task, RSC_START, pcmk__str_none)
- && pcmk__str_eq(then_task, RSC_START, pcmk__str_none)) {
+ if (pcmk__str_eq(first_task, PCMK_ACTION_START, pcmk__str_none)
+ && pcmk__str_eq(then_task, PCMK_ACTION_START, pcmk__str_none)) {
- uint32_t flags = pe_order_optional;
+ uint32_t flags = pcmk__ar_ordered;
if (first_migratable && then_migratable) {
/* A start then B start
* -> A migrate_from then B migrate_to */
pcmk__new_ordering(order->lh_rsc,
- pcmk__op_key(order->lh_rsc->id, RSC_MIGRATED, 0),
+ pcmk__op_key(order->lh_rsc->id,
+ PCMK_ACTION_MIGRATE_FROM, 0),
NULL, order->rh_rsc,
- pcmk__op_key(order->rh_rsc->id, RSC_MIGRATE, 0),
+ pcmk__op_key(order->rh_rsc->id,
+ PCMK_ACTION_MIGRATE_TO, 0),
NULL, flags, order->lh_rsc->cluster);
}
if (then_migratable) {
if (first_migratable) {
- pe__set_order_flags(flags, pe_order_apply_first_non_migratable);
+ pe__set_order_flags(flags, pcmk__ar_if_first_unmigratable);
}
/* A start then B start
@@ -307,75 +315,87 @@ pcmk__order_migration_equivalents(pe__ordering_t *order)
* migration)
*/
pcmk__new_ordering(order->lh_rsc,
- pcmk__op_key(order->lh_rsc->id, RSC_START, 0),
+ pcmk__op_key(order->lh_rsc->id,
+ PCMK_ACTION_START, 0),
NULL, order->rh_rsc,
- pcmk__op_key(order->rh_rsc->id, RSC_MIGRATE, 0),
+ pcmk__op_key(order->rh_rsc->id,
+ PCMK_ACTION_MIGRATE_TO, 0),
NULL, flags, order->lh_rsc->cluster);
}
} else if (then_migratable
- && pcmk__str_eq(first_task, RSC_STOP, pcmk__str_none)
- && pcmk__str_eq(then_task, RSC_STOP, pcmk__str_none)) {
+ && pcmk__str_eq(first_task, PCMK_ACTION_STOP, pcmk__str_none)
+ && pcmk__str_eq(then_task, PCMK_ACTION_STOP, pcmk__str_none)) {
- uint32_t flags = pe_order_optional;
+ uint32_t flags = pcmk__ar_ordered;
if (first_migratable) {
- pe__set_order_flags(flags, pe_order_apply_first_non_migratable);
+ pe__set_order_flags(flags, pcmk__ar_if_first_unmigratable);
}
/* For an ordering "stop A then stop B", if A is moving via restart, and
* B is migrating, enforce that B's migrate_to occurs after A's stop.
*/
pcmk__new_ordering(order->lh_rsc,
- pcmk__op_key(order->lh_rsc->id, RSC_STOP, 0), NULL,
+ pcmk__op_key(order->lh_rsc->id, PCMK_ACTION_STOP, 0),
+ NULL,
order->rh_rsc,
- pcmk__op_key(order->rh_rsc->id, RSC_MIGRATE, 0),
+ pcmk__op_key(order->rh_rsc->id,
+ PCMK_ACTION_MIGRATE_TO, 0),
NULL, flags, order->lh_rsc->cluster);
// Also order B's migrate_from after A's stop during partial migrations
if (order->rh_rsc->partial_migration_target) {
pcmk__new_ordering(order->lh_rsc,
- pcmk__op_key(order->lh_rsc->id, RSC_STOP, 0),
+ pcmk__op_key(order->lh_rsc->id, PCMK_ACTION_STOP,
+ 0),
NULL, order->rh_rsc,
- pcmk__op_key(order->rh_rsc->id, RSC_MIGRATED, 0),
+ pcmk__op_key(order->rh_rsc->id,
+ PCMK_ACTION_MIGRATE_FROM, 0),
NULL, flags, order->lh_rsc->cluster);
}
- } else if (pcmk__str_eq(first_task, RSC_PROMOTE, pcmk__str_none)
- && pcmk__str_eq(then_task, RSC_START, pcmk__str_none)) {
+ } else if (pcmk__str_eq(first_task, PCMK_ACTION_PROMOTE, pcmk__str_none)
+ && pcmk__str_eq(then_task, PCMK_ACTION_START, pcmk__str_none)) {
- uint32_t flags = pe_order_optional;
+ uint32_t flags = pcmk__ar_ordered;
if (then_migratable) {
/* A promote then B start
* -> A promote then B migrate_to */
pcmk__new_ordering(order->lh_rsc,
- pcmk__op_key(order->lh_rsc->id, RSC_PROMOTE, 0),
+ pcmk__op_key(order->lh_rsc->id,
+ PCMK_ACTION_PROMOTE, 0),
NULL, order->rh_rsc,
- pcmk__op_key(order->rh_rsc->id, RSC_MIGRATE, 0),
+ pcmk__op_key(order->rh_rsc->id,
+ PCMK_ACTION_MIGRATE_TO, 0),
NULL, flags, order->lh_rsc->cluster);
}
- } else if (pcmk__str_eq(first_task, RSC_DEMOTE, pcmk__str_none)
- && pcmk__str_eq(then_task, RSC_STOP, pcmk__str_none)) {
+ } else if (pcmk__str_eq(first_task, PCMK_ACTION_DEMOTE, pcmk__str_none)
+ && pcmk__str_eq(then_task, PCMK_ACTION_STOP, pcmk__str_none)) {
- uint32_t flags = pe_order_optional;
+ uint32_t flags = pcmk__ar_ordered;
if (then_migratable) {
/* A demote then B stop
* -> A demote then B migrate_to */
pcmk__new_ordering(order->lh_rsc,
- pcmk__op_key(order->lh_rsc->id, RSC_DEMOTE, 0),
+ pcmk__op_key(order->lh_rsc->id,
+ PCMK_ACTION_DEMOTE, 0),
NULL, order->rh_rsc,
- pcmk__op_key(order->rh_rsc->id, RSC_MIGRATE, 0),
+ pcmk__op_key(order->rh_rsc->id,
+ PCMK_ACTION_MIGRATE_TO, 0),
NULL, flags, order->lh_rsc->cluster);
- // Also order B migrate_from after A demote during partial migrations
+ // Order B migrate_from after A demote during partial migrations
if (order->rh_rsc->partial_migration_target) {
pcmk__new_ordering(order->lh_rsc,
- pcmk__op_key(order->lh_rsc->id, RSC_DEMOTE, 0),
+ pcmk__op_key(order->lh_rsc->id,
+ PCMK_ACTION_DEMOTE, 0),
NULL, order->rh_rsc,
- pcmk__op_key(order->rh_rsc->id, RSC_MIGRATED, 0),
+ pcmk__op_key(order->rh_rsc->id,
+ PCMK_ACTION_MIGRATE_FROM, 0),
NULL, flags, order->lh_rsc->cluster);
}
}
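As an illustrative summary only (not text from the patch), the migration-equivalent orderings created above, when the resources involved are migratable, boil down to:

/* A start   then B start  =>  A migrate_from then B migrate_to
 *                              plus A start then B migrate_to
 * A stop    then B stop   =>  A stop then B migrate_to
 *                              plus A stop then B migrate_from (partial migration)
 * A promote then B start  =>  A promote then B migrate_to
 * A demote  then B stop   =>  A demote then B migrate_to
 *                              plus A demote then B migrate_from (partial migration)
 */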
diff --git a/lib/pacemaker/pcmk_sched_nodes.c b/lib/pacemaker/pcmk_sched_nodes.c
index d7d5ba4..9cf5545 100644
--- a/lib/pacemaker/pcmk_sched_nodes.c
+++ b/lib/pacemaker/pcmk_sched_nodes.c
@@ -9,7 +9,6 @@
#include <crm_internal.h>
#include <crm/msg_xml.h>
-#include <crm/lrmd.h> // lrmd_event_data_t
#include <crm/common/xml_internal.h>
#include <pacemaker-internal.h>
#include <pacemaker.h>
@@ -28,7 +27,7 @@
* or maintenance mode, otherwise false
*/
bool
-pcmk__node_available(const pe_node_t *node, bool consider_score,
+pcmk__node_available(const pcmk_node_t *node, bool consider_score,
bool consider_guest)
{
if ((node == NULL) || (node->details == NULL) || !node->details->online
@@ -43,7 +42,7 @@ pcmk__node_available(const pe_node_t *node, bool consider_score,
// @TODO Go through all callers to see which should set consider_guest
if (consider_guest && pe__is_guest_node(node)) {
- pe_resource_t *guest = node->details->remote_rsc->container;
+ pcmk_resource_t *guest = node->details->remote_rsc->container;
if (guest->fns->location(guest, NULL, FALSE) == NULL) {
return false;
@@ -66,7 +65,7 @@ pcmk__copy_node_table(GHashTable *nodes)
{
GHashTable *new_table = NULL;
GHashTableIter iter;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
if (nodes == NULL) {
return NULL;
@@ -74,7 +73,7 @@ pcmk__copy_node_table(GHashTable *nodes)
new_table = pcmk__strkey_table(NULL, free);
g_hash_table_iter_init(&iter, nodes);
while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
- pe_node_t *new_node = pe__copy_node(node);
+ pcmk_node_t *new_node = pe__copy_node(node);
g_hash_table_insert(new_table, (gpointer) new_node->details->id,
new_node);
@@ -84,6 +83,82 @@ pcmk__copy_node_table(GHashTable *nodes)
/*!
* \internal
+ * \brief Free a table of node tables
+ *
+ * \param[in,out] data Table to free
+ *
+ * \note This is a \c GDestroyNotify wrapper for \c g_hash_table_destroy().
+ */
+static void
+destroy_node_tables(gpointer data)
+{
+ g_hash_table_destroy((GHashTable *) data);
+}
+
+/*!
+ * \internal
+ * \brief Recursively copy the node tables of a resource
+ *
+ * Build a hash table containing copies of the allowed nodes tables of \p rsc
+ * and its entire tree of descendants. The key is the resource ID, and the value
+ * is a copy of the resource's node table.
+ *
+ * \param[in] rsc Resource whose node table to copy
+ * \param[in,out] copy Where to store the copied node tables
+ *
+ * \note \p *copy should be \c NULL for the top-level call.
+ * \note The caller is responsible for freeing \p copy using
+ * \c g_hash_table_destroy().
+ */
+void
+pcmk__copy_node_tables(const pcmk_resource_t *rsc, GHashTable **copy)
+{
+ CRM_ASSERT((rsc != NULL) && (copy != NULL));
+
+ if (*copy == NULL) {
+ *copy = pcmk__strkey_table(NULL, destroy_node_tables);
+ }
+
+ g_hash_table_insert(*copy, rsc->id,
+ pcmk__copy_node_table(rsc->allowed_nodes));
+
+ for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ pcmk__copy_node_tables((const pcmk_resource_t *) iter->data, copy);
+ }
+}
+
+/*!
+ * \internal
+ * \brief Recursively restore the node tables of a resource from backup
+ *
+ * Given a hash table containing backup copies of the allowed nodes tables of
+ * \p rsc and its entire tree of descendants, replace the resources' current
+ * node tables with the backed-up copies.
+ *
+ * \param[in,out] rsc Resource whose node tables to restore
+ * \param[in] backup Table of backup node tables (created by
+ * \c pcmk__copy_node_tables())
+ *
+ * \note This function frees the resources' current node tables.
+ */
+void
+pcmk__restore_node_tables(pcmk_resource_t *rsc, GHashTable *backup)
+{
+ CRM_ASSERT((rsc != NULL) && (backup != NULL));
+
+ g_hash_table_destroy(rsc->allowed_nodes);
+
+ // Copy to avoid danger with multiple restores
+ rsc->allowed_nodes = g_hash_table_lookup(backup, rsc->id);
+ rsc->allowed_nodes = pcmk__copy_node_table(rsc->allowed_nodes);
+
+ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ pcmk__restore_node_tables((pcmk_resource_t *) iter->data, backup);
+ }
+}
+
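A minimal usage sketch for the new backup/restore pair, assuming an illustrative pcmk_resource_t *rsc and a hypothetical try_assignment() step (neither name is from the patch):

GHashTable *backup = NULL;

pcmk__copy_node_tables(rsc, &backup);        // snapshot rsc and all descendants, keyed by ID
if (!try_assignment(rsc)) {                  // hypothetical tentative step
    pcmk__restore_node_tables(rsc, backup);  // roll back the allowed-node tables
}
g_hash_table_destroy(backup);                // caller frees the backup table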
+/*!
+ * \internal
* \brief Copy a list of node objects
*
* \param[in] list List to copy
@@ -96,9 +171,9 @@ pcmk__copy_node_list(const GList *list, bool reset)
{
GList *result = NULL;
- for (const GList *gIter = list; gIter != NULL; gIter = gIter->next) {
- pe_node_t *new_node = NULL;
- pe_node_t *this_node = (pe_node_t *) gIter->data;
+ for (const GList *iter = list; iter != NULL; iter = iter->next) {
+ pcmk_node_t *new_node = NULL;
+ pcmk_node_t *this_node = iter->data;
new_node = pe__copy_node(this_node);
if (reset) {
@@ -111,14 +186,14 @@ pcmk__copy_node_list(const GList *list, bool reset)
/*!
* \internal
- * \brief Compare two nodes for allocation desirability
+ * \brief Compare two nodes for assignment preference
*
- * Given two nodes, check which one is more preferred by allocation criteria
- * such as node weight and utilization.
+ * Given two nodes, check which one is more preferred by assignment criteria
+ * such as node score and utilization.
*
* \param[in] a First node to compare
* \param[in] b Second node to compare
- * \param[in] data Node that resource being assigned is active on, if any
+ * \param[in] data Node to prefer if all else equal
*
* \return -1 if \p a is preferred, +1 if \p b is preferred, or 0 if they are
* equally preferred
@@ -126,12 +201,12 @@ pcmk__copy_node_list(const GList *list, bool reset)
static gint
compare_nodes(gconstpointer a, gconstpointer b, gpointer data)
{
- const pe_node_t *node1 = (const pe_node_t *) a;
- const pe_node_t *node2 = (const pe_node_t *) b;
- const pe_node_t *active = (const pe_node_t *) data;
+ const pcmk_node_t *node1 = (const pcmk_node_t *) a;
+ const pcmk_node_t *node2 = (const pcmk_node_t *) b;
+ const pcmk_node_t *preferred = (const pcmk_node_t *) data;
- int node1_weight = 0;
- int node2_weight = 0;
+ int node1_score = -INFINITY;
+ int node2_score = -INFINITY;
int result = 0;
@@ -142,29 +217,29 @@ compare_nodes(gconstpointer a, gconstpointer b, gpointer data)
return -1;
}
- // Compare node weights
+ // Compare node scores
- node1_weight = pcmk__node_available(node1, false, false)? node1->weight : -INFINITY;
- node2_weight = pcmk__node_available(node2, false, false)? node2->weight : -INFINITY;
+ if (pcmk__node_available(node1, false, false)) {
+ node1_score = node1->weight;
+ }
+ if (pcmk__node_available(node2, false, false)) {
+ node2_score = node2->weight;
+ }
- if (node1_weight > node2_weight) {
- crm_trace("%s (%d) > %s (%d) : weight",
- pe__node_name(node1), node1_weight, pe__node_name(node2),
- node2_weight);
+ if (node1_score > node2_score) {
+ crm_trace("%s before %s (score %d > %d)",
+ pe__node_name(node1), pe__node_name(node2),
+ node1_score, node2_score);
return -1;
}
- if (node1_weight < node2_weight) {
- crm_trace("%s (%d) < %s (%d) : weight",
- pe__node_name(node1), node1_weight, pe__node_name(node2),
- node2_weight);
+ if (node1_score < node2_score) {
+ crm_trace("%s after %s (score %d < %d)",
+ pe__node_name(node1), pe__node_name(node2),
+ node1_score, node2_score);
return 1;
}
- crm_trace("%s (%d) == %s (%d) : weight",
- pe__node_name(node1), node1_weight, pe__node_name(node2),
- node2_weight);
-
// If appropriate, compare node utilization
if (pcmk__str_eq(node1->details->data_set->placement_strategy, "minimal",
@@ -176,56 +251,65 @@ compare_nodes(gconstpointer a, gconstpointer b, gpointer data)
pcmk__str_casei)) {
result = pcmk__compare_node_capacities(node1, node2);
if (result < 0) {
- crm_trace("%s > %s : capacity (%d)",
- pe__node_name(node1), pe__node_name(node2), result);
+ crm_trace("%s before %s (greater capacity by %d attributes)",
+ pe__node_name(node1), pe__node_name(node2), result * -1);
return -1;
} else if (result > 0) {
- crm_trace("%s < %s : capacity (%d)",
+ crm_trace("%s after %s (lower capacity by %d attributes)",
pe__node_name(node1), pe__node_name(node2), result);
return 1;
}
}
- // Compare number of allocated resources
+ // Compare number of resources already assigned to node
if (node1->details->num_resources < node2->details->num_resources) {
- crm_trace("%s (%d) > %s (%d) : resources",
- pe__node_name(node1), node1->details->num_resources,
- pe__node_name(node2), node2->details->num_resources);
+ crm_trace("%s before %s (%d resources < %d)",
+ pe__node_name(node1), pe__node_name(node2),
+ node1->details->num_resources, node2->details->num_resources);
return -1;
} else if (node1->details->num_resources > node2->details->num_resources) {
- crm_trace("%s (%d) < %s (%d) : resources",
- pe__node_name(node1), node1->details->num_resources,
- pe__node_name(node2), node2->details->num_resources);
+ crm_trace("%s after %s (%d resources > %d)",
+ pe__node_name(node1), pe__node_name(node2),
+ node1->details->num_resources, node2->details->num_resources);
return 1;
}
// Check whether one node is already running desired resource
- if (active != NULL) {
- if (active->details == node1->details) {
- crm_trace("%s (%d) > %s (%d) : active",
- pe__node_name(node1), node1->details->num_resources,
- pe__node_name(node2), node2->details->num_resources);
+ if (preferred != NULL) {
+ if (pe__same_node(preferred, node1)) {
+ crm_trace("%s before %s (preferred node)",
+ pe__node_name(node1), pe__node_name(node2));
return -1;
- } else if (active->details == node2->details) {
- crm_trace("%s (%d) < %s (%d) : active",
- pe__node_name(node1), node1->details->num_resources,
- pe__node_name(node2), node2->details->num_resources);
+ } else if (pe__same_node(preferred, node2)) {
+ crm_trace("%s after %s (not preferred node)",
+ pe__node_name(node1), pe__node_name(node2));
return 1;
}
}
// If all else is equal, prefer node with lowest-sorting name
equal:
- crm_trace("%s = %s", pe__node_name(node1), pe__node_name(node2));
- return strcmp(node1->details->uname, node2->details->uname);
+ result = strcmp(node1->details->uname, node2->details->uname);
+ if (result < 0) {
+ crm_trace("%s before %s (name)",
+ pe__node_name(node1), pe__node_name(node2));
+ return -1;
+ } else if (result > 0) {
+ crm_trace("%s after %s (name)",
+ pe__node_name(node1), pe__node_name(node2));
+ return 1;
+ }
+
+ crm_trace("%s == %s", pe__node_name(node1), pe__node_name(node2));
+ return 0;
}
/*!
* \internal
- * \brief Sort a list of nodes by allocation desirability
+ * \brief Sort a list of nodes by assignment preference
*
* \param[in,out] nodes Node list to sort
* \param[in] active_node Node where resource being assigned is active
@@ -233,7 +317,7 @@ equal:
* \return New head of sorted list
*/
GList *
-pcmk__sort_nodes(GList *nodes, pe_node_t *active_node)
+pcmk__sort_nodes(GList *nodes, pcmk_node_t *active_node)
{
return g_list_sort_with_data(nodes, compare_nodes, active_node);
}
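A minimal sketch of how the comparator is meant to be used, assuming an illustrative GList *candidates of pcmk_node_t pointers and a current node (which may be NULL):

candidates = pcmk__sort_nodes(candidates, current);    // score, capacity, resource count, preferred node, name
pcmk_node_t *best = (pcmk_node_t *) candidates->data;  // most preferred node sorts first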
@@ -251,7 +335,7 @@ bool
pcmk__any_node_available(GHashTable *nodes)
{
GHashTableIter iter;
- const pe_node_t *node = NULL;
+ const pcmk_node_t *node = NULL;
if (nodes == NULL) {
return false;
@@ -269,14 +353,14 @@ pcmk__any_node_available(GHashTable *nodes)
* \internal
* \brief Apply node health values for all nodes in cluster
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__apply_node_health(pe_working_set_t *data_set)
+pcmk__apply_node_health(pcmk_scheduler_t *scheduler)
{
int base_health = 0;
enum pcmk__health_strategy strategy;
- const char *strategy_str = pe_pref(data_set->config_hash,
+ const char *strategy_str = pe_pref(scheduler->config_hash,
PCMK__OPT_NODE_HEALTH_STRATEGY);
strategy = pcmk__parse_health_strategy(strategy_str);
@@ -287,11 +371,11 @@ pcmk__apply_node_health(pe_working_set_t *data_set)
// The progressive strategy can use a base health score
if (strategy == pcmk__health_strategy_progressive) {
- base_health = pe__health_score(PCMK__OPT_NODE_HEALTH_BASE, data_set);
+ base_health = pe__health_score(PCMK__OPT_NODE_HEALTH_BASE, scheduler);
}
- for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) iter->data;
+ for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
+ pcmk_node_t *node = (pcmk_node_t *) iter->data;
int health = pe__sum_node_health_scores(node, base_health);
// An overall health score of 0 has no effect
@@ -302,8 +386,8 @@ pcmk__apply_node_health(pe_working_set_t *data_set)
pe__node_name(node), health);
// Use node health as a location score for each resource on the node
- for (GList *r = data_set->resources; r != NULL; r = r->next) {
- pe_resource_t *rsc = (pe_resource_t *) r->data;
+ for (GList *r = scheduler->resources; r != NULL; r = r->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) r->data;
bool constrain = true;
@@ -315,8 +399,7 @@ pcmk__apply_node_health(pe_working_set_t *data_set)
PCMK__META_ALLOW_UNHEALTHY_NODES));
}
if (constrain) {
- pcmk__new_location(strategy_str, rsc, health, NULL, node,
- data_set);
+ pcmk__new_location(strategy_str, rsc, health, NULL, node);
} else {
pe_rsc_trace(rsc, "%s is immune from health ban on %s",
rsc->id, pe__node_name(node));
@@ -335,8 +418,8 @@ pcmk__apply_node_health(pe_working_set_t *data_set)
* \return Equivalent of \p node from \p rsc's parent's allowed nodes if any,
* otherwise NULL
*/
-pe_node_t *
-pcmk__top_allowed_node(const pe_resource_t *rsc, const pe_node_t *node)
+pcmk_node_t *
+pcmk__top_allowed_node(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
GHashTable *allowed_nodes = NULL;
@@ -347,5 +430,5 @@ pcmk__top_allowed_node(const pe_resource_t *rsc, const pe_node_t *node)
} else {
allowed_nodes = rsc->parent->allowed_nodes;
}
- return pe_hash_table_lookup(allowed_nodes, node->details->id);
+ return g_hash_table_lookup(allowed_nodes, node->details->id);
}
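A minimal sketch, assuming an illustrative clone instance and a candidate node chosen for it (names are not from the patch):

pcmk_node_t *in_parent = pcmk__top_allowed_node(instance, candidate);

if (in_parent == NULL) {
    // candidate is not in the parent's allowed-node table
}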
diff --git a/lib/pacemaker/pcmk_sched_ordering.c b/lib/pacemaker/pcmk_sched_ordering.c
index 6629999..e589692 100644
--- a/lib/pacemaker/pcmk_sched_ordering.c
+++ b/lib/pacemaker/pcmk_sched_ordering.c
@@ -29,40 +29,41 @@ enum ordering_symmetry {
ordering_symmetric_inverse, // the inverse relation in a symmetric ordering
};
-#define EXPAND_CONSTRAINT_IDREF(__set, __rsc, __name) do { \
- __rsc = pcmk__find_constraint_resource(data_set->resources, __name); \
- if (__rsc == NULL) { \
- pcmk__config_err("%s: No resource found for %s", __set, __name); \
- return pcmk_rc_unpack_error; \
- } \
+#define EXPAND_CONSTRAINT_IDREF(__set, __rsc, __name) do { \
+ __rsc = pcmk__find_constraint_resource(scheduler->resources, \
+ __name); \
+ if (__rsc == NULL) { \
+ pcmk__config_err("%s: No resource found for %s", __set, __name);\
+ return pcmk_rc_unpack_error; \
+ } \
} while (0)
static const char *
invert_action(const char *action)
{
- if (pcmk__str_eq(action, RSC_START, pcmk__str_casei)) {
- return RSC_STOP;
+ if (pcmk__str_eq(action, PCMK_ACTION_START, pcmk__str_none)) {
+ return PCMK_ACTION_STOP;
- } else if (pcmk__str_eq(action, RSC_STOP, pcmk__str_casei)) {
- return RSC_START;
+ } else if (pcmk__str_eq(action, PCMK_ACTION_STOP, pcmk__str_none)) {
+ return PCMK_ACTION_START;
- } else if (pcmk__str_eq(action, RSC_PROMOTE, pcmk__str_casei)) {
- return RSC_DEMOTE;
+ } else if (pcmk__str_eq(action, PCMK_ACTION_PROMOTE, pcmk__str_none)) {
+ return PCMK_ACTION_DEMOTE;
- } else if (pcmk__str_eq(action, RSC_DEMOTE, pcmk__str_casei)) {
- return RSC_PROMOTE;
+ } else if (pcmk__str_eq(action, PCMK_ACTION_DEMOTE, pcmk__str_none)) {
+ return PCMK_ACTION_PROMOTE;
- } else if (pcmk__str_eq(action, RSC_PROMOTED, pcmk__str_casei)) {
- return RSC_DEMOTED;
+ } else if (pcmk__str_eq(action, PCMK_ACTION_PROMOTED, pcmk__str_none)) {
+ return PCMK_ACTION_DEMOTED;
- } else if (pcmk__str_eq(action, RSC_DEMOTED, pcmk__str_casei)) {
- return RSC_PROMOTED;
+ } else if (pcmk__str_eq(action, PCMK_ACTION_DEMOTED, pcmk__str_none)) {
+ return PCMK_ACTION_PROMOTED;
- } else if (pcmk__str_eq(action, RSC_STARTED, pcmk__str_casei)) {
- return RSC_STOPPED;
+ } else if (pcmk__str_eq(action, PCMK_ACTION_RUNNING, pcmk__str_none)) {
+ return PCMK_ACTION_STOPPED;
- } else if (pcmk__str_eq(action, RSC_STOPPED, pcmk__str_casei)) {
- return RSC_STARTED;
+ } else if (pcmk__str_eq(action, PCMK_ACTION_STOPPED, pcmk__str_none)) {
+ return PCMK_ACTION_RUNNING;
}
crm_warn("Unknown action '%s' specified in order constraint", action);
return NULL;
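Behaviour of the renamed helper, shown as an illustrative table rather than patch text:

/* invert_action(PCMK_ACTION_START)    => PCMK_ACTION_STOP
 * invert_action(PCMK_ACTION_PROMOTE)  => PCMK_ACTION_DEMOTE
 * invert_action(PCMK_ACTION_RUNNING)  => PCMK_ACTION_STOPPED
 * invert_action("monitor")            => NULL, with a warning logged
 */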
@@ -86,19 +87,19 @@ get_ordering_type(const xmlNode *xml_obj)
if (score_i == 0) {
kind_e = pe_order_kind_optional;
}
- pe_warn_once(pe_wo_order_score,
+ pe_warn_once(pcmk__wo_order_score,
"Support for 'score' in rsc_order is deprecated "
"and will be removed in a future release "
"(use 'kind' instead)");
}
- } else if (pcmk__str_eq(kind, "Mandatory", pcmk__str_casei)) {
+ } else if (pcmk__str_eq(kind, "Mandatory", pcmk__str_none)) {
kind_e = pe_order_kind_mandatory;
- } else if (pcmk__str_eq(kind, "Optional", pcmk__str_casei)) {
+ } else if (pcmk__str_eq(kind, "Optional", pcmk__str_none)) {
kind_e = pe_order_kind_optional;
- } else if (pcmk__str_eq(kind, "Serialize", pcmk__str_casei)) {
+ } else if (pcmk__str_eq(kind, "Serialize", pcmk__str_none)) {
kind_e = pe_order_kind_serialize;
} else {
@@ -177,34 +178,39 @@ static uint32_t
ordering_flags_for_kind(enum pe_order_kind kind, const char *first,
enum ordering_symmetry symmetry)
{
- uint32_t flags = pe_order_none; // so we trace-log all flags set
-
- pe__set_order_flags(flags, pe_order_optional);
+ uint32_t flags = pcmk__ar_none; // so we trace-log all flags set
switch (kind) {
case pe_order_kind_optional:
+ pe__set_order_flags(flags, pcmk__ar_ordered);
break;
case pe_order_kind_serialize:
- pe__set_order_flags(flags, pe_order_serialize_only);
+ /* This flag is not used anywhere directly but means the relation
+ * will not match an equality comparison against pcmk__ar_none or
+ * pcmk__ar_ordered.
+ */
+ pe__set_order_flags(flags, pcmk__ar_serialize);
break;
case pe_order_kind_mandatory:
+ pe__set_order_flags(flags, pcmk__ar_ordered);
switch (symmetry) {
case ordering_asymmetric:
- pe__set_order_flags(flags, pe_order_asymmetrical);
+ pe__set_order_flags(flags, pcmk__ar_asymmetric);
break;
case ordering_symmetric:
- pe__set_order_flags(flags, pe_order_implies_then);
- if (pcmk__strcase_any_of(first, RSC_START, RSC_PROMOTE,
- NULL)) {
- pe__set_order_flags(flags, pe_order_runnable_left);
+ pe__set_order_flags(flags, pcmk__ar_first_implies_then);
+ if (pcmk__strcase_any_of(first, PCMK_ACTION_START,
+ PCMK_ACTION_PROMOTE, NULL)) {
+ pe__set_order_flags(flags,
+ pcmk__ar_unrunnable_first_blocks);
}
break;
case ordering_symmetric_inverse:
- pe__set_order_flags(flags, pe_order_implies_first);
+ pe__set_order_flags(flags, pcmk__ar_then_implies_first);
break;
}
break;
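As an illustration of the mapping above (not patch text), a mandatory, symmetric "start A then start B" constraint ends up with:

uint32_t flags = pcmk__ar_ordered
                 |pcmk__ar_first_implies_then
                 |pcmk__ar_unrunnable_first_blocks;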
@@ -221,17 +227,17 @@ ordering_flags_for_kind(enum pe_order_kind kind, const char *first,
* \param[in] instance_attr XML attribute name for instance number.
* This option is deprecated and will be removed in a
* future release.
- * \param[in] data_set Cluster working set
+ * \param[in] scheduler Scheduler data
*
* \return Resource corresponding to \p id, or NULL if none
*/
-static pe_resource_t *
+static pcmk_resource_t *
get_ordering_resource(const xmlNode *xml, const char *resource_attr,
const char *instance_attr,
- const pe_working_set_t *data_set)
+ const pcmk_scheduler_t *scheduler)
{
// @COMPAT: instance_attr and instance_id variables deprecated since 2.1.5
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
const char *rsc_id = crm_element_value(xml, resource_attr);
const char *instance_id = crm_element_value(xml, instance_attr);
@@ -241,7 +247,7 @@ get_ordering_resource(const xmlNode *xml, const char *resource_attr,
return NULL;
}
- rsc = pcmk__find_constraint_resource(data_set->resources, rsc_id);
+ rsc = pcmk__find_constraint_resource(scheduler->resources, rsc_id);
if (rsc == NULL) {
pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
"does not exist", ID(xml), rsc_id);
@@ -249,7 +255,7 @@ get_ordering_resource(const xmlNode *xml, const char *resource_attr,
}
if (instance_id != NULL) {
- pe_warn_once(pe_wo_order_inst,
+ pe_warn_once(pcmk__wo_order_inst,
"Support for " XML_ORDER_ATTR_FIRST_INSTANCE " and "
XML_ORDER_ATTR_THEN_INSTANCE " is deprecated and will be "
"removed in a future release.");
@@ -281,7 +287,7 @@ get_ordering_resource(const xmlNode *xml, const char *resource_attr,
* \return Minimum 'first' instances required (or 0 if not applicable)
*/
static int
-get_minimum_first_instances(const pe_resource_t *rsc, const xmlNode *xml)
+get_minimum_first_instances(const pcmk_resource_t *rsc, const xmlNode *xml)
{
const char *clone_min = NULL;
bool require_all = false;
@@ -290,8 +296,7 @@ get_minimum_first_instances(const pe_resource_t *rsc, const xmlNode *xml)
return 0;
}
- clone_min = g_hash_table_lookup(rsc->meta,
- XML_RSC_ATTR_INCARNATION_MIN);
+ clone_min = g_hash_table_lookup(rsc->meta, PCMK_META_CLONE_MIN);
if (clone_min != NULL) {
int clone_min_int = 0;
@@ -303,7 +308,7 @@ get_minimum_first_instances(const pe_resource_t *rsc, const xmlNode *xml)
* require-all=false is deprecated equivalent of clone-min=1
*/
if (pcmk__xe_get_bool_attr(xml, "require-all", &require_all) != ENODATA) {
- pe_warn_once(pe_wo_require_all,
+ pe_warn_once(pcmk__wo_require_all,
"Support for require-all in ordering constraints "
"is deprecated and will be removed in a future release"
" (use clone-min clone meta-attribute instead)");
@@ -326,17 +331,16 @@ get_minimum_first_instances(const pe_resource_t *rsc, const xmlNode *xml)
* \param[in] action_then 'Then' action in ordering
* \param[in] flags Ordering flags
* \param[in] clone_min Minimum required instances of 'first'
- * \param[in,out] data_set Cluster working set
*/
static void
clone_min_ordering(const char *id,
- pe_resource_t *rsc_first, const char *action_first,
- pe_resource_t *rsc_then, const char *action_then,
- uint32_t flags, int clone_min, pe_working_set_t *data_set)
+ pcmk_resource_t *rsc_first, const char *action_first,
+ pcmk_resource_t *rsc_then, const char *action_then,
+ uint32_t flags, int clone_min)
{
// Create a pseudo-action for when the minimum instances are active
- char *task = crm_strdup_printf(CRM_OP_RELAXED_CLONE ":%s", id);
- pe_action_t *clone_min_met = get_pseudo_op(task, data_set);
+ char *task = crm_strdup_printf(PCMK_ACTION_CLONE_ONE_OR_MORE ":%s", id);
+ pcmk_action_t *clone_min_met = get_pseudo_op(task, rsc_first->cluster);
free(task);
@@ -344,24 +348,24 @@ clone_min_ordering(const char *id,
* considered runnable before allowing the pseudo-action to be runnable.
*/
clone_min_met->required_runnable_before = clone_min;
- pe__set_action_flags(clone_min_met, pe_action_requires_any);
+ pe__set_action_flags(clone_min_met, pcmk_action_min_runnable);
// Order the actions for each clone instance before the pseudo-action
- for (GList *rIter = rsc_first->children; rIter != NULL;
- rIter = rIter->next) {
-
- pe_resource_t *child = rIter->data;
+ for (GList *iter = rsc_first->children; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *child = iter->data;
pcmk__new_ordering(child, pcmk__op_key(child->id, action_first, 0),
NULL, NULL, NULL, clone_min_met,
- pe_order_one_or_more|pe_order_implies_then_printed,
- data_set);
+ pcmk__ar_min_runnable
+ |pcmk__ar_first_implies_then_graphed,
+ rsc_first->cluster);
}
// Order "then" action after the pseudo-action (if runnable)
pcmk__new_ordering(NULL, NULL, clone_min_met, rsc_then,
pcmk__op_key(rsc_then->id, action_then, 0),
- NULL, flags|pe_order_runnable_left, data_set);
+ NULL, flags|pcmk__ar_unrunnable_first_blocks,
+ rsc_first->cluster);
}
/*!
@@ -397,8 +401,8 @@ clone_min_ordering(const char *id,
*/
static void
inverse_ordering(const char *id, enum pe_order_kind kind,
- pe_resource_t *rsc_first, const char *action_first,
- pe_resource_t *rsc_then, const char *action_then)
+ pcmk_resource_t *rsc_first, const char *action_first,
+ pcmk_resource_t *rsc_then, const char *action_then)
{
action_then = invert_action(action_then);
action_first = invert_action(action_first);
@@ -409,20 +413,20 @@ inverse_ordering(const char *id, enum pe_order_kind kind,
uint32_t flags = ordering_flags_for_kind(kind, action_first,
ordering_symmetric_inverse);
- handle_restart_type(rsc_then, kind, pe_order_implies_first, flags);
+ handle_restart_type(rsc_then, kind, pcmk__ar_then_implies_first, flags);
pcmk__order_resource_actions(rsc_then, action_then, rsc_first,
action_first, flags);
}
}
static void
-unpack_simple_rsc_order(xmlNode *xml_obj, pe_working_set_t *data_set)
+unpack_simple_rsc_order(xmlNode *xml_obj, pcmk_scheduler_t *scheduler)
{
- pe_resource_t *rsc_then = NULL;
- pe_resource_t *rsc_first = NULL;
+ pcmk_resource_t *rsc_then = NULL;
+ pcmk_resource_t *rsc_first = NULL;
int min_required_before = 0;
enum pe_order_kind kind = pe_order_kind_mandatory;
- uint32_t cons_weight = pe_order_none;
+ uint32_t flags = pcmk__ar_none;
enum ordering_symmetry symmetry;
const char *action_then = NULL;
@@ -434,27 +438,27 @@ unpack_simple_rsc_order(xmlNode *xml_obj, pe_working_set_t *data_set)
id = crm_element_value(xml_obj, XML_ATTR_ID);
if (id == NULL) {
pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
- crm_element_name(xml_obj));
+ xml_obj->name);
return;
}
rsc_first = get_ordering_resource(xml_obj, XML_ORDER_ATTR_FIRST,
XML_ORDER_ATTR_FIRST_INSTANCE,
- data_set);
+ scheduler);
if (rsc_first == NULL) {
return;
}
rsc_then = get_ordering_resource(xml_obj, XML_ORDER_ATTR_THEN,
XML_ORDER_ATTR_THEN_INSTANCE,
- data_set);
+ scheduler);
if (rsc_then == NULL) {
return;
}
action_first = crm_element_value(xml_obj, XML_ORDER_ATTR_FIRST_ACTION);
if (action_first == NULL) {
- action_first = RSC_START;
+ action_first = PCMK_ACTION_START;
}
action_then = crm_element_value(xml_obj, XML_ORDER_ATTR_THEN_ACTION);
@@ -465,9 +469,9 @@ unpack_simple_rsc_order(xmlNode *xml_obj, pe_working_set_t *data_set)
kind = get_ordering_type(xml_obj);
symmetry = get_ordering_symmetry(xml_obj, kind, NULL);
- cons_weight = ordering_flags_for_kind(kind, action_first, symmetry);
+ flags = ordering_flags_for_kind(kind, action_first, symmetry);
- handle_restart_type(rsc_then, kind, pe_order_implies_then, cons_weight);
+ handle_restart_type(rsc_then, kind, pcmk__ar_first_implies_then, flags);
/* If there is a minimum number of instances that must be runnable before
* the 'then' action is runnable, we use a pseudo-action for convenience:
@@ -477,10 +481,10 @@ unpack_simple_rsc_order(xmlNode *xml_obj, pe_working_set_t *data_set)
min_required_before = get_minimum_first_instances(rsc_first, xml_obj);
if (min_required_before > 0) {
clone_min_ordering(id, rsc_first, action_first, rsc_then, action_then,
- cons_weight, min_required_before, data_set);
+ flags, min_required_before);
} else {
pcmk__order_resource_actions(rsc_first, action_first, rsc_then,
- action_then, cons_weight);
+ action_then, flags);
}
if (symmetry == ordering_symmetric) {
@@ -511,17 +515,17 @@ unpack_simple_rsc_order(xmlNode *xml_obj, pe_working_set_t *data_set)
* \param[in] then_action 'then' action (if NULL, \p then_rsc and
* \p then_action_task must be set)
*
- * \param[in] flags Flag set of enum pe_ordering
- * \param[in,out] data_set Cluster working set to add ordering to
+ * \param[in] flags Group of enum pcmk__action_relation_flags
+ * \param[in,out] sched Scheduler data to add ordering to
*
* \note This function takes ownership of first_action_task and
* then_action_task, which do not need to be freed by the caller.
*/
void
-pcmk__new_ordering(pe_resource_t *first_rsc, char *first_action_task,
- pe_action_t *first_action, pe_resource_t *then_rsc,
- char *then_action_task, pe_action_t *then_action,
- uint32_t flags, pe_working_set_t *data_set)
+pcmk__new_ordering(pcmk_resource_t *first_rsc, char *first_action_task,
+ pcmk_action_t *first_action, pcmk_resource_t *then_rsc,
+ char *then_action_task, pcmk_action_t *then_action,
+ uint32_t flags, pcmk_scheduler_t *sched)
{
pe__ordering_t *order = NULL;
@@ -540,7 +544,7 @@ pcmk__new_ordering(pe_resource_t *first_rsc, char *first_action_task,
order = calloc(1, sizeof(pe__ordering_t));
CRM_ASSERT(order != NULL);
- order->id = data_set->order_id++;
+ order->id = sched->order_id++;
order->flags = flags;
order->lh_rsc = first_rsc;
order->rh_rsc = then_rsc;
@@ -566,12 +570,12 @@ pcmk__new_ordering(pe_resource_t *first_rsc, char *first_action_task,
}
pe_rsc_trace(first_rsc, "Created ordering %d for %s then %s",
- (data_set->order_id - 1),
+ (sched->order_id - 1),
pcmk__s(order->lh_action_task, "an underspecified action"),
pcmk__s(order->rh_action_task, "an underspecified action"));
- data_set->ordering_constraints = g_list_prepend(data_set->ordering_constraints,
- order);
+ sched->ordering_constraints = g_list_prepend(sched->ordering_constraints,
+ order);
pcmk__order_migration_equivalents(order);
}
@@ -581,23 +585,23 @@ pcmk__new_ordering(pe_resource_t *first_rsc, char *first_action_task,
* \param[in] set Set XML to unpack
* \param[in] parent_kind rsc_order XML "kind" attribute
* \param[in] parent_symmetrical_s rsc_order XML "symmetrical" attribute
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \return Standard Pacemaker return code
*/
static int
unpack_order_set(const xmlNode *set, enum pe_order_kind parent_kind,
- const char *parent_symmetrical_s, pe_working_set_t *data_set)
+ const char *parent_symmetrical_s, pcmk_scheduler_t *scheduler)
{
GList *set_iter = NULL;
GList *resources = NULL;
- pe_resource_t *last = NULL;
- pe_resource_t *resource = NULL;
+ pcmk_resource_t *last = NULL;
+ pcmk_resource_t *resource = NULL;
int local_kind = parent_kind;
bool sequential = false;
- uint32_t flags = pe_order_optional;
+ uint32_t flags = pcmk__ar_ordered;
enum ordering_symmetry symmetry;
char *key = NULL;
@@ -607,7 +611,7 @@ unpack_order_set(const xmlNode *set, enum pe_order_kind parent_kind,
const char *kind_s = crm_element_value(set, XML_ORDER_ATTR_KIND);
if (action == NULL) {
- action = RSC_START;
+ action = PCMK_ACTION_START;
}
if (kind_s) {
@@ -636,7 +640,7 @@ unpack_order_set(const xmlNode *set, enum pe_order_kind parent_kind,
set_iter = resources;
while (set_iter != NULL) {
- resource = (pe_resource_t *) set_iter->data;
+ resource = (pcmk_resource_t *) set_iter->data;
set_iter = set_iter->next;
key = pcmk__op_key(resource->id, action, 0);
@@ -644,12 +648,12 @@ unpack_order_set(const xmlNode *set, enum pe_order_kind parent_kind,
if (local_kind == pe_order_kind_serialize) {
/* Serialize before everything that comes after */
- for (GList *gIter = set_iter; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *then_rsc = (pe_resource_t *) gIter->data;
+ for (GList *iter = set_iter; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *then_rsc = iter->data;
char *then_key = pcmk__op_key(then_rsc->id, action, 0);
pcmk__new_ordering(resource, strdup(key), NULL, then_rsc,
- then_key, NULL, flags, data_set);
+ then_key, NULL, flags, scheduler);
}
} else if (sequential) {
@@ -674,7 +678,7 @@ unpack_order_set(const xmlNode *set, enum pe_order_kind parent_kind,
set_iter = resources;
while (set_iter != NULL) {
- resource = (pe_resource_t *) set_iter->data;
+ resource = (pcmk_resource_t *) set_iter->data;
set_iter = set_iter->next;
if (sequential) {
@@ -694,42 +698,42 @@ unpack_order_set(const xmlNode *set, enum pe_order_kind parent_kind,
/*!
* \brief Order two resource sets relative to each other
*
- * \param[in] id Ordering ID (for logging)
- * \param[in] set1 First listed set
- * \param[in] set2 Second listed set
- * \param[in] kind Ordering kind
- * \param[in,out] data_set Cluster working set
- * \param[in] symmetry Which ordering symmetry applies to this relation
+ * \param[in] id Ordering ID (for logging)
+ * \param[in] set1 First listed set
+ * \param[in] set2 Second listed set
+ * \param[in] kind Ordering kind
+ * \param[in,out] scheduler Scheduler data
+ * \param[in] symmetry Which ordering symmetry applies to this relation
*
* \return Standard Pacemaker return code
*/
static int
order_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2,
- enum pe_order_kind kind, pe_working_set_t *data_set,
+ enum pe_order_kind kind, pcmk_scheduler_t *scheduler,
enum ordering_symmetry symmetry)
{
const xmlNode *xml_rsc = NULL;
const xmlNode *xml_rsc_2 = NULL;
- pe_resource_t *rsc_1 = NULL;
- pe_resource_t *rsc_2 = NULL;
+ pcmk_resource_t *rsc_1 = NULL;
+ pcmk_resource_t *rsc_2 = NULL;
const char *action_1 = crm_element_value(set1, "action");
const char *action_2 = crm_element_value(set2, "action");
- uint32_t flags = pe_order_none;
+ uint32_t flags = pcmk__ar_none;
bool require_all = true;
(void) pcmk__xe_get_bool_attr(set1, "require-all", &require_all);
if (action_1 == NULL) {
- action_1 = RSC_START;
+ action_1 = PCMK_ACTION_START;
}
if (action_2 == NULL) {
- action_2 = RSC_START;
+ action_2 = PCMK_ACTION_START;
}
if (symmetry == ordering_symmetric_inverse) {
@@ -737,8 +741,8 @@ order_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2,
action_2 = invert_action(action_2);
}
- if (pcmk__str_eq(RSC_STOP, action_1, pcmk__str_casei)
- || pcmk__str_eq(RSC_DEMOTE, action_1, pcmk__str_casei)) {
+ if (pcmk__str_eq(PCMK_ACTION_STOP, action_1, pcmk__str_none)
+ || pcmk__str_eq(PCMK_ACTION_DEMOTE, action_1, pcmk__str_none)) {
/* Assuming: A -> ( B || C) -> D
* The one-or-more logic only applies during the start/promote phase.
* During shutdown neither B nor C can shutdown until D is down, so simply
@@ -753,11 +757,11 @@ order_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2,
* irrelevant in regards to set2.
*/
if (!require_all) {
- char *task = crm_strdup_printf(CRM_OP_RELAXED_SET ":%s", ID(set1));
- pe_action_t *unordered_action = get_pseudo_op(task, data_set);
+ char *task = crm_strdup_printf(PCMK_ACTION_ONE_OR_MORE ":%s", ID(set1));
+ pcmk_action_t *unordered_action = get_pseudo_op(task, scheduler);
free(task);
- pe__set_action_flags(unordered_action, pe_action_requires_any);
+ pe__set_action_flags(unordered_action, pcmk_action_min_runnable);
for (xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
@@ -770,8 +774,9 @@ order_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2,
*/
pcmk__new_ordering(rsc_1, pcmk__op_key(rsc_1->id, action_1, 0),
NULL, NULL, NULL, unordered_action,
- pe_order_one_or_more|pe_order_implies_then_printed,
- data_set);
+ pcmk__ar_min_runnable
+ |pcmk__ar_first_implies_then_graphed,
+ scheduler);
}
for (xml_rsc_2 = first_named_child(set2, XML_TAG_RESOURCE_REF);
xml_rsc_2 != NULL; xml_rsc_2 = crm_next_same_xml(xml_rsc_2)) {
@@ -784,7 +789,8 @@ order_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2,
*/
pcmk__new_ordering(NULL, NULL, unordered_action,
rsc_2, pcmk__op_key(rsc_2->id, action_2, 0),
- NULL, flags|pe_order_runnable_left, data_set);
+ NULL, flags|pcmk__ar_unrunnable_first_blocks,
+ scheduler);
}
return pcmk_rc_ok;
@@ -859,7 +865,8 @@ order_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2,
EXPAND_CONSTRAINT_IDREF(id, rsc_1, ID(xml_rsc));
- for (xmlNode *xml_rsc_2 = first_named_child(set2, XML_TAG_RESOURCE_REF);
+ for (xmlNode *xml_rsc_2 = first_named_child(set2,
+ XML_TAG_RESOURCE_REF);
xml_rsc_2 != NULL; xml_rsc_2 = crm_next_same_xml(xml_rsc_2)) {
EXPAND_CONSTRAINT_IDREF(id, rsc_2, ID(xml_rsc_2));
@@ -878,31 +885,31 @@ order_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2,
*
* \param[in,out] xml_obj Ordering constraint XML
* \param[out] expanded_xml Equivalent XML with tags expanded
- * \param[in] data_set Cluster working set
+ * \param[in] scheduler Scheduler data
*
* \return Standard Pacemaker return code (specifically, pcmk_rc_ok on success,
* and pcmk_rc_unpack_error on invalid configuration)
*/
static int
unpack_order_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
- const pe_working_set_t *data_set)
+ const pcmk_scheduler_t *scheduler)
{
const char *id_first = NULL;
const char *id_then = NULL;
const char *action_first = NULL;
const char *action_then = NULL;
- pe_resource_t *rsc_first = NULL;
- pe_resource_t *rsc_then = NULL;
- pe_tag_t *tag_first = NULL;
- pe_tag_t *tag_then = NULL;
+ pcmk_resource_t *rsc_first = NULL;
+ pcmk_resource_t *rsc_then = NULL;
+ pcmk_tag_t *tag_first = NULL;
+ pcmk_tag_t *tag_then = NULL;
xmlNode *rsc_set_first = NULL;
xmlNode *rsc_set_then = NULL;
bool any_sets = false;
// Check whether there are any resource sets with template or tag references
- *expanded_xml = pcmk__expand_tags_in_sets(xml_obj, data_set);
+ *expanded_xml = pcmk__expand_tags_in_sets(xml_obj, scheduler);
if (*expanded_xml != NULL) {
crm_log_xml_trace(*expanded_xml, "Expanded rsc_order");
return pcmk_rc_ok;
@@ -914,14 +921,15 @@ unpack_order_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
return pcmk_rc_ok;
}
- if (!pcmk__valid_resource_or_tag(data_set, id_first, &rsc_first,
+ if (!pcmk__valid_resource_or_tag(scheduler, id_first, &rsc_first,
&tag_first)) {
pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
"valid resource or tag", ID(xml_obj), id_first);
return pcmk_rc_unpack_error;
}
- if (!pcmk__valid_resource_or_tag(data_set, id_then, &rsc_then, &tag_then)) {
+ if (!pcmk__valid_resource_or_tag(scheduler, id_then, &rsc_then,
+ &tag_then)) {
pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
"valid resource or tag", ID(xml_obj), id_then);
return pcmk_rc_unpack_error;
@@ -937,9 +945,9 @@ unpack_order_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
*expanded_xml = copy_xml(xml_obj);
- // Convert template/tag reference in "first" into resource_set under constraint
+ // Convert template/tag reference in "first" into constraint resource_set
if (!pcmk__tag_to_set(*expanded_xml, &rsc_set_first, XML_ORDER_ATTR_FIRST,
- true, data_set)) {
+ true, scheduler)) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
return pcmk_rc_unpack_error;
@@ -954,9 +962,9 @@ unpack_order_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
any_sets = true;
}
- // Convert template/tag reference in "then" into resource_set under constraint
+ // Convert template/tag reference in "then" into constraint resource_set
if (!pcmk__tag_to_set(*expanded_xml, &rsc_set_then, XML_ORDER_ATTR_THEN,
- true, data_set)) {
+ true, scheduler)) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
return pcmk_rc_unpack_error;
@@ -985,11 +993,11 @@ unpack_order_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
* \internal
* \brief Unpack ordering constraint XML
*
- * \param[in,out] xml_obj Ordering constraint XML to unpack
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] xml_obj Ordering constraint XML to unpack
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set)
+pcmk__unpack_ordering(xmlNode *xml_obj, pcmk_scheduler_t *scheduler)
{
xmlNode *set = NULL;
xmlNode *last = NULL;
@@ -1005,7 +1013,7 @@ pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set)
NULL);
// Expand any resource tags in the constraint XML
- if (unpack_order_tags(xml_obj, &expanded_xml, data_set) != pcmk_rc_ok) {
+ if (unpack_order_tags(xml_obj, &expanded_xml, scheduler) != pcmk_rc_ok) {
return;
}
if (expanded_xml != NULL) {
@@ -1017,9 +1025,9 @@ pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set)
for (set = first_named_child(xml_obj, XML_CONS_TAG_RSC_SET);
set != NULL; set = crm_next_same_xml(set)) {
- set = expand_idref(set, data_set->input);
+ set = expand_idref(set, scheduler->input);
if ((set == NULL) // Configuration error, message already logged
- || (unpack_order_set(set, kind, invert, data_set) != pcmk_rc_ok)) {
+ || (unpack_order_set(set, kind, invert, scheduler) != pcmk_rc_ok)) {
if (expanded_xml != NULL) {
free_xml(expanded_xml);
@@ -1029,7 +1037,7 @@ pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set)
if (last != NULL) {
- if (order_rsc_sets(id, last, set, kind, data_set,
+ if (order_rsc_sets(id, last, set, kind, scheduler,
symmetry) != pcmk_rc_ok) {
if (expanded_xml != NULL) {
free_xml(expanded_xml);
@@ -1038,7 +1046,7 @@ pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set)
}
if ((symmetry == ordering_symmetric)
- && (order_rsc_sets(id, set, last, kind, data_set,
+ && (order_rsc_sets(id, set, last, kind, scheduler,
ordering_symmetric_inverse) != pcmk_rc_ok)) {
if (expanded_xml != NULL) {
free_xml(expanded_xml);
@@ -1057,17 +1065,17 @@ pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set)
// If the constraint has no resource sets, unpack it as a simple ordering
if (last == NULL) {
- return unpack_simple_rsc_order(xml_obj, data_set);
+ return unpack_simple_rsc_order(xml_obj, scheduler);
}
}
static bool
-ordering_is_invalid(pe_action_t *action, pe_action_wrapper_t *input)
+ordering_is_invalid(pcmk_action_t *action, pcmk__related_action_t *input)
{
/* Prevent user-defined ordering constraints between resources
* running in a guest node and the resource that defines that node.
*/
- if (!pcmk_is_set(input->type, pe_order_preserve)
+ if (!pcmk_is_set(input->type, pcmk__ar_guest_allowed)
&& (input->action->rsc != NULL)
&& pcmk__rsc_corresponds_to_guest(action->rsc, input->action->node)) {
@@ -1083,8 +1091,9 @@ ordering_is_invalid(pe_action_t *action, pe_action_wrapper_t *input)
* migrated from node2 to node1. If there would be a graph loop,
* break the order "load_stopped_node2" -> "rscA_migrate_to node1".
*/
- if ((input->type == pe_order_load) && action->rsc
- && pcmk__str_eq(action->task, RSC_MIGRATE, pcmk__str_casei)
+ if (((uint32_t) input->type == pcmk__ar_if_on_same_node_or_target)
+ && (action->rsc != NULL)
+ && pcmk__str_eq(action->task, PCMK_ACTION_MIGRATE_TO, pcmk__str_none)
&& pcmk__graph_has_loop(action, action, input)) {
return true;
}
@@ -1093,18 +1102,18 @@ ordering_is_invalid(pe_action_t *action, pe_action_wrapper_t *input)
}
void
-pcmk__disable_invalid_orderings(pe_working_set_t *data_set)
+pcmk__disable_invalid_orderings(pcmk_scheduler_t *scheduler)
{
- for (GList *iter = data_set->actions; iter != NULL; iter = iter->next) {
- pe_action_t *action = (pe_action_t *) iter->data;
- pe_action_wrapper_t *input = NULL;
+ for (GList *iter = scheduler->actions; iter != NULL; iter = iter->next) {
+ pcmk_action_t *action = (pcmk_action_t *) iter->data;
+ pcmk__related_action_t *input = NULL;
for (GList *input_iter = action->actions_before;
input_iter != NULL; input_iter = input_iter->next) {
- input = (pe_action_wrapper_t *) input_iter->data;
+ input = input_iter->data;
if (ordering_is_invalid(action, input)) {
- input->type = pe_order_none;
+ input->type = (enum pe_ordering) pcmk__ar_none;
}
}
}
@@ -1118,23 +1127,22 @@ pcmk__disable_invalid_orderings(pe_working_set_t *data_set)
* \param[in] shutdown_op Shutdown action for node
*/
void
-pcmk__order_stops_before_shutdown(pe_node_t *node, pe_action_t *shutdown_op)
+pcmk__order_stops_before_shutdown(pcmk_node_t *node, pcmk_action_t *shutdown_op)
{
for (GList *iter = node->details->data_set->actions;
iter != NULL; iter = iter->next) {
- pe_action_t *action = (pe_action_t *) iter->data;
+ pcmk_action_t *action = (pcmk_action_t *) iter->data;
// Only stops on the node shutting down are relevant
- if ((action->rsc == NULL) || (action->node == NULL)
- || (action->node->details != node->details)
- || !pcmk__str_eq(action->task, RSC_STOP, pcmk__str_casei)) {
+ if (!pe__same_node(action->node, node)
+ || !pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_none)) {
continue;
}
// Resources and nodes in maintenance mode won't be touched
- if (pcmk_is_set(action->rsc->flags, pe_rsc_maintenance)) {
+ if (pcmk_is_set(action->rsc->flags, pcmk_rsc_maintenance)) {
pe_rsc_trace(action->rsc,
"Not ordering %s before shutdown of %s because "
"resource in maintenance mode",
@@ -1154,7 +1162,7 @@ pcmk__order_stops_before_shutdown(pe_node_t *node, pe_action_t *shutdown_op)
* we may still end up blocking)
*/
if (!pcmk_any_flags_set(action->rsc->flags,
- pe_rsc_managed|pe_rsc_block)) {
+ pcmk_rsc_managed|pcmk_rsc_blocked)) {
pe_rsc_trace(action->rsc,
"Not ordering %s before shutdown of %s because "
"resource is unmanaged or blocked",
@@ -1164,10 +1172,10 @@ pcmk__order_stops_before_shutdown(pe_node_t *node, pe_action_t *shutdown_op)
pe_rsc_trace(action->rsc, "Ordering %s before shutdown of %s",
action->uuid, pe__node_name(node));
- pe__clear_action_flags(action, pe_action_optional);
+ pe__clear_action_flags(action, pcmk_action_optional);
pcmk__new_ordering(action->rsc, NULL, action, NULL,
- strdup(CRM_OP_SHUTDOWN), shutdown_op,
- pe_order_optional|pe_order_runnable_left,
+ strdup(PCMK_ACTION_DO_SHUTDOWN), shutdown_op,
+ pcmk__ar_ordered|pcmk__ar_unrunnable_first_blocks,
node->details->data_set);
}
}
@@ -1183,7 +1191,7 @@ pcmk__order_stops_before_shutdown(pe_node_t *node, pe_action_t *shutdown_op)
* \note It is the caller's responsibility to free the result with g_list_free()
*/
static GList *
-find_actions_by_task(const pe_resource_t *rsc, const char *original_key)
+find_actions_by_task(const pcmk_resource_t *rsc, const char *original_key)
{
// Search under given task key directly
GList *list = find_actions(rsc->actions, original_key, NULL);
@@ -1215,11 +1223,11 @@ find_actions_by_task(const pe_resource_t *rsc, const char *original_key)
* \param[in,out] order Ordering constraint being applied
*/
static void
-order_resource_actions_after(pe_action_t *first_action,
- const pe_resource_t *rsc, pe__ordering_t *order)
+order_resource_actions_after(pcmk_action_t *first_action,
+ const pcmk_resource_t *rsc, pe__ordering_t *order)
{
GList *then_actions = NULL;
- uint32_t flags = pe_order_none;
+ uint32_t flags = pcmk__ar_none;
CRM_CHECK((rsc != NULL) && (order != NULL), return);
@@ -1241,15 +1249,17 @@ order_resource_actions_after(pe_action_t *first_action,
}
if ((first_action != NULL) && (first_action->rsc == rsc)
- && pcmk_is_set(first_action->flags, pe_action_dangle)) {
+ && pcmk_is_set(first_action->flags, pcmk_action_migration_abort)) {
pe_rsc_trace(rsc,
"Detected dangling migration ordering (%s then %s %s)",
first_action->uuid, order->rh_action_task, rsc->id);
- pe__clear_order_flags(flags, pe_order_implies_then);
+ pe__clear_order_flags(flags, pcmk__ar_first_implies_then);
}
- if ((first_action == NULL) && !pcmk_is_set(flags, pe_order_implies_then)) {
+ if ((first_action == NULL)
+ && !pcmk_is_set(flags, pcmk__ar_first_implies_then)) {
+
pe_rsc_debug(rsc,
"Ignoring ordering %d for %s: No first action found",
order->id, rsc->id);
@@ -1258,12 +1268,12 @@ order_resource_actions_after(pe_action_t *first_action,
}
for (GList *iter = then_actions; iter != NULL; iter = iter->next) {
- pe_action_t *then_action_iter = (pe_action_t *) iter->data;
+ pcmk_action_t *then_action_iter = (pcmk_action_t *) iter->data;
if (first_action != NULL) {
order_actions(first_action, then_action_iter, flags);
} else {
- pe__clear_action_flags(then_action_iter, pe_action_runnable);
+ pe__clear_action_flags(then_action_iter, pcmk_action_runnable);
crm_warn("%s of %s is unrunnable because there is no %s of %s "
"to order it after", then_action_iter->task, rsc->id,
order->lh_action_task, order->lh_rsc->id);
@@ -1274,12 +1284,11 @@ order_resource_actions_after(pe_action_t *first_action,
}
static void
-rsc_order_first(pe_resource_t *first_rsc, pe__ordering_t *order,
- pe_working_set_t *data_set)
+rsc_order_first(pcmk_resource_t *first_rsc, pe__ordering_t *order)
{
GList *first_actions = NULL;
- pe_action_t *first_action = order->lh_action;
- pe_resource_t *then_rsc = order->rh_rsc;
+ pcmk_action_t *first_action = order->lh_action;
+ pcmk_resource_t *then_rsc = order->rh_rsc;
CRM_ASSERT(first_rsc != NULL);
pe_rsc_trace(first_rsc, "Applying ordering constraint %d (first: %s)",
@@ -1305,15 +1314,17 @@ rsc_order_first(pe_resource_t *first_rsc, pe__ordering_t *order,
parse_op_key(order->lh_action_task, NULL, &op_type, &interval_ms);
key = pcmk__op_key(first_rsc->id, op_type, interval_ms);
- if ((first_rsc->fns->state(first_rsc, TRUE) == RSC_ROLE_STOPPED)
- && pcmk__str_eq(op_type, RSC_STOP, pcmk__str_casei)) {
+ if ((first_rsc->fns->state(first_rsc, TRUE) == pcmk_role_stopped)
+ && pcmk__str_eq(op_type, PCMK_ACTION_STOP, pcmk__str_none)) {
free(key);
pe_rsc_trace(first_rsc,
"Ignoring constraint %d: first (%s for %s) not found",
order->id, order->lh_action_task, first_rsc->id);
- } else if ((first_rsc->fns->state(first_rsc, TRUE) == RSC_ROLE_UNPROMOTED)
- && pcmk__str_eq(op_type, RSC_DEMOTE, pcmk__str_casei)) {
+ } else if ((first_rsc->fns->state(first_rsc,
+ TRUE) == pcmk_role_unpromoted)
+ && pcmk__str_eq(op_type, PCMK_ACTION_DEMOTE,
+ pcmk__str_none)) {
free(key);
pe_rsc_trace(first_rsc,
"Ignoring constraint %d: first (%s for %s) not found",
@@ -1324,7 +1335,7 @@ rsc_order_first(pe_resource_t *first_rsc, pe__ordering_t *order,
"Creating first (%s for %s) for constraint %d ",
order->lh_action_task, first_rsc->id, order->id);
first_action = custom_action(first_rsc, key, op_type, NULL, TRUE,
- TRUE, data_set);
+ first_rsc->cluster);
first_actions = g_list_prepend(NULL, first_action);
}
@@ -1339,8 +1350,8 @@ rsc_order_first(pe_resource_t *first_rsc, pe__ordering_t *order,
}
then_rsc = order->rh_action->rsc;
}
- for (GList *gIter = first_actions; gIter != NULL; gIter = gIter->next) {
- first_action = (pe_action_t *) gIter->data;
+ for (GList *iter = first_actions; iter != NULL; iter = iter->next) {
+ first_action = iter->data;
if (then_rsc == NULL) {
order_actions(first_action, order->rh_action, order->flags);
@@ -1353,8 +1364,29 @@ rsc_order_first(pe_resource_t *first_rsc, pe__ordering_t *order,
g_list_free(first_actions);
}
+// GFunc to call pcmk__block_colocation_dependents()
+static void
+block_colocation_dependents(gpointer data, gpointer user_data)
+{
+ pcmk__block_colocation_dependents(data);
+}
+
+// GFunc to call pcmk__update_action_for_orderings()
+static void
+update_action_for_orderings(gpointer data, gpointer user_data)
+{
+ pcmk__update_action_for_orderings((pcmk_action_t *) data,
+ (pcmk_scheduler_t *) user_data);
+}
+
+/*!
+ * \internal
+ * \brief Apply all ordering constraints
+ *
+ * \param[in,out] sched Scheduler data
+ */
void
-pcmk__apply_orderings(pe_working_set_t *data_set)
+pcmk__apply_orderings(pcmk_scheduler_t *sched)
{
crm_trace("Applying ordering constraints");
@@ -1370,16 +1402,16 @@ pcmk__apply_orderings(pe_working_set_t *data_set)
* @TODO This is brittle and should be carefully redesigned so that the
* order of creation doesn't matter, and the reverse becomes unneeded.
*/
- data_set->ordering_constraints = g_list_reverse(data_set->ordering_constraints);
+ sched->ordering_constraints = g_list_reverse(sched->ordering_constraints);
- for (GList *gIter = data_set->ordering_constraints;
- gIter != NULL; gIter = gIter->next) {
+ for (GList *iter = sched->ordering_constraints;
+ iter != NULL; iter = iter->next) {
- pe__ordering_t *order = gIter->data;
- pe_resource_t *rsc = order->lh_rsc;
+ pe__ordering_t *order = iter->data;
+ pcmk_resource_t *rsc = order->lh_rsc;
if (rsc != NULL) {
- rsc_order_first(rsc, order, data_set);
+ rsc_order_first(rsc, order);
continue;
}
@@ -1394,17 +1426,15 @@ pcmk__apply_orderings(pe_working_set_t *data_set)
}
}
- g_list_foreach(data_set->actions, (GFunc) pcmk__block_colocation_dependents,
- data_set);
+ g_list_foreach(sched->actions, block_colocation_dependents, NULL);
crm_trace("Ordering probes");
- pcmk__order_probes(data_set);
+ pcmk__order_probes(sched);
- crm_trace("Updating %d actions", g_list_length(data_set->actions));
- g_list_foreach(data_set->actions,
- (GFunc) pcmk__update_action_for_orderings, data_set);
+ crm_trace("Updating %d actions", g_list_length(sched->actions));
+ g_list_foreach(sched->actions, update_action_for_orderings, sched);
- pcmk__disable_invalid_orderings(data_set);
+ pcmk__disable_invalid_orderings(sched);
}
/*!
@@ -1415,18 +1445,18 @@ pcmk__apply_orderings(pe_working_set_t *data_set)
* \param[in,out] list List of "before" actions
*/
void
-pcmk__order_after_each(pe_action_t *after, GList *list)
+pcmk__order_after_each(pcmk_action_t *after, GList *list)
{
const char *after_desc = (after->task == NULL)? after->uuid : after->task;
for (GList *iter = list; iter != NULL; iter = iter->next) {
- pe_action_t *before = (pe_action_t *) iter->data;
+ pcmk_action_t *before = (pcmk_action_t *) iter->data;
const char *before_desc = before->task? before->task : before->uuid;
crm_debug("Ordering %s on %s before %s on %s",
before_desc, pe__node_name(before->node),
after_desc, pe__node_name(after->node));
- order_actions(before, after, pe_order_optional);
+ order_actions(before, after, pcmk__ar_ordered);
}
}
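A minimal usage sketch, with illustrative names only (a late action and a list of earlier actions collected elsewhere):

pcmk__order_after_each(final_action, earlier_actions);  // final_action ordered after each listed action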
@@ -1437,27 +1467,34 @@ pcmk__order_after_each(pe_action_t *after, GList *list)
* \param[in,out] rsc Clone or bundle to order
*/
void
-pcmk__promotable_restart_ordering(pe_resource_t *rsc)
+pcmk__promotable_restart_ordering(pcmk_resource_t *rsc)
{
// Order start and promote after all instances are stopped
- pcmk__order_resource_actions(rsc, RSC_STOPPED, rsc, RSC_START,
- pe_order_optional);
- pcmk__order_resource_actions(rsc, RSC_STOPPED, rsc, RSC_PROMOTE,
- pe_order_optional);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_STOPPED,
+ rsc, PCMK_ACTION_START,
+ pcmk__ar_ordered);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_STOPPED,
+ rsc, PCMK_ACTION_PROMOTE,
+ pcmk__ar_ordered);
// Order stop, start, and promote after all instances are demoted
- pcmk__order_resource_actions(rsc, RSC_DEMOTED, rsc, RSC_STOP,
- pe_order_optional);
- pcmk__order_resource_actions(rsc, RSC_DEMOTED, rsc, RSC_START,
- pe_order_optional);
- pcmk__order_resource_actions(rsc, RSC_DEMOTED, rsc, RSC_PROMOTE,
- pe_order_optional);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_DEMOTED,
+ rsc, PCMK_ACTION_STOP,
+ pcmk__ar_ordered);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_DEMOTED,
+ rsc, PCMK_ACTION_START,
+ pcmk__ar_ordered);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_DEMOTED,
+ rsc, PCMK_ACTION_PROMOTE,
+ pcmk__ar_ordered);
// Order promote after all instances are started
- pcmk__order_resource_actions(rsc, RSC_STARTED, rsc, RSC_PROMOTE,
- pe_order_optional);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_RUNNING,
+ rsc, PCMK_ACTION_PROMOTE,
+ pcmk__ar_ordered);
// Order demote after all instances are demoted
- pcmk__order_resource_actions(rsc, RSC_DEMOTE, rsc, RSC_DEMOTED,
- pe_order_optional);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_DEMOTE,
+ rsc, PCMK_ACTION_DEMOTED,
+ pcmk__ar_ordered);
}
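/*
 * Not part of this patch: a rough picture of the net effect of the orderings
 * above when combined with the clone's own stop->stopped and start->running
 * pseudo-action orderings (created elsewhere), for a full restart of a
 * promotable clone:
 *
 *   demote -> demoted -> stop -> stopped -> start -> running -> promote
 *
 * i.e. all instances are demoted before any stop, stopped before any start,
 * and started before any promote.
 */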
diff --git a/lib/pacemaker/pcmk_sched_primitive.c b/lib/pacemaker/pcmk_sched_primitive.c
index aefbf9a..96acf1c 100644
--- a/lib/pacemaker/pcmk_sched_primitive.c
+++ b/lib/pacemaker/pcmk_sched_primitive.c
@@ -10,20 +10,26 @@
#include <crm_internal.h>
#include <stdbool.h>
+#include <stdint.h> // uint8_t, uint32_t
#include <crm/msg_xml.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
-static void stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional);
-static void start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional);
-static void demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional);
-static void promote_resource(pe_resource_t *rsc, pe_node_t *node,
+static void stop_resource(pcmk_resource_t *rsc, pcmk_node_t *node,
+ bool optional);
+static void start_resource(pcmk_resource_t *rsc, pcmk_node_t *node,
+ bool optional);
+static void demote_resource(pcmk_resource_t *rsc, pcmk_node_t *node,
+ bool optional);
+static void promote_resource(pcmk_resource_t *rsc, pcmk_node_t *node,
bool optional);
-static void assert_role_error(pe_resource_t *rsc, pe_node_t *node,
+static void assert_role_error(pcmk_resource_t *rsc, pcmk_node_t *node,
bool optional);
+#define RSC_ROLE_MAX (pcmk_role_promoted + 1)
+
static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
/* This array lists the immediate next role when transitioning from one role
* to a target role. For example, when going from Stopped to Promoted, the
@@ -34,35 +40,35 @@ static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
* Current role Immediate next role Final target role
* ------------ ------------------- -----------------
*/
- /* Unknown */ { RSC_ROLE_UNKNOWN, /* Unknown */
- RSC_ROLE_STOPPED, /* Stopped */
- RSC_ROLE_STOPPED, /* Started */
- RSC_ROLE_STOPPED, /* Unpromoted */
- RSC_ROLE_STOPPED, /* Promoted */
+ /* Unknown */ { pcmk_role_unknown, /* Unknown */
+ pcmk_role_stopped, /* Stopped */
+ pcmk_role_stopped, /* Started */
+ pcmk_role_stopped, /* Unpromoted */
+ pcmk_role_stopped, /* Promoted */
},
- /* Stopped */ { RSC_ROLE_STOPPED, /* Unknown */
- RSC_ROLE_STOPPED, /* Stopped */
- RSC_ROLE_STARTED, /* Started */
- RSC_ROLE_UNPROMOTED, /* Unpromoted */
- RSC_ROLE_UNPROMOTED, /* Promoted */
+ /* Stopped */ { pcmk_role_stopped, /* Unknown */
+ pcmk_role_stopped, /* Stopped */
+ pcmk_role_started, /* Started */
+ pcmk_role_unpromoted, /* Unpromoted */
+ pcmk_role_unpromoted, /* Promoted */
},
- /* Started */ { RSC_ROLE_STOPPED, /* Unknown */
- RSC_ROLE_STOPPED, /* Stopped */
- RSC_ROLE_STARTED, /* Started */
- RSC_ROLE_UNPROMOTED, /* Unpromoted */
- RSC_ROLE_PROMOTED, /* Promoted */
+ /* Started */ { pcmk_role_stopped, /* Unknown */
+ pcmk_role_stopped, /* Stopped */
+ pcmk_role_started, /* Started */
+ pcmk_role_unpromoted, /* Unpromoted */
+ pcmk_role_promoted, /* Promoted */
},
- /* Unpromoted */ { RSC_ROLE_STOPPED, /* Unknown */
- RSC_ROLE_STOPPED, /* Stopped */
- RSC_ROLE_STOPPED, /* Started */
- RSC_ROLE_UNPROMOTED, /* Unpromoted */
- RSC_ROLE_PROMOTED, /* Promoted */
+ /* Unpromoted */ { pcmk_role_stopped, /* Unknown */
+ pcmk_role_stopped, /* Stopped */
+ pcmk_role_stopped, /* Started */
+ pcmk_role_unpromoted, /* Unpromoted */
+ pcmk_role_promoted, /* Promoted */
},
- /* Promoted */ { RSC_ROLE_STOPPED, /* Unknown */
- RSC_ROLE_UNPROMOTED, /* Stopped */
- RSC_ROLE_UNPROMOTED, /* Started */
- RSC_ROLE_UNPROMOTED, /* Unpromoted */
- RSC_ROLE_PROMOTED, /* Promoted */
+ /* Promoted */ { pcmk_role_stopped, /* Unknown */
+ pcmk_role_unpromoted, /* Stopped */
+ pcmk_role_unpromoted, /* Started */
+ pcmk_role_unpromoted, /* Unpromoted */
+ pcmk_role_promoted, /* Promoted */
},
};
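/*
 * Illustrative sketch, not part of this patch: how the matrix above can be
 * walked to enumerate the intermediate roles between a current role and a
 * target role (schedule_restart_actions() later in this file performs the
 * same walk while mapping each step to a transition function). For example,
 * starting from Stopped with target Promoted visits Unpromoted and then
 * Promoted.
 */
static void
trace_role_path(enum rsc_role_e role, enum rsc_role_e target)
{
    // Assumes a valid role pair, for which the matrix always converges
    while (role != target) {
        enum rsc_role_e next = rsc_state_matrix[role][target];

        crm_trace("Next step from %s toward %s is %s",
                  role2text(role), role2text(target), role2text(next));
        role = next;
    }
}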
@@ -74,7 +80,7 @@ static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
* \param[in,out] node Node where resource will be in its next role
* \param[in] optional Whether scheduled actions should be optional
*/
-typedef void (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *node,
+typedef void (*rsc_transition_fn)(pcmk_resource_t *rsc, pcmk_node_t *node,
bool optional);
static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
@@ -118,14 +124,14 @@ static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
/*!
* \internal
- * \brief Get a list of a resource's allowed nodes sorted by node weight
+ * \brief Get a list of a resource's allowed nodes sorted by node score
*
* \param[in] rsc Resource to check
*
- * \return List of allowed nodes sorted by node weight
+ * \return List of allowed nodes sorted by node score
*/
static GList *
-sorted_allowed_nodes(const pe_resource_t *rsc)
+sorted_allowed_nodes(const pcmk_resource_t *rsc)
{
if (rsc->allowed_nodes != NULL) {
GList *nodes = g_hash_table_get_values(rsc->allowed_nodes);
@@ -141,33 +147,43 @@ sorted_allowed_nodes(const pe_resource_t *rsc)
* \internal
* \brief Assign a resource to its best allowed node, if possible
*
- * \param[in,out] rsc Resource to choose a node for
- * \param[in] prefer If not NULL, prefer this node when all else equal
+ * \param[in,out] rsc Resource to choose a node for
+ * \param[in] prefer If not \c NULL, prefer this node when all else
+ * equal
+ * \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a
+ * node, set next role to stopped and update
+ * existing actions
*
* \return true if \p rsc could be assigned to a node, otherwise false
+ *
+ * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
+ * completely undo the assignment. A successful assignment can be either
+ * undone or left alone as final. A failed assignment has the same effect
+ * as calling pcmk__unassign_resource(); there are no side effects on
+ * roles or actions.
*/
static bool
-assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer)
+assign_best_node(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
+ bool stop_if_fail)
{
GList *nodes = NULL;
- pe_node_t *chosen = NULL;
- pe_node_t *best = NULL;
- bool result = false;
- const pe_node_t *most_free_node = pcmk__ban_insufficient_capacity(rsc);
+ pcmk_node_t *chosen = NULL;
+ pcmk_node_t *best = NULL;
+ const pcmk_node_t *most_free_node = pcmk__ban_insufficient_capacity(rsc);
if (prefer == NULL) {
prefer = most_free_node;
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
// We've already finished assignment of resources to nodes
return rsc->allocated_to != NULL;
}
- // Sort allowed nodes by weight
+ // Sort allowed nodes by score
nodes = sorted_allowed_nodes(rsc);
if (nodes != NULL) {
- best = (pe_node_t *) nodes->data; // First node has best score
+ best = (pcmk_node_t *) nodes->data; // First node has best score
}
if ((prefer != NULL) && (nodes != NULL)) {
@@ -178,11 +194,11 @@ assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer)
pe_rsc_trace(rsc, "Preferred node %s for %s was unknown",
pe__node_name(prefer), rsc->id);
- /* Favor the preferred node as long as its weight is at least as good as
+ /* Favor the preferred node as long as its score is at least as good as
* the best allowed node's.
*
* An alternative would be to favor the preferred node even if the best
- * node is better, when the best node's weight is less than INFINITY.
+ * node is better, when the best node's score is less than INFINITY.
*/
} else if (chosen->weight < best->weight) {
pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
@@ -196,7 +212,8 @@ assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer)
} else {
pe_rsc_trace(rsc,
- "Chose preferred node %s for %s (ignoring %d candidates)",
+ "Chose preferred node %s for %s "
+ "(ignoring %d candidates)",
pe__node_name(chosen), rsc->id, g_list_length(nodes));
}
}
@@ -220,23 +237,24 @@ assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer)
* remaining unassigned instances to prefer a node that's already
* running another instance.
*/
- pe_node_t *running = pe__current_node(rsc);
+ pcmk_node_t *running = pe__current_node(rsc);
if (running == NULL) {
// Nothing to do
} else if (!pcmk__node_available(running, true, false)) {
- pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources",
+ pe_rsc_trace(rsc,
+ "Current node for %s (%s) can't run resources",
rsc->id, pe__node_name(running));
} else {
int nodes_with_best_score = 1;
for (GList *iter = nodes->next; iter; iter = iter->next) {
- pe_node_t *allowed = (pe_node_t *) iter->data;
+ pcmk_node_t *allowed = (pcmk_node_t *) iter->data;
if (allowed->weight != chosen->weight) {
- // The nodes are sorted by weight, so no more are equal
+ // The nodes are sorted by score, so no more are equal
break;
}
if (pe__same_node(allowed, running)) {
@@ -247,7 +265,12 @@ assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer)
}
if (nodes_with_best_score > 1) {
- do_crm_log(((chosen->weight >= INFINITY)? LOG_WARNING : LOG_INFO),
+ uint8_t log_level = LOG_INFO;
+
+ if (chosen->weight >= INFINITY) {
+ log_level = LOG_WARNING;
+ }
+ do_crm_log(log_level,
"Chose %s for %s from %d nodes with score %s",
pe__node_name(chosen), rsc->id,
nodes_with_best_score,
@@ -260,40 +283,37 @@ assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer)
pe__node_name(chosen), rsc->id, g_list_length(nodes));
}
- result = pcmk__finalize_assignment(rsc, chosen, false);
+ pcmk__assign_resource(rsc, chosen, false, stop_if_fail);
g_list_free(nodes);
- return result;
+ return rsc->allocated_to != NULL;
}
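/*
 * Illustrative sketch, not part of this patch: the tentative-assignment
 * pattern that the new stop_if_fail argument enables. With stop_if_fail set
 * to false, a caller can try an assignment, inspect the outcome, and roll it
 * back with pcmk__unassign_resource() with no side effects on roles or
 * actions (per the note above). The acceptance test used here is a
 * placeholder, not anything this patch prescribes.
 */
static bool
try_tentative_assignment(pcmk_resource_t *rsc, const pcmk_node_t *prefer)
{
    // Tentative: a failure (or an explicit rollback) leaves the scheduler
    // state for this resource untouched
    pcmk_node_t *chosen = rsc->cmds->assign(rsc, prefer, false);

    if ((chosen == NULL) || (chosen->weight <= 0)) {
        pcmk__unassign_resource(rsc);   // Undo the tentative assignment
        return false;
    }
    return true;                        // Keep the assignment as final
}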
/*!
* \internal
* \brief Apply a "this with" colocation to a node's allowed node scores
*
- * \param[in,out] data Colocation to apply
- * \param[in,out] user_data Resource being assigned
+ * \param[in,out] colocation Colocation to apply
+ * \param[in,out] rsc Resource being assigned
*/
static void
-apply_this_with(gpointer data, gpointer user_data)
+apply_this_with(pcmk__colocation_t *colocation, pcmk_resource_t *rsc)
{
- pcmk__colocation_t *colocation = (pcmk__colocation_t *) data;
- pe_resource_t *rsc = (pe_resource_t *) user_data;
-
GHashTable *archive = NULL;
- pe_resource_t *other = colocation->primary;
+ pcmk_resource_t *other = colocation->primary;
// In certain cases, we will need to revert the node scores
- if ((colocation->dependent_role >= RSC_ROLE_PROMOTED)
+ if ((colocation->dependent_role >= pcmk_role_promoted)
|| ((colocation->score < 0) && (colocation->score > -INFINITY))) {
archive = pcmk__copy_node_table(rsc->allowed_nodes);
}
- if (pcmk_is_set(other->flags, pe_rsc_provisional)) {
+ if (pcmk_is_set(other->flags, pcmk_rsc_unassigned)) {
pe_rsc_trace(rsc,
"%s: Assigning colocation %s primary %s first"
"(score=%d role=%s)",
rsc->id, colocation->id, other->id,
colocation->score, role2text(colocation->dependent_role));
- other->cmds->assign(other, NULL);
+ other->cmds->assign(other, NULL, true);
}
// Apply the colocation score to this resource's allowed node scores
@@ -320,15 +340,15 @@ apply_this_with(gpointer data, gpointer user_data)
* \param[in] connection Connection resource that has been assigned
*/
static void
-remote_connection_assigned(const pe_resource_t *connection)
+remote_connection_assigned(const pcmk_resource_t *connection)
{
- pe_node_t *remote_node = pe_find_node(connection->cluster->nodes,
- connection->id);
+ pcmk_node_t *remote_node = pe_find_node(connection->cluster->nodes,
+ connection->id);
CRM_CHECK(remote_node != NULL, return);
if ((connection->allocated_to != NULL)
- && (connection->next_role != RSC_ROLE_STOPPED)) {
+ && (connection->next_role != pcmk_role_stopped)) {
crm_trace("Pacemaker Remote node %s will be online",
remote_node->details->id);
@@ -352,42 +372,59 @@ remote_connection_assigned(const pe_resource_t *connection)
* \internal
* \brief Assign a primitive resource to a node
*
- * \param[in,out] rsc Resource to assign to a node
- * \param[in] prefer Node to prefer, if all else is equal
+ * \param[in,out] rsc Resource to assign to a node
+ * \param[in] prefer Node to prefer, if all else is equal
+ * \param[in] stop_if_fail If \c true and \p rsc can't be assigned to a
+ * node, set next role to stopped and update
+ * existing actions
*
* \return Node that \p rsc is assigned to, if assigned entirely to one node
+ *
+ * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
+ * completely undo the assignment. A successful assignment can be either
+ * undone or left alone as final. A failed assignment has the same effect
+ * as calling pcmk__unassign_resource(); there are no side effects on
+ * roles or actions.
*/
-pe_node_t *
-pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer)
+pcmk_node_t *
+pcmk__primitive_assign(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
+ bool stop_if_fail)
{
GList *this_with_colocations = NULL;
GList *with_this_colocations = NULL;
GList *iter = NULL;
pcmk__colocation_t *colocation = NULL;
- CRM_ASSERT(rsc != NULL);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive));
// Never assign a child without parent being assigned first
if ((rsc->parent != NULL)
- && !pcmk_is_set(rsc->parent->flags, pe_rsc_allocating)) {
+ && !pcmk_is_set(rsc->parent->flags, pcmk_rsc_assigning)) {
pe_rsc_debug(rsc, "%s: Assigning parent %s first",
rsc->id, rsc->parent->id);
- rsc->parent->cmds->assign(rsc->parent, prefer);
+ rsc->parent->cmds->assign(rsc->parent, prefer, stop_if_fail);
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
- return rsc->allocated_to; // Assignment has already been done
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
+ // Assignment has already been done
+ const char *node_name = "no node";
+
+ if (rsc->allocated_to != NULL) {
+ node_name = pe__node_name(rsc->allocated_to);
+ }
+ pe_rsc_debug(rsc, "%s: pre-assigned to %s", rsc->id, node_name);
+ return rsc->allocated_to;
}
// Ensure we detect assignment loops
- if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_assigning)) {
pe_rsc_debug(rsc, "Breaking assignment loop involving %s", rsc->id);
return NULL;
}
- pe__set_resource_flags(rsc, pe_rsc_allocating);
+ pe__set_resource_flags(rsc, pcmk_rsc_assigning);
- pe__show_node_weights(true, rsc, "Pre-assignment", rsc->allowed_nodes,
- rsc->cluster);
+ pe__show_node_scores(true, rsc, "Pre-assignment", rsc->allowed_nodes,
+ rsc->cluster);
this_with_colocations = pcmk__this_with_colocations(rsc);
with_this_colocations = pcmk__with_this_colocations(rsc);
@@ -395,21 +432,23 @@ pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer)
// Apply mandatory colocations first, to satisfy as many as possible
for (iter = this_with_colocations; iter != NULL; iter = iter->next) {
colocation = iter->data;
+
if ((colocation->score <= -CRM_SCORE_INFINITY)
|| (colocation->score >= CRM_SCORE_INFINITY)) {
- apply_this_with(iter->data, rsc);
+ apply_this_with(colocation, rsc);
}
}
for (iter = with_this_colocations; iter != NULL; iter = iter->next) {
colocation = iter->data;
+
if ((colocation->score <= -CRM_SCORE_INFINITY)
|| (colocation->score >= CRM_SCORE_INFINITY)) {
- pcmk__add_dependent_scores(iter->data, rsc);
+ pcmk__add_dependent_scores(colocation, rsc);
}
}
- pe__show_node_weights(true, rsc, "Mandatory-colocations",
- rsc->allowed_nodes, rsc->cluster);
+ pe__show_node_scores(true, rsc, "Mandatory-colocations",
+ rsc->allowed_nodes, rsc->cluster);
// Then apply optional colocations
for (iter = this_with_colocations; iter != NULL; iter = iter->next) {
@@ -417,7 +456,7 @@ pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer)
if ((colocation->score > -CRM_SCORE_INFINITY)
&& (colocation->score < CRM_SCORE_INFINITY)) {
- apply_this_with(iter->data, rsc);
+ apply_this_with(colocation, rsc);
}
}
for (iter = with_this_colocations; iter != NULL; iter = iter->next) {
@@ -425,14 +464,14 @@ pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer)
if ((colocation->score > -CRM_SCORE_INFINITY)
&& (colocation->score < CRM_SCORE_INFINITY)) {
- pcmk__add_dependent_scores(iter->data, rsc);
+ pcmk__add_dependent_scores(colocation, rsc);
}
}
g_list_free(this_with_colocations);
g_list_free(with_this_colocations);
- if (rsc->next_role == RSC_ROLE_STOPPED) {
+ if (rsc->next_role == pcmk_role_stopped) {
pe_rsc_trace(rsc,
"Banning %s from all nodes because it will be stopped",
rsc->id);
@@ -440,64 +479,62 @@ pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer)
rsc->cluster);
} else if ((rsc->next_role > rsc->role)
- && !pcmk_is_set(rsc->cluster->flags, pe_flag_have_quorum)
- && (rsc->cluster->no_quorum_policy == no_quorum_freeze)) {
+ && !pcmk_is_set(rsc->cluster->flags, pcmk_sched_quorate)
+ && (rsc->cluster->no_quorum_policy == pcmk_no_quorum_freeze)) {
crm_notice("Resource %s cannot be elevated from %s to %s due to "
"no-quorum-policy=freeze",
rsc->id, role2text(rsc->role), role2text(rsc->next_role));
pe__set_next_role(rsc, rsc->role, "no-quorum-policy=freeze");
}
- pe__show_node_weights(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
- rsc, __func__, rsc->allowed_nodes, rsc->cluster);
+ pe__show_node_scores(!pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_output_scores),
+ rsc, __func__, rsc->allowed_nodes, rsc->cluster);
// Unmanage resource if fencing is enabled but no device is configured
- if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)
- && !pcmk_is_set(rsc->cluster->flags, pe_flag_have_stonith_resource)) {
- pe__clear_resource_flags(rsc, pe_rsc_managed);
+ if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)
+ && !pcmk_is_set(rsc->cluster->flags, pcmk_sched_have_fencing)) {
+ pe__clear_resource_flags(rsc, pcmk_rsc_managed);
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
// Unmanaged resources stay on their current node
const char *reason = NULL;
- pe_node_t *assign_to = NULL;
+ pcmk_node_t *assign_to = NULL;
pe__set_next_role(rsc, rsc->role, "unmanaged");
assign_to = pe__current_node(rsc);
if (assign_to == NULL) {
reason = "inactive";
- } else if (rsc->role == RSC_ROLE_PROMOTED) {
+ } else if (rsc->role == pcmk_role_promoted) {
reason = "promoted";
- } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
reason = "failed";
} else {
reason = "active";
}
pe_rsc_info(rsc, "Unmanaged resource %s assigned to %s: %s", rsc->id,
(assign_to? assign_to->details->uname : "no node"), reason);
- pcmk__finalize_assignment(rsc, assign_to, true);
-
- } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stop_everything)) {
- pe_rsc_debug(rsc, "Forcing %s to stop: stop-all-resources", rsc->id);
- pcmk__finalize_assignment(rsc, NULL, true);
+ pcmk__assign_resource(rsc, assign_to, true, stop_if_fail);
- } else if (pcmk_is_set(rsc->flags, pe_rsc_provisional)
- && assign_best_node(rsc, prefer)) {
- // Assignment successful
+ } else if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_stop_all)) {
+ // Must stop at some point, but be consistent with stop_if_fail
+ if (stop_if_fail) {
+ pe_rsc_debug(rsc, "Forcing %s to stop: stop-all-resources",
+ rsc->id);
+ }
+ pcmk__assign_resource(rsc, NULL, true, stop_if_fail);
- } else if (rsc->allocated_to == NULL) {
- if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ } else if (!assign_best_node(rsc, prefer, stop_if_fail)) {
+ // Assignment failed
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
- } else if (rsc->running_on != NULL) {
+ } else if ((rsc->running_on != NULL) && stop_if_fail) {
pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
}
-
- } else {
- pe_rsc_debug(rsc, "%s: pre-assigned to %s", rsc->id,
- pe__node_name(rsc->allocated_to));
}
- pe__clear_resource_flags(rsc, pe_rsc_allocating);
+ pe__clear_resource_flags(rsc, pcmk_rsc_assigning);
if (rsc->is_remote_node) {
remote_connection_assigned(rsc);
@@ -518,18 +555,18 @@ pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer)
* \return Role that resource would have after scheduled actions are taken
*/
static void
-schedule_restart_actions(pe_resource_t *rsc, pe_node_t *current,
+schedule_restart_actions(pcmk_resource_t *rsc, pcmk_node_t *current,
bool need_stop, bool need_promote)
{
enum rsc_role_e role = rsc->role;
enum rsc_role_e next_role;
rsc_transition_fn fn = NULL;
- pe__set_resource_flags(rsc, pe_rsc_restarting);
+ pe__set_resource_flags(rsc, pcmk_rsc_restarting);
// Bring resource down to a stop on its current node
- while (role != RSC_ROLE_STOPPED) {
- next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
+ while (role != pcmk_role_stopped) {
+ next_role = rsc_state_matrix[role][pcmk_role_stopped];
pe_rsc_trace(rsc, "Creating %s action to take %s down from %s to %s",
(need_stop? "required" : "optional"), rsc->id,
role2text(role), role2text(next_role));
@@ -543,11 +580,11 @@ schedule_restart_actions(pe_resource_t *rsc, pe_node_t *current,
// Bring resource up to its next role on its next node
while ((rsc->role <= rsc->next_role) && (role != rsc->role)
- && !pcmk_is_set(rsc->flags, pe_rsc_block)) {
+ && !pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) {
bool required = need_stop;
next_role = rsc_state_matrix[role][rsc->role];
- if ((next_role == RSC_ROLE_PROMOTED) && need_promote) {
+ if ((next_role == pcmk_role_promoted) && need_promote) {
required = true;
}
pe_rsc_trace(rsc, "Creating %s action to take %s up from %s to %s",
@@ -561,7 +598,7 @@ schedule_restart_actions(pe_resource_t *rsc, pe_node_t *current,
role = next_role;
}
- pe__clear_resource_flags(rsc, pe_rsc_restarting);
+ pe__clear_resource_flags(rsc, pcmk_rsc_restarting);
}
/*!
@@ -573,16 +610,16 @@ schedule_restart_actions(pe_resource_t *rsc, pe_node_t *current,
* \return "explicit" if next role was explicitly set, otherwise "implicit"
*/
static const char *
-set_default_next_role(pe_resource_t *rsc)
+set_default_next_role(pcmk_resource_t *rsc)
{
- if (rsc->next_role != RSC_ROLE_UNKNOWN) {
+ if (rsc->next_role != pcmk_role_unknown) {
return "explicit";
}
if (rsc->allocated_to == NULL) {
- pe__set_next_role(rsc, RSC_ROLE_STOPPED, "assignment");
+ pe__set_next_role(rsc, pcmk_role_stopped, "assignment");
} else {
- pe__set_next_role(rsc, RSC_ROLE_STARTED, "assignment");
+ pe__set_next_role(rsc, pcmk_role_started, "assignment");
}
return "implicit";
}
@@ -594,15 +631,15 @@ set_default_next_role(pe_resource_t *rsc)
* \param[in,out] rsc Resource to create start action for
*/
static void
-create_pending_start(pe_resource_t *rsc)
+create_pending_start(pcmk_resource_t *rsc)
{
- pe_action_t *start = NULL;
+ pcmk_action_t *start = NULL;
pe_rsc_trace(rsc,
"Creating action for %s to represent already pending start",
rsc->id);
start = start_action(rsc, rsc->allocated_to, TRUE);
- pe__set_action_flags(start, pe_action_print_always);
+ pe__set_action_flags(start, pcmk_action_always_in_graph);
}
/*!
@@ -612,7 +649,7 @@ create_pending_start(pe_resource_t *rsc)
* \param[in,out] rsc Resource to schedule actions for
*/
static void
-schedule_role_transition_actions(pe_resource_t *rsc)
+schedule_role_transition_actions(pcmk_resource_t *rsc)
{
enum rsc_role_e role = rsc->role;
@@ -640,7 +677,7 @@ schedule_role_transition_actions(pe_resource_t *rsc)
* \param[in,out] rsc Primitive resource to create actions for
*/
void
-pcmk__primitive_create_actions(pe_resource_t *rsc)
+pcmk__primitive_create_actions(pcmk_resource_t *rsc)
{
bool need_stop = false;
bool need_promote = false;
@@ -648,12 +685,12 @@ pcmk__primitive_create_actions(pe_resource_t *rsc)
bool allow_migrate = false;
bool multiply_active = false;
- pe_node_t *current = NULL;
+ pcmk_node_t *current = NULL;
unsigned int num_all_active = 0;
unsigned int num_clean_active = 0;
const char *next_role_source = NULL;
- CRM_ASSERT(rsc != NULL);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive));
next_role_source = set_default_next_role(rsc);
pe_rsc_trace(rsc,
@@ -668,8 +705,8 @@ pcmk__primitive_create_actions(pe_resource_t *rsc)
rsc);
if ((current != NULL) && (rsc->allocated_to != NULL)
- && (current->details != rsc->allocated_to->details)
- && (rsc->next_role >= RSC_ROLE_STARTED)) {
+ && !pe__same_node(current, rsc->allocated_to)
+ && (rsc->next_role >= pcmk_role_started)) {
pe_rsc_trace(rsc, "Moving %s from %s to %s",
rsc->id, pe__node_name(current),
@@ -715,7 +752,7 @@ pcmk__primitive_create_actions(pe_resource_t *rsc)
rsc->partial_migration_source = rsc->partial_migration_target = NULL;
allow_migrate = false;
- } else if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)) {
multiply_active = (num_all_active > 1);
} else {
/* If a resource has "requires" set to nothing or quorum, don't consider
@@ -739,51 +776,51 @@ pcmk__primitive_create_actions(pe_resource_t *rsc)
"#Resource_is_Too_Active for more information");
switch (rsc->recovery_type) {
- case recovery_stop_start:
+ case pcmk_multiply_active_restart:
need_stop = true;
break;
- case recovery_stop_unexpected:
+ case pcmk_multiply_active_unexpected:
need_stop = true; // stop_resource() will skip expected node
- pe__set_resource_flags(rsc, pe_rsc_stop_unexpected);
+ pe__set_resource_flags(rsc, pcmk_rsc_stop_unexpected);
break;
default:
break;
}
} else {
- pe__clear_resource_flags(rsc, pe_rsc_stop_unexpected);
+ pe__clear_resource_flags(rsc, pcmk_rsc_stop_unexpected);
}
- if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_start_pending)) {
create_pending_start(rsc);
}
if (is_moving) {
// Remaining tests are only for resources staying where they are
- } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
- if (pcmk_is_set(rsc->flags, pe_rsc_stop)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_stop_if_failed)) {
need_stop = true;
pe_rsc_trace(rsc, "Recovering %s", rsc->id);
} else {
pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id);
- if (rsc->next_role == RSC_ROLE_PROMOTED) {
+ if (rsc->next_role == pcmk_role_promoted) {
need_promote = true;
}
}
- } else if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) {
pe_rsc_trace(rsc, "Blocking further actions on %s", rsc->id);
need_stop = true;
- } else if ((rsc->role > RSC_ROLE_STARTED) && (current != NULL)
+ } else if ((rsc->role > pcmk_role_started) && (current != NULL)
&& (rsc->allocated_to != NULL)) {
- pe_action_t *start = NULL;
+ pcmk_action_t *start = NULL;
pe_rsc_trace(rsc, "Creating start action for promoted resource %s",
rsc->id);
start = start_action(rsc, rsc->allocated_to, TRUE);
- if (!pcmk_is_set(start->flags, pe_action_optional)) {
+ if (!pcmk_is_set(start->flags, pcmk_action_optional)) {
// Recovery of a promoted resource
pe_rsc_trace(rsc, "%s restart is required for recovery", rsc->id);
need_stop = true;
@@ -810,10 +847,10 @@ pcmk__primitive_create_actions(pe_resource_t *rsc)
* \param[in] rsc Resource to check
*/
static void
-rsc_avoids_remote_nodes(const pe_resource_t *rsc)
+rsc_avoids_remote_nodes(const pcmk_resource_t *rsc)
{
GHashTableIter iter;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
@@ -837,7 +874,7 @@ rsc_avoids_remote_nodes(const pe_resource_t *rsc)
* \note Callers should take care not to rely on the list being sorted.
*/
static GList *
-allowed_nodes_as_list(const pe_resource_t *rsc)
+allowed_nodes_as_list(const pcmk_resource_t *rsc)
{
GList *allowed_nodes = NULL;
@@ -859,15 +896,15 @@ allowed_nodes_as_list(const pe_resource_t *rsc)
* \param[in,out] rsc Primitive resource to create implicit constraints for
*/
void
-pcmk__primitive_internal_constraints(pe_resource_t *rsc)
+pcmk__primitive_internal_constraints(pcmk_resource_t *rsc)
{
GList *allowed_nodes = NULL;
bool check_unfencing = false;
bool check_utilization = false;
- CRM_ASSERT(rsc != NULL);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive));
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pe_rsc_trace(rsc,
"Skipping implicit constraints for unmanaged resource %s",
rsc->id);
@@ -875,9 +912,10 @@ pcmk__primitive_internal_constraints(pe_resource_t *rsc)
}
// Whether resource requires unfencing
- check_unfencing = !pcmk_is_set(rsc->flags, pe_rsc_fence_device)
- && pcmk_is_set(rsc->cluster->flags, pe_flag_enable_unfencing)
- && pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing);
+ check_unfencing = !pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)
+ && pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_enable_unfencing)
+ && pcmk_is_set(rsc->flags, pcmk_rsc_needs_unfencing);
// Whether a non-default placement strategy is used
check_utilization = (g_hash_table_size(rsc->utilization) > 0)
@@ -885,29 +923,37 @@ pcmk__primitive_internal_constraints(pe_resource_t *rsc)
"default", pcmk__str_casei);
// Order stops before starts (i.e. restart)
- pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
- rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
- pe_order_optional|pe_order_implies_then|pe_order_restart,
+ pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0), NULL,
+ rsc, pcmk__op_key(rsc->id, PCMK_ACTION_START, 0), NULL,
+ pcmk__ar_ordered
+ |pcmk__ar_first_implies_then
+ |pcmk__ar_intermediate_stop,
rsc->cluster);
// Promotable ordering: demote before stop, start before promote
if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
- pe_rsc_promotable)
- || (rsc->role > RSC_ROLE_UNPROMOTED)) {
+ pcmk_rsc_promotable)
+ || (rsc->role > pcmk_role_unpromoted)) {
- pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_DEMOTE, 0), NULL,
- rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
- pe_order_promoted_implies_first, rsc->cluster);
+ pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_DEMOTE, 0),
+ NULL,
+ rsc, pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0),
+ NULL,
+ pcmk__ar_promoted_then_implies_first, rsc->cluster);
- pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
- rsc, pcmk__op_key(rsc->id, RSC_PROMOTE, 0), NULL,
- pe_order_runnable_left, rsc->cluster);
+ pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_START, 0),
+ NULL,
+ rsc, pcmk__op_key(rsc->id, PCMK_ACTION_PROMOTE, 0),
+ NULL,
+ pcmk__ar_unrunnable_first_blocks, rsc->cluster);
}
// Don't clear resource history if probing on same node
- pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0),
- NULL, rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0),
- NULL, pe_order_same_node|pe_order_then_cancels_first,
+ pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_LRM_DELETE, 0),
+ NULL, rsc,
+ pcmk__op_key(rsc->id, PCMK_ACTION_MONITOR, 0),
+ NULL,
+ pcmk__ar_if_on_same_node|pcmk__ar_then_cancels_first,
rsc->cluster);
// Certain checks need allowed nodes
@@ -924,7 +970,7 @@ pcmk__primitive_internal_constraints(pe_resource_t *rsc)
}
if (rsc->container != NULL) {
- pe_resource_t *remote_rsc = NULL;
+ pcmk_resource_t *remote_rsc = NULL;
if (rsc->is_remote_node) {
// rsc is the implicit remote connection for a guest or bundle node
@@ -932,7 +978,7 @@ pcmk__primitive_internal_constraints(pe_resource_t *rsc)
/* Guest resources are not allowed to run on Pacemaker Remote nodes,
* to avoid nesting remotes. However, bundles are allowed.
*/
- if (!pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_remote_nesting_allowed)) {
rsc_avoids_remote_nodes(rsc->container);
}
@@ -942,8 +988,9 @@ pcmk__primitive_internal_constraints(pe_resource_t *rsc)
* so that if we detect the container running, we will trigger a new
* transition and avoid the unnecessary recovery.
*/
- pcmk__order_resource_actions(rsc->container, RSC_STATUS, rsc,
- RSC_STOP, pe_order_optional);
+ pcmk__order_resource_actions(rsc->container, PCMK_ACTION_MONITOR,
+ rsc, PCMK_ACTION_STOP,
+ pcmk__ar_ordered);
/* A user can specify that a resource must start on a Pacemaker Remote
* node by explicitly configuring it with the container=NODENAME
@@ -964,7 +1011,7 @@ pcmk__primitive_internal_constraints(pe_resource_t *rsc)
* colocating the resource with the container resource.
*/
for (GList *item = allowed_nodes; item; item = item->next) {
- pe_node_t *node = item->data;
+ pcmk_node_t *node = item->data;
if (node->details->remote_rsc != remote_rsc) {
node->weight = -INFINITY;
@@ -982,29 +1029,36 @@ pcmk__primitive_internal_constraints(pe_resource_t *rsc)
rsc->id, rsc->container->id);
pcmk__new_ordering(rsc->container,
- pcmk__op_key(rsc->container->id, RSC_START, 0),
- NULL, rsc, pcmk__op_key(rsc->id, RSC_START, 0),
+ pcmk__op_key(rsc->container->id,
+ PCMK_ACTION_START, 0),
+ NULL, rsc,
+ pcmk__op_key(rsc->id, PCMK_ACTION_START, 0),
NULL,
- pe_order_implies_then|pe_order_runnable_left,
+ pcmk__ar_first_implies_then
+ |pcmk__ar_unrunnable_first_blocks,
rsc->cluster);
- pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
+ pcmk__new_ordering(rsc,
+ pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0),
+ NULL,
rsc->container,
- pcmk__op_key(rsc->container->id, RSC_STOP, 0),
- NULL, pe_order_implies_first, rsc->cluster);
+ pcmk__op_key(rsc->container->id,
+ PCMK_ACTION_STOP, 0),
+ NULL, pcmk__ar_then_implies_first, rsc->cluster);
- if (pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_remote_nesting_allowed)) {
score = 10000; /* Highly preferred but not essential */
} else {
score = INFINITY; /* Force them to run on the same host */
}
- pcmk__new_colocation("resource-with-container", NULL, score, rsc,
- rsc->container, NULL, NULL, true,
- rsc->cluster);
+ pcmk__new_colocation("#resource-with-container", NULL, score, rsc,
+ rsc->container, NULL, NULL,
+ pcmk__coloc_influence);
}
}
- if (rsc->is_remote_node || pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
+ if (rsc->is_remote_node
+ || pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)) {
/* Remote connections and fencing devices are not allowed to run on
* Pacemaker Remote nodes
*/
@@ -1015,27 +1069,27 @@ pcmk__primitive_internal_constraints(pe_resource_t *rsc)
/*!
* \internal
- * \brief Apply a colocation's score to node weights or resource priority
+ * \brief Apply a colocation's score to node scores or resource priority
*
* Given a colocation constraint, apply its score to the dependent's
- * allowed node weights (if we are still placing resources) or priority (if
+ * allowed node scores (if we are still placing resources) or priority (if
* we are choosing promotable clone instance roles).
*
* \param[in,out] dependent Dependent resource in colocation
* \param[in] primary Primary resource in colocation
* \param[in] colocation Colocation constraint to apply
- * \param[in] for_dependent true if called on behalf of dependent
+ * \param[in] for_dependent true if called on behalf of dependent
*/
void
-pcmk__primitive_apply_coloc_score(pe_resource_t *dependent,
- const pe_resource_t *primary,
+pcmk__primitive_apply_coloc_score(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
const pcmk__colocation_t *colocation,
bool for_dependent)
{
enum pcmk__coloc_affects filter_results;
- CRM_CHECK((colocation != NULL) && (dependent != NULL) && (primary != NULL),
- return);
+ CRM_ASSERT((dependent != NULL) && (primary != NULL)
+ && (colocation != NULL));
if (for_dependent) {
// Always process on behalf of primary resource
@@ -1055,7 +1109,7 @@ pcmk__primitive_apply_coloc_score(pe_resource_t *dependent,
pcmk__apply_coloc_to_priority(dependent, primary, colocation);
break;
case pcmk__coloc_affects_location:
- pcmk__apply_coloc_to_weights(dependent, primary, colocation);
+ pcmk__apply_coloc_to_scores(dependent, primary, colocation);
break;
default: // pcmk__coloc_affects_nothing
return;
@@ -1063,40 +1117,62 @@ pcmk__primitive_apply_coloc_score(pe_resource_t *dependent,
}
/* Primitive implementation of
- * resource_alloc_functions_t:with_this_colocations()
+ * pcmk_assignment_methods_t:with_this_colocations()
*/
void
-pcmk__with_primitive_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list)
+pcmk__with_primitive_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList **list)
{
- // Primitives don't have children, so rsc should also be orig_rsc
- CRM_CHECK((rsc != NULL) && (rsc->variant == pe_native)
- && (rsc == orig_rsc) && (list != NULL),
- return);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)
+ && (list != NULL));
+
+ if (rsc == orig_rsc) {
+ /* For the resource itself, add all of its own colocations and relevant
+ * colocations from its parent (if any).
+ */
+ pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
+ if (rsc->parent != NULL) {
+ rsc->parent->cmds->with_this_colocations(rsc->parent, orig_rsc, list);
+ }
+ } else {
+ // For an ancestor, add only explicitly configured constraints
+ for (GList *iter = rsc->rsc_cons_lhs; iter != NULL; iter = iter->next) {
+ pcmk__colocation_t *colocation = iter->data;
- // Add primitive's own colocations plus any relevant ones from parent
- pcmk__add_with_this_list(list, rsc->rsc_cons_lhs);
- if (rsc->parent != NULL) {
- rsc->parent->cmds->with_this_colocations(rsc->parent, rsc, list);
+ if (pcmk_is_set(colocation->flags, pcmk__coloc_explicit)) {
+ pcmk__add_with_this(list, colocation, orig_rsc);
+ }
+ }
}
}
/* Primitive implementation of
- * resource_alloc_functions_t:this_with_colocations()
+ * pcmk_assignment_methods_t:this_with_colocations()
*/
void
-pcmk__primitive_with_colocations(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList **list)
+pcmk__primitive_with_colocations(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList **list)
{
- // Primitives don't have children, so rsc should also be orig_rsc
- CRM_CHECK((rsc != NULL) && (rsc->variant == pe_native)
- && (rsc == orig_rsc) && (list != NULL),
- return);
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)
+ && (list != NULL));
+
+ if (rsc == orig_rsc) {
+ /* For the resource itself, add all of its own colocations and relevant
+ * colocations from its parent (if any).
+ */
+ pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
+ if (rsc->parent != NULL) {
+ rsc->parent->cmds->this_with_colocations(rsc->parent, orig_rsc, list);
+ }
+ } else {
+ // For an ancestor, add only explicitly configured constraints
+ for (GList *iter = rsc->rsc_cons; iter != NULL; iter = iter->next) {
+ pcmk__colocation_t *colocation = iter->data;
- // Add primitive's own colocations plus any relevant ones from parent
- pcmk__add_this_with_list(list, rsc->rsc_cons);
- if (rsc->parent != NULL) {
- rsc->parent->cmds->this_with_colocations(rsc->parent, rsc, list);
+ if (pcmk_is_set(colocation->flags, pcmk__coloc_explicit)) {
+ pcmk__add_this_with(list, colocation, orig_rsc);
+ }
+ }
}
}
@@ -1109,11 +1185,11 @@ pcmk__primitive_with_colocations(const pe_resource_t *rsc,
*
* \return Flags appropriate to \p action on \p node
*/
-enum pe_action_flags
-pcmk__primitive_action_flags(pe_action_t *action, const pe_node_t *node)
+uint32_t
+pcmk__primitive_action_flags(pcmk_action_t *action, const pcmk_node_t *node)
{
CRM_ASSERT(action != NULL);
- return action->flags;
+ return (uint32_t) action->flags;
}
/*!
@@ -1130,11 +1206,11 @@ pcmk__primitive_action_flags(pe_action_t *action, const pe_node_t *node)
* been unpacked and resources have been assigned to nodes.
*/
static bool
-is_expected_node(const pe_resource_t *rsc, const pe_node_t *node)
+is_expected_node(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
return pcmk_all_flags_set(rsc->flags,
- pe_rsc_stop_unexpected|pe_rsc_restarting)
- && (rsc->next_role > RSC_ROLE_STOPPED)
+ pcmk_rsc_stop_unexpected|pcmk_rsc_restarting)
+ && (rsc->next_role > pcmk_role_stopped)
&& pe__same_node(rsc->allocated_to, node);
}
@@ -1147,11 +1223,11 @@ is_expected_node(const pe_resource_t *rsc, const pe_node_t *node)
* \param[in] optional Whether actions should be optional
*/
static void
-stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
+stop_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
{
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
- pe_node_t *current = (pe_node_t *) iter->data;
- pe_action_t *stop = NULL;
+ pcmk_node_t *current = (pcmk_node_t *) iter->data;
+ pcmk_action_t *stop = NULL;
if (is_expected_node(rsc, current)) {
/* We are scheduling restart actions for a multiply active resource
@@ -1189,8 +1265,8 @@ stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
if (rsc->allocated_to == NULL) {
pe_action_set_reason(stop, "node availability", true);
- } else if (pcmk_all_flags_set(rsc->flags, pe_rsc_restarting
- |pe_rsc_stop_unexpected)) {
+ } else if (pcmk_all_flags_set(rsc->flags, pcmk_rsc_restarting
+ |pcmk_rsc_stop_unexpected)) {
/* We are stopping a multiply active resource on a node that is
* not its expected node, and we are still scheduling restart
* actions, so the stop is for being multiply active.
@@ -1198,19 +1274,19 @@ stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
pe_action_set_reason(stop, "being multiply active", true);
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
- pe__clear_action_flags(stop, pe_action_runnable);
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
+ pe__clear_action_flags(stop, pcmk_action_runnable);
}
- if (pcmk_is_set(rsc->cluster->flags, pe_flag_remove_after_stop)) {
+ if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_remove_after_stop)) {
pcmk__schedule_cleanup(rsc, current, optional);
}
- if (pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) {
- pe_action_t *unfence = pe_fence_op(current, "on", true, NULL, false,
- rsc->cluster);
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_unfencing)) {
+ pcmk_action_t *unfence = pe_fence_op(current, PCMK_ACTION_ON, true,
+ NULL, false, rsc->cluster);
- order_actions(stop, unfence, pe_order_implies_first);
+ order_actions(stop, unfence, pcmk__ar_then_implies_first);
if (!pcmk__node_unfenced(current)) {
pe_proc_err("Stopping %s until %s can be unfenced",
rsc->id, pe__node_name(current));
@@ -1228,9 +1304,9 @@ stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
* \param[in] optional Whether actions should be optional
*/
static void
-start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
+start_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
{
- pe_action_t *start = NULL;
+ pcmk_action_t *start = NULL;
CRM_ASSERT(node != NULL);
@@ -1239,10 +1315,10 @@ start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
pe__node_name(node), node->weight);
start = start_action(rsc, node, TRUE);
- pcmk__order_vs_unfence(rsc, node, start, pe_order_implies_then);
+ pcmk__order_vs_unfence(rsc, node, start, pcmk__ar_first_implies_then);
- if (pcmk_is_set(start->flags, pe_action_runnable) && !optional) {
- pe__clear_action_flags(start, pe_action_optional);
+ if (pcmk_is_set(start->flags, pcmk_action_runnable) && !optional) {
+ pe__clear_action_flags(start, pcmk_action_optional);
}
if (is_expected_node(rsc, node)) {
@@ -1253,7 +1329,7 @@ start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
"Start of multiply active resouce %s "
"on expected node %s will be a pseudo-action",
rsc->id, pe__node_name(node));
- pe__set_action_flags(start, pe_action_pseudo);
+ pe__set_action_flags(start, pcmk_action_pseudo);
}
}
@@ -1266,7 +1342,7 @@ start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
* \param[in] optional Whether actions should be optional
*/
static void
-promote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
+promote_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
{
GList *iter = NULL;
GList *action_list = NULL;
@@ -1275,18 +1351,18 @@ promote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
CRM_ASSERT(node != NULL);
// Any start must be runnable for promotion to be runnable
- action_list = pe__resource_actions(rsc, node, RSC_START, true);
+ action_list = pe__resource_actions(rsc, node, PCMK_ACTION_START, true);
for (iter = action_list; iter != NULL; iter = iter->next) {
- pe_action_t *start = (pe_action_t *) iter->data;
+ pcmk_action_t *start = (pcmk_action_t *) iter->data;
- if (!pcmk_is_set(start->flags, pe_action_runnable)) {
+ if (!pcmk_is_set(start->flags, pcmk_action_runnable)) {
runnable = false;
}
}
g_list_free(action_list);
if (runnable) {
- pe_action_t *promote = promote_action(rsc, node, optional);
+ pcmk_action_t *promote = promote_action(rsc, node, optional);
pe_rsc_trace(rsc, "Scheduling %s promotion of %s on %s",
(optional? "optional" : "required"), rsc->id,
@@ -1300,16 +1376,17 @@ promote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
"Promotion of multiply active resouce %s "
"on expected node %s will be a pseudo-action",
rsc->id, pe__node_name(node));
- pe__set_action_flags(promote, pe_action_pseudo);
+ pe__set_action_flags(promote, pcmk_action_pseudo);
}
} else {
pe_rsc_trace(rsc, "Not promoting %s on %s: start unrunnable",
rsc->id, pe__node_name(node));
- action_list = pe__resource_actions(rsc, node, RSC_PROMOTE, true);
+ action_list = pe__resource_actions(rsc, node, PCMK_ACTION_PROMOTE,
+ true);
for (iter = action_list; iter != NULL; iter = iter->next) {
- pe_action_t *promote = (pe_action_t *) iter->data;
+ pcmk_action_t *promote = (pcmk_action_t *) iter->data;
- pe__clear_action_flags(promote, pe_action_runnable);
+ pe__clear_action_flags(promote, pcmk_action_runnable);
}
g_list_free(action_list);
}
@@ -1324,7 +1401,7 @@ promote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
* \param[in] optional Whether actions should be optional
*/
static void
-demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
+demote_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
{
/* Since this will only be called for a primitive (possibly as an instance
* of a collective resource), the resource is multiply active if it is
@@ -1332,7 +1409,7 @@ demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
* part of recovery, regardless of which one is the desired node.
*/
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
- pe_node_t *current = (pe_node_t *) iter->data;
+ pcmk_node_t *current = (pcmk_node_t *) iter->data;
if (is_expected_node(rsc, current)) {
pe_rsc_trace(rsc,
@@ -1349,7 +1426,7 @@ demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
}
static void
-assert_role_error(pe_resource_t *rsc, pe_node_t *node, bool optional)
+assert_role_error(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
{
CRM_ASSERT(false);
}
@@ -1363,18 +1440,19 @@ assert_role_error(pe_resource_t *rsc, pe_node_t *node, bool optional)
* \param[in] optional Whether clean-up should be optional
*/
void
-pcmk__schedule_cleanup(pe_resource_t *rsc, const pe_node_t *node, bool optional)
+pcmk__schedule_cleanup(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ bool optional)
{
/* If the cleanup is required, its orderings are optional, because they're
* relevant only if both actions are required. Conversely, if the cleanup is
* optional, the orderings make the then action required if the first action
* becomes required.
*/
- uint32_t flag = optional? pe_order_implies_then : pe_order_optional;
+ uint32_t flag = optional? pcmk__ar_first_implies_then : pcmk__ar_ordered;
CRM_CHECK((rsc != NULL) && (node != NULL), return);
- if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
pe_rsc_trace(rsc, "Skipping clean-up of %s on %s: resource failed",
rsc->id, pe__node_name(node));
return;
@@ -1390,8 +1468,10 @@ pcmk__schedule_cleanup(pe_resource_t *rsc, const pe_node_t *node, bool optional)
delete_action(rsc, node, optional);
// stop -> clean-up -> start
- pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_DELETE, flag);
- pcmk__order_resource_actions(rsc, RSC_DELETE, rsc, RSC_START, flag);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_STOP,
+ rsc, PCMK_ACTION_DELETE, flag);
+ pcmk__order_resource_actions(rsc, PCMK_ACTION_DELETE,
+ rsc, PCMK_ACTION_START, flag);
}
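/*
 * Illustrative sketch, not part of this patch: what the flag choice above
 * means for a caller. A required clean-up (optional == false) gets plain
 * pcmk__ar_ordered orderings, so stop -> delete -> start is enforced only
 * when those actions are scheduled anyway; an optional clean-up
 * (optional == true) uses pcmk__ar_first_implies_then, so a stop that
 * becomes required also pulls in the delete and the subsequent start.
 */
static void
cleanup_flag_examples(pcmk_resource_t *rsc, const pcmk_node_t *node)
{
    pcmk__schedule_cleanup(rsc, node, false);   // required clean-up
    pcmk__schedule_cleanup(rsc, node, true);    // optional clean-up
}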
/*!
@@ -1402,13 +1482,14 @@ pcmk__schedule_cleanup(pe_resource_t *rsc, const pe_node_t *node, bool optional)
* \param[in,out] xml Transition graph action attributes XML to add to
*/
void
-pcmk__primitive_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml)
+pcmk__primitive_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml)
{
char *name = NULL;
char *value = NULL;
- const pe_resource_t *parent = NULL;
+ const pcmk_resource_t *parent = NULL;
- CRM_ASSERT((rsc != NULL) && (xml != NULL));
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)
+ && (xml != NULL));
/* Clone instance numbers get set internally as meta-attributes, and are
* needed in the transition graph (for example, to tell unique clone
@@ -1450,13 +1531,16 @@ pcmk__primitive_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml)
}
}
-// Primitive implementation of resource_alloc_functions_t:add_utilization()
+// Primitive implementation of pcmk_assignment_methods_t:add_utilization()
void
-pcmk__primitive_add_utilization(const pe_resource_t *rsc,
- const pe_resource_t *orig_rsc, GList *all_rscs,
- GHashTable *utilization)
+pcmk__primitive_add_utilization(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
+ GList *all_rscs, GHashTable *utilization)
{
- if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)
+ && (orig_rsc != NULL) && (utilization != NULL));
+
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
return;
}
@@ -1474,7 +1558,7 @@ pcmk__primitive_add_utilization(const pe_resource_t *rsc,
* \return Epoch time corresponding to shutdown attribute if set or now if not
*/
static time_t
-shutdown_time(pe_node_t *node)
+shutdown_time(pcmk_node_t *node)
{
const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
time_t result = 0;
@@ -1499,8 +1583,8 @@ shutdown_time(pe_node_t *node)
static void
ban_if_not_locked(gpointer data, gpointer user_data)
{
- const pe_node_t *node = (const pe_node_t *) data;
- pe_resource_t *rsc = (pe_resource_t *) user_data;
+ const pcmk_node_t *node = (const pcmk_node_t *) data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) user_data;
if (strcmp(node->details->uname, rsc->lock_node->details->uname) != 0) {
resource_location(rsc, node, -CRM_SCORE_INFINITY,
@@ -1508,15 +1592,19 @@ ban_if_not_locked(gpointer data, gpointer user_data)
}
}
-// Primitive implementation of resource_alloc_functions_t:shutdown_lock()
+// Primitive implementation of pcmk_assignment_methods_t:shutdown_lock()
void
-pcmk__primitive_shutdown_lock(pe_resource_t *rsc)
+pcmk__primitive_shutdown_lock(pcmk_resource_t *rsc)
{
- const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
+ const char *class = NULL;
+
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive));
+
+ class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
// Fence devices and remote connections can't be locked
if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_null_matches)
- || pe__resource_is_remote_conn(rsc, rsc->cluster)) {
+ || pe__resource_is_remote_conn(rsc)) {
return;
}
@@ -1531,14 +1619,14 @@ pcmk__primitive_shutdown_lock(pe_resource_t *rsc)
pe_rsc_info(rsc,
"Cancelling shutdown lock because %s is already active",
rsc->id);
- pe__clear_resource_history(rsc, rsc->lock_node, rsc->cluster);
+ pe__clear_resource_history(rsc, rsc->lock_node);
rsc->lock_node = NULL;
rsc->lock_time = 0;
}
// Only a resource active on exactly one node can be locked
} else if (pcmk__list_of_1(rsc->running_on)) {
- pe_node_t *node = rsc->running_on->data;
+ pcmk_node_t *node = rsc->running_on->data;
if (node->details->shutdown) {
if (node->details->unclean) {
@@ -1562,7 +1650,8 @@ pcmk__primitive_shutdown_lock(pe_resource_t *rsc)
pe_rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)",
rsc->id, pe__node_name(rsc->lock_node),
(long long) lock_expiration);
- pe__update_recheck_time(++lock_expiration, rsc->cluster);
+ pe__update_recheck_time(++lock_expiration, rsc->cluster,
+ "shutdown lock expiration");
} else {
pe_rsc_info(rsc, "Locking %s to %s due to shutdown",
rsc->id, pe__node_name(rsc->lock_node));
diff --git a/lib/pacemaker/pcmk_sched_probes.c b/lib/pacemaker/pcmk_sched_probes.c
index 919e523..e31e8d2 100644
--- a/lib/pacemaker/pcmk_sched_probes.c
+++ b/lib/pacemaker/pcmk_sched_probes.c
@@ -25,17 +25,17 @@
* \param[in] node Node that probe will run on
*/
static void
-add_expected_result(pe_action_t *probe, const pe_resource_t *rsc,
- const pe_node_t *node)
+add_expected_result(pcmk_action_t *probe, const pcmk_resource_t *rsc,
+ const pcmk_node_t *node)
{
// Check whether resource is currently active on node
- pe_node_t *running = pe_find_node_id(rsc->running_on, node->details->id);
+ pcmk_node_t *running = pe_find_node_id(rsc->running_on, node->details->id);
// The expected result is what we think the resource's current state is
if (running == NULL) {
pe__add_action_expected_result(probe, CRM_EX_NOT_RUNNING);
- } else if (rsc->role == RSC_ROLE_PROMOTED) {
+ } else if (rsc->role == pcmk_role_promoted) {
pe__add_action_expected_result(probe, CRM_EX_PROMOTED);
}
}
@@ -50,12 +50,12 @@ add_expected_result(pe_action_t *probe, const pe_resource_t *rsc,
* \return true if any probe was created, otherwise false
*/
bool
-pcmk__probe_resource_list(GList *rscs, pe_node_t *node)
+pcmk__probe_resource_list(GList *rscs, pcmk_node_t *node)
{
bool any_created = false;
for (GList *iter = rscs; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (rsc->cmds->create_probe(rsc, node)) {
any_created = true;
@@ -72,15 +72,18 @@ pcmk__probe_resource_list(GList *rscs, pe_node_t *node)
* \param[in] rsc2 Resource that might be started
*/
static void
-probe_then_start(pe_resource_t *rsc1, pe_resource_t *rsc2)
+probe_then_start(pcmk_resource_t *rsc1, pcmk_resource_t *rsc2)
{
if ((rsc1->allocated_to != NULL)
&& (g_hash_table_lookup(rsc1->known_on,
rsc1->allocated_to->details->id) == NULL)) {
- pcmk__new_ordering(rsc1, pcmk__op_key(rsc1->id, RSC_STATUS, 0), NULL,
- rsc2, pcmk__op_key(rsc2->id, RSC_START, 0), NULL,
- pe_order_optional, rsc1->cluster);
+ pcmk__new_ordering(rsc1,
+ pcmk__op_key(rsc1->id, PCMK_ACTION_MONITOR, 0),
+ NULL,
+ rsc2, pcmk__op_key(rsc2->id, PCMK_ACTION_START, 0),
+ NULL,
+ pcmk__ar_ordered, rsc1->cluster);
}
}
@@ -93,20 +96,20 @@ probe_then_start(pe_resource_t *rsc1, pe_resource_t *rsc2)
* \return true if guest resource will likely stop, otherwise false
*/
static bool
-guest_resource_will_stop(const pe_node_t *node)
+guest_resource_will_stop(const pcmk_node_t *node)
{
- const pe_resource_t *guest_rsc = node->details->remote_rsc->container;
+ const pcmk_resource_t *guest_rsc = node->details->remote_rsc->container;
/* Ideally, we'd check whether the guest has a required stop, but that
* information doesn't exist yet, so approximate it ...
*/
return node->details->remote_requires_reset
|| node->details->unclean
- || pcmk_is_set(guest_rsc->flags, pe_rsc_failed)
- || (guest_rsc->next_role == RSC_ROLE_STOPPED)
+ || pcmk_is_set(guest_rsc->flags, pcmk_rsc_failed)
+ || (guest_rsc->next_role == pcmk_role_stopped)
// Guest is moving
- || ((guest_rsc->role > RSC_ROLE_STOPPED)
+ || ((guest_rsc->role > pcmk_role_stopped)
&& (guest_rsc->allocated_to != NULL)
&& (pe_find_node(guest_rsc->running_on,
guest_rsc->allocated_to->details->uname) == NULL));
@@ -121,20 +124,20 @@ guest_resource_will_stop(const pe_node_t *node)
*
* \return Newly created probe action
*/
-static pe_action_t *
-probe_action(pe_resource_t *rsc, pe_node_t *node)
+static pcmk_action_t *
+probe_action(pcmk_resource_t *rsc, pcmk_node_t *node)
{
- pe_action_t *probe = NULL;
- char *key = pcmk__op_key(rsc->id, RSC_STATUS, 0);
+ pcmk_action_t *probe = NULL;
+ char *key = pcmk__op_key(rsc->id, PCMK_ACTION_MONITOR, 0);
crm_debug("Scheduling probe of %s %s on %s",
role2text(rsc->role), rsc->id, pe__node_name(node));
- probe = custom_action(rsc, key, RSC_STATUS, node, FALSE, TRUE,
+ probe = custom_action(rsc, key, PCMK_ACTION_MONITOR, node, FALSE,
rsc->cluster);
- pe__clear_action_flags(probe, pe_action_optional);
+ pe__clear_action_flags(probe, pcmk_action_optional);
- pcmk__order_vs_unfence(rsc, node, probe, pe_order_optional);
+ pcmk__order_vs_unfence(rsc, node, probe, pcmk__ar_ordered);
add_expected_result(probe, rsc, node);
return probe;
}
@@ -151,17 +154,17 @@ probe_action(pe_resource_t *rsc, pe_node_t *node)
* \return true if any probe was created, otherwise false
*/
bool
-pcmk__probe_rsc_on_node(pe_resource_t *rsc, pe_node_t *node)
+pcmk__probe_rsc_on_node(pcmk_resource_t *rsc, pcmk_node_t *node)
{
- uint32_t flags = pe_order_optional;
- pe_action_t *probe = NULL;
- pe_node_t *allowed = NULL;
- pe_resource_t *top = uber_parent(rsc);
+ uint32_t flags = pcmk__ar_ordered;
+ pcmk_action_t *probe = NULL;
+ pcmk_node_t *allowed = NULL;
+ pcmk_resource_t *top = uber_parent(rsc);
const char *reason = NULL;
- CRM_CHECK((rsc != NULL) && (node != NULL), return false);
+ CRM_ASSERT((rsc != NULL) && (node != NULL));
- if (!pcmk_is_set(rsc->cluster->flags, pe_flag_startup_probes)) {
+ if (!pcmk_is_set(rsc->cluster->flags, pcmk_sched_probe_resources)) {
reason = "start-up probes are disabled";
goto no_probe;
}
@@ -193,7 +196,7 @@ pcmk__probe_rsc_on_node(pe_resource_t *rsc, pe_node_t *node)
reason = "resource is inside a container";
goto no_probe;
- } else if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
reason = "resource is orphaned";
goto no_probe;
@@ -213,7 +216,7 @@ pcmk__probe_rsc_on_node(pe_resource_t *rsc, pe_node_t *node)
"on node";
goto no_probe;
- } else if (allowed->rsc_discover_mode != pe_discover_exclusive) {
+ } else if (allowed->rsc_discover_mode != pcmk_probe_exclusive) {
// ... but no constraint marks this node for discovery of resource
reason = "resource has exclusive discovery but is not enabled "
"on node";
@@ -224,15 +227,15 @@ pcmk__probe_rsc_on_node(pe_resource_t *rsc, pe_node_t *node)
if (allowed == NULL) {
allowed = node;
}
- if (allowed->rsc_discover_mode == pe_discover_never) {
+ if (allowed->rsc_discover_mode == pcmk_probe_never) {
reason = "node has discovery disabled";
goto no_probe;
}
if (pe__is_guest_node(node)) {
- pe_resource_t *guest = node->details->remote_rsc->container;
+ pcmk_resource_t *guest = node->details->remote_rsc->container;
- if (guest->role == RSC_ROLE_STOPPED) {
+ if (guest->role == pcmk_role_stopped) {
// The guest is stopped, so we know no resource is active there
reason = "node's guest is stopped";
probe_then_start(guest, top);
@@ -242,9 +245,11 @@ pcmk__probe_rsc_on_node(pe_resource_t *rsc, pe_node_t *node)
reason = "node's guest will stop";
// Order resource start after guest stop (in case it's restarting)
- pcmk__new_ordering(guest, pcmk__op_key(guest->id, RSC_STOP, 0),
- NULL, top, pcmk__op_key(top->id, RSC_START, 0),
- NULL, pe_order_optional, rsc->cluster);
+ pcmk__new_ordering(guest,
+ pcmk__op_key(guest->id, PCMK_ACTION_STOP, 0),
+ NULL, top,
+ pcmk__op_key(top->id, PCMK_ACTION_START, 0),
+ NULL, pcmk__ar_ordered, rsc->cluster);
goto no_probe;
}
}
@@ -264,17 +269,17 @@ pcmk__probe_rsc_on_node(pe_resource_t *rsc, pe_node_t *node)
/* Prevent a start if the resource can't be probed, but don't cause the
* resource or entire clone to stop if already active.
*/
- if (!pcmk_is_set(probe->flags, pe_action_runnable)
+ if (!pcmk_is_set(probe->flags, pcmk_action_runnable)
&& (top->running_on == NULL)) {
- pe__set_order_flags(flags, pe_order_runnable_left);
+ pe__set_order_flags(flags, pcmk__ar_unrunnable_first_blocks);
}
// Start or reload after probing the resource
pcmk__new_ordering(rsc, NULL, probe,
- top, pcmk__op_key(top->id, RSC_START, 0), NULL,
+ top, pcmk__op_key(top->id, PCMK_ACTION_START, 0), NULL,
flags, rsc->cluster);
pcmk__new_ordering(rsc, NULL, probe, top, reload_key(rsc), NULL,
- pe_order_optional, rsc->cluster);
+ pcmk__ar_ordered, rsc->cluster);
return true;
@@ -295,23 +300,23 @@ no_probe:
* \return true if \p probe should be ordered before \p then, otherwise false
*/
static bool
-probe_needed_before_action(const pe_action_t *probe, const pe_action_t *then)
+probe_needed_before_action(const pcmk_action_t *probe,
+ const pcmk_action_t *then)
{
// Probes on a node are performed after unfencing it, not before
- if (pcmk__str_eq(then->task, CRM_OP_FENCE, pcmk__str_casei)
- && (probe->node != NULL) && (then->node != NULL)
- && (probe->node->details == then->node->details)) {
+ if (pcmk__str_eq(then->task, PCMK_ACTION_STONITH, pcmk__str_none)
+ && pe__same_node(probe->node, then->node)) {
const char *op = g_hash_table_lookup(then->meta, "stonith_action");
- if (pcmk__str_eq(op, "on", pcmk__str_casei)) {
+ if (pcmk__str_eq(op, PCMK_ACTION_ON, pcmk__str_casei)) {
return false;
}
}
// Probes should be done on a node before shutting it down
- if (pcmk__str_eq(then->task, CRM_OP_SHUTDOWN, pcmk__str_none)
+ if (pcmk__str_eq(then->task, PCMK_ACTION_DO_SHUTDOWN, pcmk__str_none)
&& (probe->node != NULL) && (then->node != NULL)
- && (probe->node->details != then->node->details)) {
+ && !pe__same_node(probe->node, then->node)) {
return false;
}
@@ -330,21 +335,23 @@ probe_needed_before_action(const pe_action_t *probe, const pe_action_t *then)
* resource", add implicit "probe this resource then do something" equivalents
* so the relation is upheld until we know whether a stop is needed.
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
static void
-add_probe_orderings_for_stops(pe_working_set_t *data_set)
+add_probe_orderings_for_stops(pcmk_scheduler_t *scheduler)
{
- for (GList *iter = data_set->ordering_constraints; iter != NULL;
+ for (GList *iter = scheduler->ordering_constraints; iter != NULL;
iter = iter->next) {
pe__ordering_t *order = iter->data;
- uint32_t order_flags = pe_order_optional;
+ uint32_t order_flags = pcmk__ar_ordered;
GList *probes = NULL;
GList *then_actions = NULL;
+ pcmk_action_t *first = NULL;
+ pcmk_action_t *then = NULL;
// Skip disabled orderings
- if (order->flags == pe_order_none) {
+ if (order->flags == pcmk__ar_none) {
continue;
}
@@ -354,17 +361,20 @@ add_probe_orderings_for_stops(pe_working_set_t *data_set)
}
// Skip invalid orderings (shouldn't be possible)
- if (((order->lh_action == NULL) && (order->lh_action_task == NULL)) ||
- ((order->rh_action == NULL) && (order->rh_action_task == NULL))) {
+ first = order->lh_action;
+ then = order->rh_action;
+ if (((first == NULL) && (order->lh_action_task == NULL))
+ || ((then == NULL) && (order->rh_action_task == NULL))) {
continue;
}
// Skip orderings for first actions other than stop
- if ((order->lh_action != NULL)
- && !pcmk__str_eq(order->lh_action->task, RSC_STOP, pcmk__str_none)) {
+ if ((first != NULL) && !pcmk__str_eq(first->task, PCMK_ACTION_STOP,
+ pcmk__str_none)) {
continue;
- } else if ((order->lh_action == NULL)
- && !pcmk__ends_with(order->lh_action_task, "_" RSC_STOP "_0")) {
+ } else if ((first == NULL)
+ && !pcmk__ends_with(order->lh_action_task,
+ "_" PCMK_ACTION_STOP "_0")) {
continue;
}
@@ -375,41 +385,40 @@ add_probe_orderings_for_stops(pe_working_set_t *data_set)
if ((order->rh_rsc != NULL)
&& (order->lh_rsc->container == order->rh_rsc)) {
- if ((order->rh_action != NULL)
- && pcmk__str_eq(order->rh_action->task, RSC_STOP,
- pcmk__str_none)) {
+ if ((then != NULL) && pcmk__str_eq(then->task, PCMK_ACTION_STOP,
+ pcmk__str_none)) {
continue;
- } else if ((order->rh_action == NULL)
+ } else if ((then == NULL)
&& pcmk__ends_with(order->rh_action_task,
- "_" RSC_STOP "_0")) {
+ "_" PCMK_ACTION_STOP "_0")) {
continue;
}
}
// Preserve certain order options for future filtering
- if (pcmk_is_set(order->flags, pe_order_apply_first_non_migratable)) {
- pe__set_order_flags(order_flags,
- pe_order_apply_first_non_migratable);
+ if (pcmk_is_set(order->flags, pcmk__ar_if_first_unmigratable)) {
+ pe__set_order_flags(order_flags, pcmk__ar_if_first_unmigratable);
}
- if (pcmk_is_set(order->flags, pe_order_same_node)) {
- pe__set_order_flags(order_flags, pe_order_same_node);
+ if (pcmk_is_set(order->flags, pcmk__ar_if_on_same_node)) {
+ pe__set_order_flags(order_flags, pcmk__ar_if_on_same_node);
}
// Preserve certain order types for future filtering
- if ((order->flags == pe_order_anti_colocation)
- || (order->flags == pe_order_load)) {
+ if ((order->flags == pcmk__ar_if_required_on_same_node)
+ || (order->flags == pcmk__ar_if_on_same_node_or_target)) {
order_flags = order->flags;
}
// List all scheduled probes for the first resource
- probes = pe__resource_actions(order->lh_rsc, NULL, RSC_STATUS, FALSE);
+ probes = pe__resource_actions(order->lh_rsc, NULL, PCMK_ACTION_MONITOR,
+ FALSE);
if (probes == NULL) { // There aren't any
continue;
}
// List all relevant "then" actions
- if (order->rh_action != NULL) {
- then_actions = g_list_prepend(NULL, order->rh_action);
+ if (then != NULL) {
+ then_actions = g_list_prepend(NULL, then);
} else if (order->rh_rsc != NULL) {
then_actions = find_actions(order->rh_rsc->actions,
@@ -422,19 +431,19 @@ add_probe_orderings_for_stops(pe_working_set_t *data_set)
crm_trace("Implying 'probe then' orderings for '%s then %s' "
"(id=%d, type=%.6x)",
- order->lh_action? order->lh_action->uuid : order->lh_action_task,
- order->rh_action? order->rh_action->uuid : order->rh_action_task,
+ ((first == NULL)? order->lh_action_task : first->uuid),
+ ((then == NULL)? order->rh_action_task : then->uuid),
order->id, order->flags);
for (GList *probe_iter = probes; probe_iter != NULL;
probe_iter = probe_iter->next) {
- pe_action_t *probe = (pe_action_t *) probe_iter->data;
+ pcmk_action_t *probe = (pcmk_action_t *) probe_iter->data;
for (GList *then_iter = then_actions; then_iter != NULL;
then_iter = then_iter->next) {
- pe_action_t *then = (pe_action_t *) then_iter->data;
+ pcmk_action_t *then = (pcmk_action_t *) then_iter->data;
if (probe_needed_before_action(probe, then)) {
order_actions(probe, then, order_flags);
@@ -458,53 +467,53 @@ add_probe_orderings_for_stops(pe_working_set_t *data_set)
* \param[in,out] after 'then' action wrapper in the ordering
*/
static void
-add_start_orderings_for_probe(pe_action_t *probe, pe_action_wrapper_t *after)
+add_start_orderings_for_probe(pcmk_action_t *probe,
+ pcmk__related_action_t *after)
{
- uint32_t flags = pe_order_optional|pe_order_runnable_left;
+ uint32_t flags = pcmk__ar_ordered|pcmk__ar_unrunnable_first_blocks;
/* Although the ordering between the probe of the clone instance and the
* start of its parent has been added in pcmk__probe_rsc_on_node(), we
- * avoided enforcing `pe_order_runnable_left` order type for that as long as
- * any of the clone instances are running to prevent them from being
- * unexpectedly stopped.
+ * avoided enforcing `pcmk__ar_unrunnable_first_blocks` order type for that
+ * as long as any of the clone instances are running to prevent them from
+ * being unexpectedly stopped.
*
* On the other hand, we still need to prevent any inactive instances from
* starting unless the probe is runnable so that we don't risk starting too
* many instances before we know the state on all nodes.
*/
- if (after->action->rsc->variant <= pe_group
- || pcmk_is_set(probe->flags, pe_action_runnable)
+ if ((after->action->rsc->variant <= pcmk_rsc_variant_group)
+ || pcmk_is_set(probe->flags, pcmk_action_runnable)
// The order type is already enforced for its parent.
- || pcmk_is_set(after->type, pe_order_runnable_left)
+ || pcmk_is_set(after->type, pcmk__ar_unrunnable_first_blocks)
|| (pe__const_top_resource(probe->rsc, false) != after->action->rsc)
- || !pcmk__str_eq(after->action->task, RSC_START, pcmk__str_none)) {
+ || !pcmk__str_eq(after->action->task, PCMK_ACTION_START,
+ pcmk__str_none)) {
return;
}
- crm_trace("Adding probe start orderings for '%s@%s (%s) "
+ crm_trace("Adding probe start orderings for 'unrunnable %s@%s "
"then instances of %s@%s'",
probe->uuid, pe__node_name(probe->node),
- pcmk_is_set(probe->flags, pe_action_runnable)? "runnable" : "unrunnable",
after->action->uuid, pe__node_name(after->action->node));
for (GList *then_iter = after->action->actions_after; then_iter != NULL;
then_iter = then_iter->next) {
- pe_action_wrapper_t *then = (pe_action_wrapper_t *) then_iter->data;
+ pcmk__related_action_t *then = then_iter->data;
if (then->action->rsc->running_on
|| (pe__const_top_resource(then->action->rsc, false)
!= after->action->rsc)
- || !pcmk__str_eq(then->action->task, RSC_START, pcmk__str_none)) {
+ || !pcmk__str_eq(then->action->task, PCMK_ACTION_START,
+ pcmk__str_none)) {
continue;
}
- crm_trace("Adding probe start ordering for '%s@%s (%s) "
+ crm_trace("Adding probe start ordering for 'unrunnable %s@%s "
"then %s@%s' (type=%#.6x)",
probe->uuid, pe__node_name(probe->node),
- pcmk_is_set(probe->flags, pe_action_runnable)? "runnable" : "unrunnable",
- then->action->uuid, pe__node_name(then->action->node),
- flags);
+ then->action->uuid, pe__node_name(then->action->node), flags);
/* Prevent the instance from starting if the instance can't, but don't
         * cause any other instances to stop if already active.
@@ -526,28 +535,26 @@ add_start_orderings_for_probe(pe_action_t *probe, pe_action_wrapper_t *after)
*
* \param[in,out] probe Probe as 'first' action in an ordering
* \param[in,out] after 'then' action in the ordering
- * \param[in,out] data_set Cluster working set
*/
static void
-add_restart_orderings_for_probe(pe_action_t *probe, pe_action_t *after,
- pe_working_set_t *data_set)
+add_restart_orderings_for_probe(pcmk_action_t *probe, pcmk_action_t *after)
{
GList *iter = NULL;
bool interleave = false;
- pe_resource_t *compatible_rsc = NULL;
+ pcmk_resource_t *compatible_rsc = NULL;
// Validate that this is a resource probe followed by some action
if ((after == NULL) || (probe == NULL) || (probe->rsc == NULL)
- || (probe->rsc->variant != pe_native)
- || !pcmk__str_eq(probe->task, RSC_STATUS, pcmk__str_casei)) {
+ || (probe->rsc->variant != pcmk_rsc_variant_primitive)
+ || !pcmk__str_eq(probe->task, PCMK_ACTION_MONITOR, pcmk__str_none)) {
return;
}
// Avoid running into any possible loop
- if (pcmk_is_set(after->flags, pe_action_tracking)) {
+ if (pcmk_is_set(after->flags, pcmk_action_detect_loop)) {
return;
}
- pe__set_action_flags(after, pe_action_tracking);
+ pe__set_action_flags(after, pcmk_action_detect_loop);
crm_trace("Adding probe restart orderings for '%s@%s then %s@%s'",
probe->uuid, pe__node_name(probe->node),
@@ -556,26 +563,28 @@ add_restart_orderings_for_probe(pe_action_t *probe, pe_action_t *after,
/* Add restart orderings if "then" is for a different primitive.
* Orderings for collective resources will be added later.
*/
- if ((after->rsc != NULL) && (after->rsc->variant == pe_native)
+ if ((after->rsc != NULL)
+ && (after->rsc->variant == pcmk_rsc_variant_primitive)
&& (probe->rsc != after->rsc)) {
GList *then_actions = NULL;
- if (pcmk__str_eq(after->task, RSC_START, pcmk__str_casei)) {
- then_actions = pe__resource_actions(after->rsc, NULL, RSC_STOP,
- FALSE);
+ if (pcmk__str_eq(after->task, PCMK_ACTION_START, pcmk__str_none)) {
+ then_actions = pe__resource_actions(after->rsc, NULL,
+ PCMK_ACTION_STOP, FALSE);
- } else if (pcmk__str_eq(after->task, RSC_PROMOTE, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(after->task, PCMK_ACTION_PROMOTE,
+ pcmk__str_none)) {
then_actions = pe__resource_actions(after->rsc, NULL,
- RSC_DEMOTE, FALSE);
+ PCMK_ACTION_DEMOTE, FALSE);
}
for (iter = then_actions; iter != NULL; iter = iter->next) {
- pe_action_t *then = (pe_action_t *) iter->data;
+ pcmk_action_t *then = (pcmk_action_t *) iter->data;
// Skip pseudo-actions (for example, those implied by fencing)
- if (!pcmk_is_set(then->flags, pe_action_pseudo)) {
- order_actions(probe, then, pe_order_optional);
+ if (!pcmk_is_set(then->flags, pcmk_action_pseudo)) {
+ order_actions(probe, then, pcmk__ar_ordered);
}
}
g_list_free(then_actions);
@@ -585,7 +594,7 @@ add_restart_orderings_for_probe(pe_action_t *probe, pe_action_t *after,
* to add orderings only for the relevant instance.
*/
if ((after->rsc != NULL)
- && (after->rsc->variant > pe_group)) {
+ && (after->rsc->variant > pcmk_rsc_variant_group)) {
const char *interleave_s = g_hash_table_lookup(after->rsc->meta,
XML_RSC_ATTR_INTERLEAVE);
@@ -593,7 +602,7 @@ add_restart_orderings_for_probe(pe_action_t *probe, pe_action_t *after,
if (interleave) {
compatible_rsc = pcmk__find_compatible_instance(probe->rsc,
after->rsc,
- RSC_ROLE_UNKNOWN,
+ pcmk_role_unknown,
false);
}
}
@@ -603,29 +612,30 @@ add_restart_orderings_for_probe(pe_action_t *probe, pe_action_t *after,
* ordered before its individual instances' actions.
*/
for (iter = after->actions_after; iter != NULL; iter = iter->next) {
- pe_action_wrapper_t *after_wrapper = (pe_action_wrapper_t *) iter->data;
+ pcmk__related_action_t *after_wrapper = iter->data;
- /* pe_order_implies_then is the reason why a required A.start
+ /* pcmk__ar_first_implies_then is the reason why a required A.start
* implies/enforces B.start to be required too, which is the cause of
* B.restart/re-promote.
*
- * Not sure about pe_order_implies_then_on_node though. It's now only
- * used for unfencing case, which tends to introduce transition
+ * Not sure about pcmk__ar_first_implies_same_node_then though. It's now
+ * only used for unfencing case, which tends to introduce transition
* loops...
*/
- if (!pcmk_is_set(after_wrapper->type, pe_order_implies_then)) {
+ if (!pcmk_is_set(after_wrapper->type, pcmk__ar_first_implies_then)) {
/* The order type between a group/clone and its child such as
* B.start-> B_child.start is:
- * pe_order_implies_first_printed | pe_order_runnable_left
+ * pcmk__ar_then_implies_first_graphed
+ * |pcmk__ar_unrunnable_first_blocks
*
* Proceed through the ordering chain and build dependencies with
* its children.
*/
if ((after->rsc == NULL)
- || (after->rsc->variant < pe_group)
+ || (after->rsc->variant < pcmk_rsc_variant_group)
|| (probe->rsc->parent == after->rsc)
|| (after_wrapper->action->rsc == NULL)
- || (after_wrapper->action->rsc->variant > pe_group)
+ || (after_wrapper->action->rsc->variant > pcmk_rsc_variant_group)
|| (after->rsc != after_wrapper->action->rsc->parent)) {
continue;
}
@@ -633,7 +643,7 @@ add_restart_orderings_for_probe(pe_action_t *probe, pe_action_t *after,
/* Proceed to the children of a group or a non-interleaved clone.
* For an interleaved clone, proceed only to the relevant child.
*/
- if ((after->rsc->variant > pe_group) && interleave
+ if ((after->rsc->variant > pcmk_rsc_variant_group) && interleave
&& ((compatible_rsc == NULL)
|| (compatible_rsc != after_wrapper->action->rsc))) {
continue;
@@ -647,7 +657,7 @@ add_restart_orderings_for_probe(pe_action_t *probe, pe_action_t *after,
pe__node_name(after_wrapper->action->node),
after_wrapper->type);
- add_restart_orderings_for_probe(probe, after_wrapper->action, data_set);
+ add_restart_orderings_for_probe(probe, after_wrapper->action);
}
}
@@ -655,17 +665,15 @@ add_restart_orderings_for_probe(pe_action_t *probe, pe_action_t *after,
* \internal
* \brief Clear the tracking flag on all scheduled actions
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
static void
-clear_actions_tracking_flag(pe_working_set_t *data_set)
+clear_actions_tracking_flag(pcmk_scheduler_t *scheduler)
{
- GList *gIter = NULL;
-
- for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ for (GList *iter = scheduler->actions; iter != NULL; iter = iter->next) {
+ pcmk_action_t *action = iter->data;
- pe__clear_action_flags(action, pe_action_tracking);
+ pe__clear_action_flags(action, pcmk_action_detect_loop);
}
}
@@ -673,37 +681,37 @@ clear_actions_tracking_flag(pe_working_set_t *data_set)
* \internal
* \brief Add start and restart orderings for probes scheduled for a resource
*
- * \param[in,out] rsc Resource whose probes should be ordered
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] data Resource whose probes should be ordered
+ * \param[in] user_data Unused
*/
static void
-add_start_restart_orderings_for_rsc(pe_resource_t *rsc,
- pe_working_set_t *data_set)
+add_start_restart_orderings_for_rsc(gpointer data, gpointer user_data)
{
+ pcmk_resource_t *rsc = data;
GList *probes = NULL;
// For collective resources, order each instance recursively
- if (rsc->variant != pe_native) {
- g_list_foreach(rsc->children,
- (GFunc) add_start_restart_orderings_for_rsc, data_set);
+ if (rsc->variant != pcmk_rsc_variant_primitive) {
+ g_list_foreach(rsc->children, add_start_restart_orderings_for_rsc,
+ NULL);
return;
}
// Find all probes for given resource
- probes = pe__resource_actions(rsc, NULL, RSC_STATUS, FALSE);
+ probes = pe__resource_actions(rsc, NULL, PCMK_ACTION_MONITOR, FALSE);
// Add probe restart orderings for each probe found
for (GList *iter = probes; iter != NULL; iter = iter->next) {
- pe_action_t *probe = (pe_action_t *) iter->data;
+ pcmk_action_t *probe = (pcmk_action_t *) iter->data;
for (GList *then_iter = probe->actions_after; then_iter != NULL;
then_iter = then_iter->next) {
- pe_action_wrapper_t *then = (pe_action_wrapper_t *) then_iter->data;
+ pcmk__related_action_t *then = then_iter->data;
add_start_orderings_for_probe(probe, then);
- add_restart_orderings_for_probe(probe, then->action, data_set);
- clear_actions_tracking_flag(data_set);
+ add_restart_orderings_for_probe(probe, then->action);
+ clear_actions_tracking_flag(rsc->cluster);
}
}
@@ -714,12 +722,12 @@ add_start_restart_orderings_for_rsc(pe_resource_t *rsc,
* \internal
* \brief Add "A then probe B" orderings for "A then B" orderings
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \note This function is currently disabled (see next comment).
*/
static void
-order_then_probes(pe_working_set_t *data_set)
+order_then_probes(pcmk_scheduler_t *scheduler)
{
#if 0
/* Given an ordering "A then B", we would prefer to wait for A to be started
@@ -751,14 +759,14 @@ order_then_probes(pe_working_set_t *data_set)
* narrowing use case suggests that this code should remain disabled until
* someone gets smarter.
*/
- for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
- pe_action_t *start = NULL;
+ pcmk_action_t *start = NULL;
GList *actions = NULL;
GList *probes = NULL;
- actions = pe__resource_actions(rsc, NULL, RSC_START, FALSE);
+ actions = pe__resource_actions(rsc, NULL, PCMK_ACTION_START, FALSE);
if (actions) {
start = actions->data;
@@ -770,22 +778,22 @@ order_then_probes(pe_working_set_t *data_set)
continue;
}
- probes = pe__resource_actions(rsc, NULL, RSC_STATUS, FALSE);
+ probes = pe__resource_actions(rsc, NULL, PCMK_ACTION_MONITOR, FALSE);
for (actions = start->actions_before; actions != NULL;
actions = actions->next) {
- pe_action_wrapper_t *before = (pe_action_wrapper_t *) actions->data;
+ pcmk__related_action_t *before = actions->data;
- pe_action_t *first = before->action;
- pe_resource_t *first_rsc = first->rsc;
+ pcmk_action_t *first = before->action;
+ pcmk_resource_t *first_rsc = first->rsc;
if (first->required_runnable_before) {
for (GList *clone_actions = first->actions_before;
clone_actions != NULL;
clone_actions = clone_actions->next) {
- before = (pe_action_wrapper_t *) clone_actions->data;
+ before = clone_actions->data;
crm_trace("Testing '%s then %s' for %s",
first->uuid, before->action->uuid, start->uuid);
@@ -795,7 +803,8 @@ order_then_probes(pe_working_set_t *data_set)
break;
}
- } else if (!pcmk__str_eq(first->task, RSC_START, pcmk__str_none)) {
+ } else if (!pcmk__str_eq(first->task, PCMK_ACTION_START,
+ pcmk__str_none)) {
crm_trace("Not a start op %s for %s", first->uuid, start->uuid);
}
@@ -819,10 +828,10 @@ order_then_probes(pe_working_set_t *data_set)
for (GList *probe_iter = probes; probe_iter != NULL;
probe_iter = probe_iter->next) {
- pe_action_t *probe = (pe_action_t *) probe_iter->data;
+ pcmk_action_t *probe = (pcmk_action_t *) probe_iter->data;
crm_err("Ordering %s before %s", first->uuid, probe->uuid);
- order_actions(first, probe, pe_order_optional);
+ order_actions(first, probe, pcmk__ar_ordered);
}
}
}
@@ -830,35 +839,35 @@ order_then_probes(pe_working_set_t *data_set)
}
void
-pcmk__order_probes(pe_working_set_t *data_set)
+pcmk__order_probes(pcmk_scheduler_t *scheduler)
{
// Add orderings for "probe then X"
- g_list_foreach(data_set->resources,
- (GFunc) add_start_restart_orderings_for_rsc, data_set);
- add_probe_orderings_for_stops(data_set);
+ g_list_foreach(scheduler->resources, add_start_restart_orderings_for_rsc,
+ NULL);
+ add_probe_orderings_for_stops(scheduler);
- order_then_probes(data_set);
+ order_then_probes(scheduler);
}
/*!
* \internal
* \brief Schedule any probes needed
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \note This may also schedule fencing of failed remote nodes.
*/
void
-pcmk__schedule_probes(pe_working_set_t *data_set)
+pcmk__schedule_probes(pcmk_scheduler_t *scheduler)
{
// Schedule probes on each node in the cluster as needed
- for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) iter->data;
+ for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
+ pcmk_node_t *node = (pcmk_node_t *) iter->data;
const char *probed = NULL;
if (!node->details->online) { // Don't probe offline nodes
if (pcmk__is_failed_remote_node(node)) {
- pe_fence_node(data_set, node,
+ pe_fence_node(scheduler, node,
"the connection is unrecoverable", FALSE);
}
continue;
@@ -878,19 +887,18 @@ pcmk__schedule_probes(pe_working_set_t *data_set)
*/
probed = pe_node_attribute_raw(node, CRM_OP_PROBED);
if (probed != NULL && crm_is_true(probed) == FALSE) {
- pe_action_t *probe_op = NULL;
+ pcmk_action_t *probe_op = NULL;
probe_op = custom_action(NULL,
crm_strdup_printf("%s-%s", CRM_OP_REPROBE,
node->details->uname),
- CRM_OP_REPROBE, node, FALSE, TRUE,
- data_set);
+ CRM_OP_REPROBE, node, FALSE, scheduler);
add_hash_param(probe_op->meta, XML_ATTR_TE_NOWAIT,
XML_BOOLEAN_TRUE);
continue;
}
// Probe each resource in the cluster on this node, as needed
- pcmk__probe_resource_list(data_set->resources, node);
+ pcmk__probe_resource_list(scheduler->resources, node);
}
}
diff --git a/lib/pacemaker/pcmk_sched_promotable.c b/lib/pacemaker/pcmk_sched_promotable.c
index d12d017..7f81a13 100644
--- a/lib/pacemaker/pcmk_sched_promotable.c
+++ b/lib/pacemaker/pcmk_sched_promotable.c
@@ -23,19 +23,22 @@
* \param[in,out] last Previous instance ordered (NULL if \p child is first)
*/
static void
-order_instance_promotion(pe_resource_t *clone, pe_resource_t *child,
- pe_resource_t *last)
+order_instance_promotion(pcmk_resource_t *clone, pcmk_resource_t *child,
+ pcmk_resource_t *last)
{
// "Promote clone" -> promote instance -> "clone promoted"
- pcmk__order_resource_actions(clone, RSC_PROMOTE, child, RSC_PROMOTE,
- pe_order_optional);
- pcmk__order_resource_actions(child, RSC_PROMOTE, clone, RSC_PROMOTED,
- pe_order_optional);
+ pcmk__order_resource_actions(clone, PCMK_ACTION_PROMOTE,
+ child, PCMK_ACTION_PROMOTE,
+ pcmk__ar_ordered);
+ pcmk__order_resource_actions(child, PCMK_ACTION_PROMOTE,
+ clone, PCMK_ACTION_PROMOTED,
+ pcmk__ar_ordered);
// If clone is ordered, order this instance relative to last
if ((last != NULL) && pe__clone_is_ordered(clone)) {
- pcmk__order_resource_actions(last, RSC_PROMOTE, child, RSC_PROMOTE,
- pe_order_optional);
+ pcmk__order_resource_actions(last, PCMK_ACTION_PROMOTE,
+ child, PCMK_ACTION_PROMOTE,
+ pcmk__ar_ordered);
}
}
@@ -48,19 +51,21 @@ order_instance_promotion(pe_resource_t *clone, pe_resource_t *child,
* \param[in] last Previous instance ordered (NULL if \p child is first)
*/
static void
-order_instance_demotion(pe_resource_t *clone, pe_resource_t *child,
- pe_resource_t *last)
+order_instance_demotion(pcmk_resource_t *clone, pcmk_resource_t *child,
+ pcmk_resource_t *last)
{
// "Demote clone" -> demote instance -> "clone demoted"
- pcmk__order_resource_actions(clone, RSC_DEMOTE, child, RSC_DEMOTE,
- pe_order_implies_first_printed);
- pcmk__order_resource_actions(child, RSC_DEMOTE, clone, RSC_DEMOTED,
- pe_order_implies_then_printed);
+ pcmk__order_resource_actions(clone, PCMK_ACTION_DEMOTE, child,
+ PCMK_ACTION_DEMOTE,
+ pcmk__ar_then_implies_first_graphed);
+ pcmk__order_resource_actions(child, PCMK_ACTION_DEMOTE,
+ clone, PCMK_ACTION_DEMOTED,
+ pcmk__ar_first_implies_then_graphed);
// If clone is ordered, order this instance relative to last
if ((last != NULL) && pe__clone_is_ordered(clone)) {
- pcmk__order_resource_actions(child, RSC_DEMOTE, last, RSC_DEMOTE,
- pe_order_optional);
+ pcmk__order_resource_actions(child, PCMK_ACTION_DEMOTE, last,
+ PCMK_ACTION_DEMOTE, pcmk__ar_ordered);
}
}
@@ -73,32 +78,35 @@ order_instance_demotion(pe_resource_t *clone, pe_resource_t *child,
* \param[out] promoting If \p rsc will be promoted, this will be set to true
*/
static void
-check_for_role_change(const pe_resource_t *rsc, bool *demoting, bool *promoting)
+check_for_role_change(const pcmk_resource_t *rsc, bool *demoting,
+ bool *promoting)
{
const GList *iter = NULL;
// If this is a cloned group, check group members recursively
if (rsc->children != NULL) {
for (iter = rsc->children; iter != NULL; iter = iter->next) {
- check_for_role_change((const pe_resource_t *) iter->data,
+ check_for_role_change((const pcmk_resource_t *) iter->data,
demoting, promoting);
}
return;
}
for (iter = rsc->actions; iter != NULL; iter = iter->next) {
- const pe_action_t *action = (const pe_action_t *) iter->data;
+ const pcmk_action_t *action = (const pcmk_action_t *) iter->data;
if (*promoting && *demoting) {
return;
- } else if (pcmk_is_set(action->flags, pe_action_optional)) {
+ } else if (pcmk_is_set(action->flags, pcmk_action_optional)) {
continue;
- } else if (pcmk__str_eq(RSC_DEMOTE, action->task, pcmk__str_none)) {
+ } else if (pcmk__str_eq(PCMK_ACTION_DEMOTE, action->task,
+ pcmk__str_none)) {
*demoting = true;
- } else if (pcmk__str_eq(RSC_PROMOTE, action->task, pcmk__str_none)) {
+ } else if (pcmk__str_eq(PCMK_ACTION_PROMOTE, action->task,
+ pcmk__str_none)) {
*promoting = true;
}
}
@@ -117,28 +125,29 @@ check_for_role_change(const pe_resource_t *rsc, bool *demoting, bool *promoting)
* \param[in] chosen Node where \p child will be placed
*/
static void
-apply_promoted_locations(pe_resource_t *child,
+apply_promoted_locations(pcmk_resource_t *child,
const GList *location_constraints,
- const pe_node_t *chosen)
+ const pcmk_node_t *chosen)
{
for (const GList *iter = location_constraints; iter; iter = iter->next) {
const pe__location_t *location = iter->data;
- pe_node_t *weighted_node = NULL;
+ const pcmk_node_t *constraint_node = NULL;
- if (location->role_filter == RSC_ROLE_PROMOTED) {
- weighted_node = pe_find_node_id(location->node_list_rh,
- chosen->details->id);
+ if (location->role_filter == pcmk_role_promoted) {
+ constraint_node = pe_find_node_id(location->node_list_rh,
+ chosen->details->id);
}
- if (weighted_node != NULL) {
+ if (constraint_node != NULL) {
int new_priority = pcmk__add_scores(child->priority,
- weighted_node->weight);
+ constraint_node->weight);
pe_rsc_trace(child,
"Applying location %s to %s promotion priority on %s: "
"%s + %s = %s",
- location->id, child->id, pe__node_name(weighted_node),
+ location->id, child->id,
+ pe__node_name(constraint_node),
pcmk_readable_score(child->priority),
- pcmk_readable_score(weighted_node->weight),
+ pcmk_readable_score(constraint_node->weight),
pcmk_readable_score(new_priority));
child->priority = new_priority;
}
@@ -153,16 +162,16 @@ apply_promoted_locations(pe_resource_t *child,
*
* \return Node that \p rsc will be promoted on, or NULL if none
*/
-static pe_node_t *
-node_to_be_promoted_on(const pe_resource_t *rsc)
+static pcmk_node_t *
+node_to_be_promoted_on(const pcmk_resource_t *rsc)
{
- pe_node_t *node = NULL;
- pe_node_t *local_node = NULL;
- const pe_resource_t *parent = NULL;
+ pcmk_node_t *node = NULL;
+ pcmk_node_t *local_node = NULL;
+ const pcmk_resource_t *parent = NULL;
// If this is a cloned group, bail if any group member can't be promoted
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *child = (pe_resource_t *) iter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) iter->data;
if (node_to_be_promoted_on(child) == NULL) {
pe_rsc_trace(rsc,
@@ -178,8 +187,8 @@ node_to_be_promoted_on(const pe_resource_t *rsc)
rsc->id);
return NULL;
- } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
- if (rsc->fns->state(rsc, TRUE) == RSC_ROLE_PROMOTED) {
+ } else if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
+ if (rsc->fns->state(rsc, TRUE) == pcmk_role_promoted) {
crm_notice("Unmanaged instance %s will be left promoted on %s",
rsc->id, pe__node_name(node));
} else {
@@ -202,14 +211,14 @@ node_to_be_promoted_on(const pe_resource_t *rsc)
}
parent = pe__const_top_resource(rsc, false);
- local_node = pe_hash_table_lookup(parent->allowed_nodes, node->details->id);
+ local_node = g_hash_table_lookup(parent->allowed_nodes, node->details->id);
if (local_node == NULL) {
- /* It should not be possible for the scheduler to have allocated the
+ /* It should not be possible for the scheduler to have assigned the
* instance to a node where its parent is not allowed, but it's good to
* have a fail-safe.
*/
- if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
crm_warn("%s can't be promoted because %s is not allowed on %s "
"(scheduler bug?)",
rsc->id, parent->id, pe__node_name(node));
@@ -217,7 +226,7 @@ node_to_be_promoted_on(const pe_resource_t *rsc)
return NULL;
} else if ((local_node->count >= pe__clone_promoted_node_max(parent))
- && pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ && pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pe_rsc_trace(rsc,
"%s can't be promoted because %s has "
"maximum promoted instances already",
@@ -242,11 +251,11 @@ node_to_be_promoted_on(const pe_resource_t *rsc)
static gint
cmp_promotable_instance(gconstpointer a, gconstpointer b)
{
- const pe_resource_t *rsc1 = (const pe_resource_t *) a;
- const pe_resource_t *rsc2 = (const pe_resource_t *) b;
+ const pcmk_resource_t *rsc1 = (const pcmk_resource_t *) a;
+ const pcmk_resource_t *rsc2 = (const pcmk_resource_t *) b;
- enum rsc_role_e role1 = RSC_ROLE_UNKNOWN;
- enum rsc_role_e role2 = RSC_ROLE_UNKNOWN;
+ enum rsc_role_e role1 = pcmk_role_unknown;
+ enum rsc_role_e role2 = pcmk_role_unknown;
CRM_ASSERT((rsc1 != NULL) && (rsc2 != NULL));
@@ -288,23 +297,23 @@ cmp_promotable_instance(gconstpointer a, gconstpointer b)
/*!
* \internal
- * \brief Add a promotable clone instance's sort index to its node's weight
+ * \brief Add a promotable clone instance's sort index to its node's score
*
* Add a promotable clone instance's sort index (which sums its promotion
* preferences and scores of relevant location constraints for the promoted
- * role) to the node weight of the instance's allocated node.
+ * role) to the node score of the instance's assigned node.
*
* \param[in] data Promotable clone instance
* \param[in,out] user_data Clone parent of \p data
*/
static void
-add_sort_index_to_node_weight(gpointer data, gpointer user_data)
+add_sort_index_to_node_score(gpointer data, gpointer user_data)
{
- const pe_resource_t *child = (const pe_resource_t *) data;
- pe_resource_t *clone = (pe_resource_t *) user_data;
+ const pcmk_resource_t *child = (const pcmk_resource_t *) data;
+ pcmk_resource_t *clone = (pcmk_resource_t *) user_data;
- pe_node_t *node = NULL;
- const pe_node_t *chosen = NULL;
+ pcmk_node_t *node = NULL;
+ const pcmk_node_t *chosen = NULL;
if (child->sort_index < 0) {
pe_rsc_trace(clone, "Not adding sort index of %s: negative", child->id);
@@ -317,8 +326,7 @@ add_sort_index_to_node_weight(gpointer data, gpointer user_data)
return;
}
- node = (pe_node_t *) pe_hash_table_lookup(clone->allowed_nodes,
- chosen->details->id);
+ node = g_hash_table_lookup(clone->allowed_nodes, chosen->details->id);
CRM_ASSERT(node != NULL);
node->weight = pcmk__add_scores(child->sort_index, node->weight);
@@ -330,7 +338,7 @@ add_sort_index_to_node_weight(gpointer data, gpointer user_data)
/*!
* \internal
- * \brief Apply colocation to dependent's node weights if for promoted role
+ * \brief Apply colocation to dependent's node scores if for promoted role
*
* \param[in,out] data Colocation constraint to apply
* \param[in,out] user_data Promotable clone that is constraint's dependent
@@ -338,31 +346,30 @@ add_sort_index_to_node_weight(gpointer data, gpointer user_data)
static void
apply_coloc_to_dependent(gpointer data, gpointer user_data)
{
- pcmk__colocation_t *constraint = (pcmk__colocation_t *) data;
- pe_resource_t *clone = (pe_resource_t *) user_data;
- pe_resource_t *primary = constraint->primary;
+ pcmk__colocation_t *colocation = data;
+ pcmk_resource_t *clone = user_data;
+ pcmk_resource_t *primary = colocation->primary;
uint32_t flags = pcmk__coloc_select_default;
- float factor = constraint->score / (float) INFINITY;
+ float factor = colocation->score / (float) INFINITY;
- if (constraint->dependent_role != RSC_ROLE_PROMOTED) {
+ if (colocation->dependent_role != pcmk_role_promoted) {
return;
}
- if (constraint->score < INFINITY) {
+ if (colocation->score < INFINITY) {
flags = pcmk__coloc_select_active;
}
pe_rsc_trace(clone, "Applying colocation %s (promoted %s with %s) @%s",
- constraint->id, constraint->dependent->id,
- constraint->primary->id,
- pcmk_readable_score(constraint->score));
- primary->cmds->add_colocated_node_scores(primary, clone->id,
- &clone->allowed_nodes,
- constraint->node_attribute, factor,
- flags);
+ colocation->id, colocation->dependent->id,
+ colocation->primary->id,
+ pcmk_readable_score(colocation->score));
+ primary->cmds->add_colocated_node_scores(primary, clone, clone->id,
+ &clone->allowed_nodes, colocation,
+ factor, flags);
}
/*!
* \internal
- * \brief Apply colocation to primary's node weights if for promoted role
+ * \brief Apply colocation to primary's node scores if for promoted role
*
* \param[in,out] data Colocation constraint to apply
* \param[in,out] user_data Promotable clone that is constraint's primary
@@ -370,45 +377,44 @@ apply_coloc_to_dependent(gpointer data, gpointer user_data)
static void
apply_coloc_to_primary(gpointer data, gpointer user_data)
{
- pcmk__colocation_t *constraint = (pcmk__colocation_t *) data;
- pe_resource_t *clone = (pe_resource_t *) user_data;
- pe_resource_t *dependent = constraint->dependent;
- const float factor = constraint->score / (float) INFINITY;
+ pcmk__colocation_t *colocation = data;
+ pcmk_resource_t *clone = user_data;
+ pcmk_resource_t *dependent = colocation->dependent;
+ const float factor = colocation->score / (float) INFINITY;
const uint32_t flags = pcmk__coloc_select_active
|pcmk__coloc_select_nonnegative;
- if ((constraint->primary_role != RSC_ROLE_PROMOTED)
- || !pcmk__colocation_has_influence(constraint, NULL)) {
+ if ((colocation->primary_role != pcmk_role_promoted)
+ || !pcmk__colocation_has_influence(colocation, NULL)) {
return;
}
pe_rsc_trace(clone, "Applying colocation %s (%s with promoted %s) @%s",
- constraint->id, constraint->dependent->id,
- constraint->primary->id,
- pcmk_readable_score(constraint->score));
- dependent->cmds->add_colocated_node_scores(dependent, clone->id,
+ colocation->id, colocation->dependent->id,
+ colocation->primary->id,
+ pcmk_readable_score(colocation->score));
+ dependent->cmds->add_colocated_node_scores(dependent, clone, clone->id,
&clone->allowed_nodes,
- constraint->node_attribute,
- factor, flags);
+ colocation, factor, flags);
}
/*!
* \internal
- * \brief Set clone instance's sort index to its node's weight
+ * \brief Set clone instance's sort index to its node's score
*
* \param[in,out] data Promotable clone instance
* \param[in] user_data Parent clone of \p data
*/
static void
-set_sort_index_to_node_weight(gpointer data, gpointer user_data)
+set_sort_index_to_node_score(gpointer data, gpointer user_data)
{
- pe_resource_t *child = (pe_resource_t *) data;
- const pe_resource_t *clone = (const pe_resource_t *) user_data;
+ pcmk_resource_t *child = (pcmk_resource_t *) data;
+ const pcmk_resource_t *clone = (const pcmk_resource_t *) user_data;
- pe_node_t *chosen = child->fns->location(child, NULL, FALSE);
+ pcmk_node_t *chosen = child->fns->location(child, NULL, FALSE);
- if (!pcmk_is_set(child->flags, pe_rsc_managed)
- && (child->next_role == RSC_ROLE_PROMOTED)) {
+ if (!pcmk_is_set(child->flags, pcmk_rsc_managed)
+ && (child->next_role == pcmk_role_promoted)) {
child->sort_index = INFINITY;
pe_rsc_trace(clone,
"Final sort index for %s is INFINITY (unmanaged promoted)",
@@ -416,18 +422,17 @@ set_sort_index_to_node_weight(gpointer data, gpointer user_data)
} else if ((chosen == NULL) || (child->sort_index < 0)) {
pe_rsc_trace(clone,
- "Final sort index for %s is %d (ignoring node weight)",
+ "Final sort index for %s is %d (ignoring node score)",
child->id, child->sort_index);
} else {
- const pe_node_t *node = NULL;
+ const pcmk_node_t *node = g_hash_table_lookup(clone->allowed_nodes,
+ chosen->details->id);
- node = pe_hash_table_lookup(clone->allowed_nodes, chosen->details->id);
CRM_ASSERT(node != NULL);
-
child->sort_index = node->weight;
pe_rsc_trace(clone,
- "Merging weights for %s: final sort index for %s is %d",
+ "Adding scores for %s: final sort index for %s is %d",
clone->id, child->id, child->sort_index);
}
}
@@ -439,44 +444,48 @@ set_sort_index_to_node_weight(gpointer data, gpointer user_data)
* \param[in,out] clone Promotable clone to sort
*/
static void
-sort_promotable_instances(pe_resource_t *clone)
+sort_promotable_instances(pcmk_resource_t *clone)
{
- if (pe__set_clone_flag(clone, pe__clone_promotion_constrained)
+ GList *colocations = NULL;
+
+ if (pe__set_clone_flag(clone, pcmk__clone_promotion_constrained)
== pcmk_rc_already) {
return;
}
- pe__set_resource_flags(clone, pe_rsc_merging);
+ pe__set_resource_flags(clone, pcmk_rsc_updating_nodes);
for (GList *iter = clone->children; iter != NULL; iter = iter->next) {
- pe_resource_t *child = (pe_resource_t *) iter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) iter->data;
pe_rsc_trace(clone,
- "Merging weights for %s: initial sort index for %s is %d",
+ "Adding scores for %s: initial sort index for %s is %d",
clone->id, child->id, child->sort_index);
}
- pe__show_node_weights(true, clone, "Before", clone->allowed_nodes,
- clone->cluster);
+ pe__show_node_scores(true, clone, "Before", clone->allowed_nodes,
+ clone->cluster);
- /* Because the this_with_colocations() and with_this_colocations() methods
- * boil down to copies of rsc_cons and rsc_cons_lhs for clones, we can use
- * those here directly for efficiency.
- */
- g_list_foreach(clone->children, add_sort_index_to_node_weight, clone);
- g_list_foreach(clone->rsc_cons, apply_coloc_to_dependent, clone);
- g_list_foreach(clone->rsc_cons_lhs, apply_coloc_to_primary, clone);
+ g_list_foreach(clone->children, add_sort_index_to_node_score, clone);
+
+ colocations = pcmk__this_with_colocations(clone);
+ g_list_foreach(colocations, apply_coloc_to_dependent, clone);
+ g_list_free(colocations);
+
+ colocations = pcmk__with_this_colocations(clone);
+ g_list_foreach(colocations, apply_coloc_to_primary, clone);
+ g_list_free(colocations);
// Ban resource from all nodes if it needs a ticket but doesn't have it
pcmk__require_promotion_tickets(clone);
- pe__show_node_weights(true, clone, "After", clone->allowed_nodes,
- clone->cluster);
+ pe__show_node_scores(true, clone, "After", clone->allowed_nodes,
+ clone->cluster);
- // Reset sort indexes to final node weights
- g_list_foreach(clone->children, set_sort_index_to_node_weight, clone);
+ // Reset sort indexes to final node scores
+ g_list_foreach(clone->children, set_sort_index_to_node_score, clone);
// Finally, sort instances in descending order of promotion priority
clone->children = g_list_sort(clone->children, cmp_promotable_instance);
- pe__clear_resource_flags(clone, pe_rsc_merging);
+ pe__clear_resource_flags(clone, pcmk_rsc_updating_nodes);
}
/*!
@@ -489,17 +498,18 @@ sort_promotable_instances(pe_resource_t *clone)
*
* \return
*/
-static pe_resource_t *
-find_active_anon_instance(const pe_resource_t *clone, const char *id,
- const pe_node_t *node)
+static pcmk_resource_t *
+find_active_anon_instance(const pcmk_resource_t *clone, const char *id,
+ const pcmk_node_t *node)
{
for (GList *iter = clone->children; iter; iter = iter->next) {
- pe_resource_t *child = iter->data;
- pe_resource_t *active = NULL;
+ pcmk_resource_t *child = iter->data;
+ pcmk_resource_t *active = NULL;
// Use ->find_rsc() in case this is a cloned group
active = clone->fns->find_rsc(child, id, node,
- pe_find_clone|pe_find_current);
+ pcmk_rsc_match_clone_only
+ |pcmk_rsc_match_current_node);
if (active != NULL) {
return active;
}
@@ -518,16 +528,17 @@ find_active_anon_instance(const pe_resource_t *clone, const char *id,
* otherwise false
*/
static bool
-anonymous_known_on(const pe_resource_t *clone, const char *id,
- const pe_node_t *node)
+anonymous_known_on(const pcmk_resource_t *clone, const char *id,
+ const pcmk_node_t *node)
{
for (GList *iter = clone->children; iter; iter = iter->next) {
- pe_resource_t *child = iter->data;
+ pcmk_resource_t *child = iter->data;
/* Use ->find_rsc() because this might be a cloned group, and knowing
* that other members of the group are known here implies nothing.
*/
- child = clone->fns->find_rsc(child, id, NULL, pe_find_clone);
+ child = clone->fns->find_rsc(child, id, NULL,
+ pcmk_rsc_match_clone_only);
CRM_LOG_ASSERT(child != NULL);
if (child != NULL) {
if (g_hash_table_lookup(child->known_on, node->details->id)) {
@@ -548,10 +559,10 @@ anonymous_known_on(const pe_resource_t *clone, const char *id,
* \return true if \p node is allowed to run \p rsc, otherwise false
*/
static bool
-is_allowed(const pe_resource_t *rsc, const pe_node_t *node)
+is_allowed(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
- pe_node_t *allowed = pe_hash_table_lookup(rsc->allowed_nodes,
- node->details->id);
+ pcmk_node_t *allowed = g_hash_table_lookup(rsc->allowed_nodes,
+ node->details->id);
return (allowed != NULL) && (allowed->weight >= 0);
}
@@ -566,15 +577,15 @@ is_allowed(const pe_resource_t *rsc, const pe_node_t *node)
* otherwise false
*/
static bool
-promotion_score_applies(const pe_resource_t *rsc, const pe_node_t *node)
+promotion_score_applies(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
char *id = clone_strip(rsc->id);
- const pe_resource_t *parent = pe__const_top_resource(rsc, false);
- pe_resource_t *active = NULL;
+ const pcmk_resource_t *parent = pe__const_top_resource(rsc, false);
+ pcmk_resource_t *active = NULL;
const char *reason = "allowed";
// Some checks apply only to anonymous clone instances
- if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
// If instance is active on the node, its score definitely applies
active = find_active_anon_instance(parent, id, node);
@@ -604,7 +615,7 @@ promotion_score_applies(const pe_resource_t *rsc, const pe_node_t *node)
/* Otherwise, we've probed and/or started the resource *somewhere*, so
* consider promotion scores on nodes where we know the status.
*/
- if ((pe_hash_table_lookup(rsc->known_on, node->details->id) != NULL)
+ if ((g_hash_table_lookup(rsc->known_on, node->details->id) != NULL)
|| (pe_find_node_id(rsc->running_on, node->details->id) != NULL)) {
reason = "known";
} else {
@@ -640,16 +651,20 @@ check_allowed:
* \return Value of promotion score node attribute for \p rsc on \p node
*/
static const char *
-promotion_attr_value(const pe_resource_t *rsc, const pe_node_t *node,
+promotion_attr_value(const pcmk_resource_t *rsc, const pcmk_node_t *node,
const char *name)
{
char *attr_name = NULL;
const char *attr_value = NULL;
+ enum pcmk__rsc_node node_type = pcmk__rsc_node_assigned;
- CRM_CHECK((rsc != NULL) && (node != NULL) && (name != NULL), return NULL);
-
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_unassigned)) {
+ // Not assigned yet
+ node_type = pcmk__rsc_node_current;
+ }
attr_name = pcmk_promotion_score_name(name);
- attr_value = pe_node_attribute_calculated(node, attr_name, rsc);
+ attr_value = pe__node_attribute_calculated(node, attr_name, rsc, node_type,
+ false);
free(attr_name);
return attr_value;
}
@@ -665,7 +680,7 @@ promotion_attr_value(const pe_resource_t *rsc, const pe_node_t *node,
* \return Promotion score for \p rsc on \p node (or 0 if none)
*/
static int
-promotion_score(const pe_resource_t *rsc, const pe_node_t *node,
+promotion_score(const pcmk_resource_t *rsc, const pcmk_node_t *node,
bool *is_default)
{
char *name = NULL;
@@ -686,7 +701,7 @@ promotion_score(const pe_resource_t *rsc, const pe_node_t *node,
for (const GList *iter = rsc->children;
iter != NULL; iter = iter->next) {
- const pe_resource_t *child = (const pe_resource_t *) iter->data;
+ const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data;
bool child_default = false;
int child_score = promotion_score(child, node, &child_default);
@@ -712,7 +727,7 @@ promotion_score(const pe_resource_t *rsc, const pe_node_t *node,
if (attr_value != NULL) {
pe_rsc_trace(rsc, "Promotion score for %s on %s = %s",
name, pe__node_name(node), pcmk__s(attr_value, "(unset)"));
- } else if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ } else if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
/* If we don't have any resource history yet, we won't have clone_name.
* In that case, for anonymous clones, try the resource name without
* any instance number.
@@ -739,22 +754,23 @@ promotion_score(const pe_resource_t *rsc, const pe_node_t *node,
/*!
* \internal
- * \brief Include promotion scores in instances' node weights and priorities
+ * \brief Include promotion scores in instances' node scores and priorities
*
* \param[in,out] rsc Promotable clone resource to update
*/
void
-pcmk__add_promotion_scores(pe_resource_t *rsc)
+pcmk__add_promotion_scores(pcmk_resource_t *rsc)
{
- if (pe__set_clone_flag(rsc, pe__clone_promotion_added) == pcmk_rc_already) {
+ if (pe__set_clone_flag(rsc,
+ pcmk__clone_promotion_added) == pcmk_rc_already) {
return;
}
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) iter->data;
GHashTableIter iter;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
int score, new_score;
g_hash_table_iter_init(&iter, child_rsc->allowed_nodes);
@@ -800,11 +816,11 @@ pcmk__add_promotion_scores(pe_resource_t *rsc)
static void
set_current_role_unpromoted(void *data, void *user_data)
{
- pe_resource_t *rsc = (pe_resource_t *) data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) data;
- if (rsc->role == RSC_ROLE_STARTED) {
+ if (rsc->role == pcmk_role_started) {
// Promotable clones should use unpromoted role instead of started
- rsc->role = RSC_ROLE_UNPROMOTED;
+ rsc->role = pcmk_role_unpromoted;
}
g_list_foreach(rsc->children, set_current_role_unpromoted, NULL);
}
@@ -819,14 +835,14 @@ set_current_role_unpromoted(void *data, void *user_data)
static void
set_next_role_unpromoted(void *data, void *user_data)
{
- pe_resource_t *rsc = (pe_resource_t *) data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) data;
GList *assigned = NULL;
rsc->fns->location(rsc, &assigned, FALSE);
if (assigned == NULL) {
- pe__set_next_role(rsc, RSC_ROLE_STOPPED, "stopped instance");
+ pe__set_next_role(rsc, pcmk_role_stopped, "stopped instance");
} else {
- pe__set_next_role(rsc, RSC_ROLE_UNPROMOTED, "unpromoted instance");
+ pe__set_next_role(rsc, pcmk_role_unpromoted, "unpromoted instance");
g_list_free(assigned);
}
g_list_foreach(rsc->children, set_next_role_unpromoted, NULL);
@@ -842,10 +858,10 @@ set_next_role_unpromoted(void *data, void *user_data)
static void
set_next_role_promoted(void *data, gpointer user_data)
{
- pe_resource_t *rsc = (pe_resource_t *) data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) data;
- if (rsc->next_role == RSC_ROLE_UNKNOWN) {
- pe__set_next_role(rsc, RSC_ROLE_PROMOTED, "promoted instance");
+ if (rsc->next_role == pcmk_role_unknown) {
+ pe__set_next_role(rsc, pcmk_role_promoted, "promoted instance");
}
g_list_foreach(rsc->children, set_next_role_promoted, NULL);
}
@@ -857,11 +873,11 @@ set_next_role_promoted(void *data, gpointer user_data)
* \param[in,out] instance Promotable clone instance to show
*/
static void
-show_promotion_score(pe_resource_t *instance)
+show_promotion_score(pcmk_resource_t *instance)
{
- pe_node_t *chosen = instance->fns->location(instance, NULL, FALSE);
+ pcmk_node_t *chosen = instance->fns->location(instance, NULL, FALSE);
- if (pcmk_is_set(instance->cluster->flags, pe_flag_show_scores)
+ if (pcmk_is_set(instance->cluster->flags, pcmk_sched_output_scores)
&& !pcmk__is_daemon && (instance->cluster->priv != NULL)) {
pcmk__output_t *out = instance->cluster->priv;
@@ -888,16 +904,16 @@ show_promotion_score(pe_resource_t *instance)
static void
set_instance_priority(gpointer data, gpointer user_data)
{
- pe_resource_t *instance = (pe_resource_t *) data;
- const pe_resource_t *clone = (const pe_resource_t *) user_data;
- const pe_node_t *chosen = NULL;
- enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;
+ pcmk_resource_t *instance = (pcmk_resource_t *) data;
+ const pcmk_resource_t *clone = (const pcmk_resource_t *) user_data;
+ const pcmk_node_t *chosen = NULL;
+ enum rsc_role_e next_role = pcmk_role_unknown;
GList *list = NULL;
pe_rsc_trace(clone, "Assigning priority for %s: %s", instance->id,
role2text(instance->next_role));
- if (instance->fns->state(instance, TRUE) == RSC_ROLE_STARTED) {
+ if (instance->fns->state(instance, TRUE) == pcmk_role_started) {
set_current_role_unpromoted(instance, NULL);
}
@@ -914,8 +930,8 @@ set_instance_priority(gpointer data, gpointer user_data)
next_role = instance->fns->state(instance, FALSE);
switch (next_role) {
- case RSC_ROLE_STARTED:
- case RSC_ROLE_UNKNOWN:
+ case pcmk_role_started:
+ case pcmk_role_unknown:
// Set instance priority to its promotion score (or -1 if none)
{
bool is_default = false;
@@ -935,13 +951,13 @@ set_instance_priority(gpointer data, gpointer user_data)
}
break;
- case RSC_ROLE_UNPROMOTED:
- case RSC_ROLE_STOPPED:
+ case pcmk_role_unpromoted:
+ case pcmk_role_stopped:
// Instance can't be promoted
instance->priority = -INFINITY;
break;
- case RSC_ROLE_PROMOTED:
+ case pcmk_role_promoted:
// Nothing needed (re-creating actions after scheduling fencing)
break;
@@ -964,7 +980,7 @@ set_instance_priority(gpointer data, gpointer user_data)
g_list_free(list);
instance->sort_index = instance->priority;
- if (next_role == RSC_ROLE_PROMOTED) {
+ if (next_role == pcmk_role_promoted) {
instance->sort_index = INFINITY;
}
pe_rsc_trace(clone, "Assigning %s priority = %d",
@@ -981,11 +997,11 @@ set_instance_priority(gpointer data, gpointer user_data)
static void
set_instance_role(gpointer data, gpointer user_data)
{
- pe_resource_t *instance = (pe_resource_t *) data;
+ pcmk_resource_t *instance = (pcmk_resource_t *) data;
int *count = (int *) user_data;
- const pe_resource_t *clone = pe__const_top_resource(instance, false);
- pe_node_t *chosen = NULL;
+ const pcmk_resource_t *clone = pe__const_top_resource(instance, false);
+ pcmk_node_t *chosen = NULL;
show_promotion_score(instance);
@@ -994,7 +1010,7 @@ set_instance_role(gpointer data, gpointer user_data)
instance->id);
} else if ((*count < pe__clone_promoted_max(instance))
- || !pcmk_is_set(clone->flags, pe_rsc_managed)) {
+ || !pcmk_is_set(clone->flags, pcmk_rsc_managed)) {
chosen = node_to_be_promoted_on(instance);
}
@@ -1003,9 +1019,9 @@ set_instance_role(gpointer data, gpointer user_data)
return;
}
- if ((instance->role < RSC_ROLE_PROMOTED)
- && !pcmk_is_set(instance->cluster->flags, pe_flag_have_quorum)
- && (instance->cluster->no_quorum_policy == no_quorum_freeze)) {
+ if ((instance->role < pcmk_role_promoted)
+ && !pcmk_is_set(instance->cluster->flags, pcmk_sched_quorate)
+ && (instance->cluster->no_quorum_policy == pcmk_no_quorum_freeze)) {
crm_notice("Clone instance %s cannot be promoted without quorum",
instance->id);
set_next_role_unpromoted(instance, NULL);
@@ -1027,13 +1043,13 @@ set_instance_role(gpointer data, gpointer user_data)
* \param[in,out] rsc Promotable clone resource to update
*/
void
-pcmk__set_instance_roles(pe_resource_t *rsc)
+pcmk__set_instance_roles(pcmk_resource_t *rsc)
{
int promoted = 0;
GHashTableIter iter;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
- // Repurpose count to track the number of promoted instances allocated
+ // Repurpose count to track the number of promoted instances assigned
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
node->count = 0;
@@ -1059,11 +1075,11 @@ pcmk__set_instance_roles(pe_resource_t *rsc)
* \param[out] any_demoting Will be set true if any instance is demoting
*/
static void
-create_promotable_instance_actions(pe_resource_t *clone,
+create_promotable_instance_actions(pcmk_resource_t *clone,
bool *any_promoting, bool *any_demoting)
{
for (GList *iter = clone->children; iter != NULL; iter = iter->next) {
- pe_resource_t *instance = (pe_resource_t *) iter->data;
+ pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
instance->cmds->create_actions(instance);
check_for_role_change(instance, any_demoting, any_promoting);
@@ -1081,10 +1097,10 @@ create_promotable_instance_actions(pe_resource_t *clone,
* \param[in,out] clone Promotable clone to reset
*/
static void
-reset_instance_priorities(pe_resource_t *clone)
+reset_instance_priorities(pcmk_resource_t *clone)
{
for (GList *iter = clone->children; iter != NULL; iter = iter->next) {
- pe_resource_t *instance = (pe_resource_t *) iter->data;
+ pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
instance->priority = clone->priority;
}
@@ -1097,7 +1113,7 @@ reset_instance_priorities(pe_resource_t *clone)
* \param[in,out] clone Promotable clone to create actions for
*/
void
-pcmk__create_promotable_actions(pe_resource_t *clone)
+pcmk__create_promotable_actions(pcmk_resource_t *clone)
{
bool any_promoting = false;
bool any_demoting = false;
@@ -1119,19 +1135,19 @@ pcmk__create_promotable_actions(pe_resource_t *clone)
* \param[in,out] clone Promotable clone instance to order
*/
void
-pcmk__order_promotable_instances(pe_resource_t *clone)
+pcmk__order_promotable_instances(pcmk_resource_t *clone)
{
- pe_resource_t *previous = NULL; // Needed for ordered clones
+ pcmk_resource_t *previous = NULL; // Needed for ordered clones
pcmk__promotable_restart_ordering(clone);
for (GList *iter = clone->children; iter != NULL; iter = iter->next) {
- pe_resource_t *instance = (pe_resource_t *) iter->data;
+ pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
// Demote before promote
- pcmk__order_resource_actions(instance, RSC_DEMOTE,
- instance, RSC_PROMOTE,
- pe_order_optional);
+ pcmk__order_resource_actions(instance, PCMK_ACTION_DEMOTE,
+ instance, PCMK_ACTION_PROMOTE,
+ pcmk__ar_ordered);
order_instance_promotion(clone, instance, previous);
order_instance_demotion(clone, instance, previous);
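The hunks above switch the promotable-clone code from the old RSC_ROLE_* constants to the pcmk_role_* enumerators while keeping the logic intact: in set_instance_priority(), instances whose next role is started/unknown take their promotion score, unpromoted/stopped instances are ruled out with -INFINITY, and already-promoted instances are left alone. A minimal standalone sketch of that decision table, using illustrative enum and constant names rather than the real libpacemaker symbols:

    #include <limits.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the pcmk_role_* enumerators */
    enum demo_role { DEMO_ROLE_UNKNOWN, DEMO_ROLE_STOPPED, DEMO_ROLE_STARTED,
                     DEMO_ROLE_UNPROMOTED, DEMO_ROLE_PROMOTED };

    /* Pick an instance priority from its next role and promotion score,
     * mirroring the switch in set_instance_priority() above */
    static int
    demo_priority(enum demo_role next_role, int promotion_score, int current)
    {
        switch (next_role) {
            case DEMO_ROLE_STARTED:
            case DEMO_ROLE_UNKNOWN:
                return promotion_score;   /* promotion score (or -1 if none) */
            case DEMO_ROLE_UNPROMOTED:
            case DEMO_ROLE_STOPPED:
                return INT_MIN;           /* instance can't be promoted */
            case DEMO_ROLE_PROMOTED:
            default:
                return current;           /* nothing to change */
        }
    }

    int
    main(void)
    {
        printf("started: %d\n", demo_priority(DEMO_ROLE_STARTED, 10, 0));
        printf("stopped: %d\n", demo_priority(DEMO_ROLE_STOPPED, 10, 0));
        return 0;
    }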
@@ -1144,29 +1160,26 @@ pcmk__order_promotable_instances(pe_resource_t *clone)
* \brief Update dependent's allowed nodes for colocation with promotable
*
* \param[in,out] dependent Dependent resource to update
+ * \param[in] primary Primary resource
* \param[in] primary_node Node where an instance of the primary will be
* \param[in] colocation Colocation constraint to apply
*/
static void
-update_dependent_allowed_nodes(pe_resource_t *dependent,
- const pe_node_t *primary_node,
+update_dependent_allowed_nodes(pcmk_resource_t *dependent,
+ const pcmk_resource_t *primary,
+ const pcmk_node_t *primary_node,
const pcmk__colocation_t *colocation)
{
GHashTableIter iter;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
const char *primary_value = NULL;
- const char *attr = NULL;
+ const char *attr = colocation->node_attribute;
if (colocation->score >= INFINITY) {
return; // Colocation is mandatory, so allowed node scores don't matter
}
- // Get value of primary's colocation node attribute
- attr = colocation->node_attribute;
- if (attr == NULL) {
- attr = CRM_ATTR_UNAME;
- }
- primary_value = pe_node_attribute_raw(primary_node, attr);
+ primary_value = pcmk__colocation_node_attr(primary_node, attr, primary);
pe_rsc_trace(colocation->primary,
"Applying %s (%s with %s on %s by %s @%d) to %s",
@@ -1176,7 +1189,8 @@ update_dependent_allowed_nodes(pe_resource_t *dependent,
g_hash_table_iter_init(&iter, dependent->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
- const char *dependent_value = pe_node_attribute_raw(node, attr);
+ const char *dependent_value = pcmk__colocation_node_attr(node, attr,
+ dependent);
if (pcmk__str_eq(primary_value, dependent_value, pcmk__str_casei)) {
node->weight = pcmk__add_scores(node->weight, colocation->score);
@@ -1197,8 +1211,8 @@ update_dependent_allowed_nodes(pe_resource_t *dependent,
* \param[in] colocation Colocation constraint to apply
*/
void
-pcmk__update_dependent_with_promotable(const pe_resource_t *primary,
- pe_resource_t *dependent,
+pcmk__update_dependent_with_promotable(const pcmk_resource_t *primary,
+ pcmk_resource_t *dependent,
const pcmk__colocation_t *colocation)
{
GList *affected_nodes = NULL;
@@ -1208,35 +1222,36 @@ pcmk__update_dependent_with_promotable(const pe_resource_t *primary,
* each one.
*/
for (GList *iter = primary->children; iter != NULL; iter = iter->next) {
- pe_resource_t *instance = (pe_resource_t *) iter->data;
- pe_node_t *node = instance->fns->location(instance, NULL, FALSE);
+ pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
+ pcmk_node_t *node = instance->fns->location(instance, NULL, FALSE);
if (node == NULL) {
continue;
}
if (instance->fns->state(instance, FALSE) == colocation->primary_role) {
- update_dependent_allowed_nodes(dependent, node, colocation);
+ update_dependent_allowed_nodes(dependent, primary, node,
+ colocation);
affected_nodes = g_list_prepend(affected_nodes, node);
}
}
- /* For mandatory colocations, add the primary's node weight to the
- * dependent's node weight for each affected node, and ban the dependent
+ /* For mandatory colocations, add the primary's node score to the
+ * dependent's node score for each affected node, and ban the dependent
* from all other nodes.
*
* However, skip this for promoted-with-promoted colocations, otherwise
* inactive dependent instances can't start (in the unpromoted role).
*/
if ((colocation->score >= INFINITY)
- && ((colocation->dependent_role != RSC_ROLE_PROMOTED)
- || (colocation->primary_role != RSC_ROLE_PROMOTED))) {
+ && ((colocation->dependent_role != pcmk_role_promoted)
+ || (colocation->primary_role != pcmk_role_promoted))) {
pe_rsc_trace(colocation->primary,
"Applying %s (mandatory %s with %s) to %s",
colocation->id, colocation->dependent->id,
colocation->primary->id, dependent->id);
- node_list_exclude(dependent->allowed_nodes, affected_nodes,
- TRUE);
+ pcmk__colocation_intersect_nodes(dependent, primary, colocation,
+ affected_nodes, true);
}
g_list_free(affected_nodes);
}
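update_dependent_allowed_nodes() above now resolves the colocation's node attribute through pcmk__colocation_node_attr() and, for optional colocations, adds the colocation score to every allowed node whose attribute value matches the primary node's value. A self-contained sketch of that matching-and-scoring step, with hypothetical structures standing in for pcmk_node_t and a clamped addition standing in for pcmk__add_scores():

    #include <stdio.h>
    #include <string.h>

    #define DEMO_INFINITY 1000000

    /* Hypothetical stand-in for an allowed-node entry */
    struct demo_node {
        const char *name;
        const char *site;   /* the colocation node attribute, e.g. "site" */
        int score;
    };

    /* Clamp addition the way a score helper such as pcmk__add_scores() would */
    static int
    demo_add_scores(int a, int b)
    {
        long sum = (long) a + (long) b;

        if (sum >= DEMO_INFINITY) {
            return DEMO_INFINITY;
        }
        if (sum <= -DEMO_INFINITY) {
            return -DEMO_INFINITY;
        }
        return (int) sum;
    }

    /* Add an optional colocation's score to nodes matching the primary's value */
    static void
    demo_apply_colocation(struct demo_node *nodes, int n_nodes,
                          const char *primary_value, int colocation_score)
    {
        for (int i = 0; i < n_nodes; i++) {
            if (strcmp(nodes[i].site, primary_value) == 0) {
                nodes[i].score = demo_add_scores(nodes[i].score,
                                                 colocation_score);
            }
        }
    }

    int
    main(void)
    {
        struct demo_node nodes[] = {
            { "node1", "east", 0 }, { "node2", "west", 0 },
            { "node3", "east", 50 },
        };

        demo_apply_colocation(nodes, 3, "east", 100);
        for (int i = 0; i < 3; i++) {
            printf("%s: %d\n", nodes[i].name, nodes[i].score);
        }
        return 0;
    }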
@@ -1250,11 +1265,11 @@ pcmk__update_dependent_with_promotable(const pe_resource_t *primary,
* \param[in] colocation Colocation constraint to apply
*/
void
-pcmk__update_promotable_dependent_priority(const pe_resource_t *primary,
- pe_resource_t *dependent,
+pcmk__update_promotable_dependent_priority(const pcmk_resource_t *primary,
+ pcmk_resource_t *dependent,
const pcmk__colocation_t *colocation)
{
- pe_resource_t *primary_instance = NULL;
+ pcmk_resource_t *primary_instance = NULL;
// Look for a primary instance where dependent will be
primary_instance = pcmk__find_compatible_instance(dependent, primary,
diff --git a/lib/pacemaker/pcmk_sched_recurring.c b/lib/pacemaker/pcmk_sched_recurring.c
index c1b929b..9210fab 100644
--- a/lib/pacemaker/pcmk_sched_recurring.c
+++ b/lib/pacemaker/pcmk_sched_recurring.c
@@ -12,6 +12,7 @@
#include <stdbool.h>
#include <crm/msg_xml.h>
+#include <crm/common/scheduler_internal.h>
#include <pacemaker-internal.h>
#include "libpacemaker_private.h"
@@ -24,7 +25,7 @@ struct op_history {
// Parsed information
char *key; // Operation key for action
- enum rsc_role_e role; // Action role (or RSC_ROLE_UNKNOWN for default)
+ enum rsc_role_e role; // Action role (or pcmk_role_unknown for default)
guint interval_ms; // Action interval
};
@@ -55,7 +56,7 @@ xe_interval(const xmlNode *xml)
* once in the operation history of \p rsc, otherwise false
*/
static bool
-is_op_dup(const pe_resource_t *rsc, const char *name, guint interval_ms)
+is_op_dup(const pcmk_resource_t *rsc, const char *name, guint interval_ms)
{
const char *id = NULL;
@@ -63,8 +64,7 @@ is_op_dup(const pe_resource_t *rsc, const char *name, guint interval_ms)
op != NULL; op = crm_next_same_xml(op)) {
// Check whether action name and interval match
- if (!pcmk__str_eq(crm_element_value(op, "name"),
- name, pcmk__str_none)
+ if (!pcmk__str_eq(crm_element_value(op, "name"), name, pcmk__str_none)
|| (xe_interval(op) != interval_ms)) {
continue;
}
@@ -104,9 +104,11 @@ is_op_dup(const pe_resource_t *rsc, const char *name, guint interval_ms)
static bool
op_cannot_recur(const char *name)
{
- return pcmk__str_any_of(name, RSC_STOP, RSC_START, RSC_DEMOTE, RSC_PROMOTE,
- CRMD_ACTION_RELOAD_AGENT, CRMD_ACTION_MIGRATE,
- CRMD_ACTION_MIGRATED, NULL);
+ return pcmk__str_any_of(name, PCMK_ACTION_STOP, PCMK_ACTION_START,
+ PCMK_ACTION_DEMOTE, PCMK_ACTION_PROMOTE,
+ PCMK_ACTION_RELOAD_AGENT,
+ PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM,
+ NULL);
}
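op_cannot_recur() above is just a membership test over the action names that never make sense as recurring operations (stop, start, demote, promote, agent reload, and both migration steps). A minimal sketch of the NULL-terminated variadic membership check that pcmk__str_any_of() provides, with a simplified case-sensitive signature and the literal action names used only for illustration:

    #include <stdarg.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Simplified pcmk__str_any_of()-style check: true if name matches any of
     * the NULL-terminated candidates */
    static bool
    demo_str_any_of(const char *name, ...)
    {
        va_list ap;
        const char *candidate = NULL;
        bool found = false;

        va_start(ap, name);
        while ((candidate = va_arg(ap, const char *)) != NULL) {
            if (strcmp(name, candidate) == 0) {
                found = true;
                break;
            }
        }
        va_end(ap);
        return found;
    }

    static bool
    demo_op_cannot_recur(const char *name)
    {
        return demo_str_any_of(name, "stop", "start", "demote", "promote",
                               "reload-agent", "migrate_to", "migrate_from",
                               NULL);
    }

    int
    main(void)
    {
        printf("monitor: %d\n", demo_op_cannot_recur("monitor"));  /* 0 */
        printf("stop:    %d\n", demo_op_cannot_recur("stop"));     /* 1 */
        return 0;
    }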
/*!
@@ -120,7 +122,7 @@ op_cannot_recur(const char *name)
* \return true if \p xml is for a recurring action, otherwise false
*/
static bool
-is_recurring_history(const pe_resource_t *rsc, const xmlNode *xml,
+is_recurring_history(const pcmk_resource_t *rsc, const xmlNode *xml,
struct op_history *op)
{
const char *role = NULL;
@@ -151,24 +153,28 @@ is_recurring_history(const pe_resource_t *rsc, const xmlNode *xml,
// Ensure role is valid if specified
role = crm_element_value(xml, "role");
if (role == NULL) {
- op->role = RSC_ROLE_UNKNOWN;
+ op->role = pcmk_role_unknown;
} else {
op->role = text2role(role);
- if (op->role == RSC_ROLE_UNKNOWN) {
+ if (op->role == pcmk_role_unknown) {
pcmk__config_err("Ignoring %s because %s is not a valid role",
op->id, role);
+ return false;
}
}
- // Disabled resources don't get monitored
- op->key = pcmk__op_key(rsc->id, op->name, op->interval_ms);
- if (find_rsc_op_entry(rsc, op->key) == NULL) {
- crm_trace("Not creating recurring action %s for disabled resource %s",
- op->id, rsc->id);
- free(op->key);
+ // Only actions that are still configured and enabled matter
+ if (pcmk__find_action_config(rsc, op->name, op->interval_ms,
+ false) == NULL) {
+ pe_rsc_trace(rsc,
+ "Ignoring %s (%s-interval %s for %s) because it is "
+ "disabled or no longer in configuration",
+ op->id, pcmk__readable_interval(op->interval_ms), op->name,
+ rsc->id);
return false;
}
+ op->key = pcmk__op_key(rsc->id, op->name, op->interval_ms);
return true;
}
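Two behavioral points in the hunk above: an operation whose role attribute does not parse now invalidates the history entry (the added return false), and the old disabled-resource check is replaced by a lookup of the still-configured action via pcmk__find_action_config(). A compact sketch of the role-parsing half, with an illustrative parser in place of text2role() and invented role strings:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    enum demo_role { DEMO_ROLE_UNKNOWN = 0, DEMO_ROLE_STOPPED,
                     DEMO_ROLE_STARTED, DEMO_ROLE_UNPROMOTED,
                     DEMO_ROLE_PROMOTED };

    /* Illustrative replacement for text2role(): map a role name to the enum */
    static enum demo_role
    demo_text2role(const char *text)
    {
        if (strcmp(text, "Stopped") == 0)    return DEMO_ROLE_STOPPED;
        if (strcmp(text, "Started") == 0)    return DEMO_ROLE_STARTED;
        if (strcmp(text, "Unpromoted") == 0) return DEMO_ROLE_UNPROMOTED;
        if (strcmp(text, "Promoted") == 0)   return DEMO_ROLE_PROMOTED;
        return DEMO_ROLE_UNKNOWN;
    }

    /* Validate an optional role attribute the way the hunk above now does:
     * missing means "default", anything unparseable invalidates the entry */
    static bool
    demo_parse_op_role(const char *role_text, enum demo_role *role)
    {
        if (role_text == NULL) {
            *role = DEMO_ROLE_UNKNOWN;  /* default role */
            return true;
        }
        *role = demo_text2role(role_text);
        if (*role == DEMO_ROLE_UNKNOWN) {
            fprintf(stderr, "Ignoring operation: '%s' is not a valid role\n",
                    role_text);
            return false;
        }
        return true;
    }

    int
    main(void)
    {
        enum demo_role role;

        printf("missing ok?    %d\n", demo_parse_op_role(NULL, &role));
        printf("'Promoted' ok? %d\n", demo_parse_op_role("Promoted", &role));
        printf("'bogus' ok?    %d\n", demo_parse_op_role("bogus", &role));
        return 0;
    }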
@@ -184,9 +190,9 @@ is_recurring_history(const pe_resource_t *rsc, const xmlNode *xml,
* \return true if recurring action should be optional, otherwise false
*/
static bool
-active_recurring_should_be_optional(const pe_resource_t *rsc,
- const pe_node_t *node, const char *key,
- pe_action_t *start)
+active_recurring_should_be_optional(const pcmk_resource_t *rsc,
+ const pcmk_node_t *node, const char *key,
+ pcmk_action_t *start)
{
GList *possible_matches = NULL;
@@ -197,7 +203,7 @@ active_recurring_should_be_optional(const pe_resource_t *rsc,
}
if (!pcmk_is_set(rsc->cmds->action_flags(start, NULL),
- pe_action_optional)) {
+ pcmk_action_optional)) {
pe_rsc_trace(rsc, "%s will be mandatory because %s is",
key, start->uuid);
return false;
@@ -213,9 +219,9 @@ active_recurring_should_be_optional(const pe_resource_t *rsc,
for (const GList *iter = possible_matches;
iter != NULL; iter = iter->next) {
- const pe_action_t *op = (const pe_action_t *) iter->data;
+ const pcmk_action_t *op = (const pcmk_action_t *) iter->data;
- if (pcmk_is_set(op->flags, pe_action_reschedule)) {
+ if (pcmk_is_set(op->flags, pcmk_action_reschedule)) {
pe_rsc_trace(rsc,
"%s will be mandatory because "
"it needs to be rescheduled", key);
@@ -238,43 +244,43 @@ active_recurring_should_be_optional(const pe_resource_t *rsc,
* \param[in] op Resource history entry
*/
static void
-recurring_op_for_active(pe_resource_t *rsc, pe_action_t *start,
- const pe_node_t *node, const struct op_history *op)
+recurring_op_for_active(pcmk_resource_t *rsc, pcmk_action_t *start,
+ const pcmk_node_t *node, const struct op_history *op)
{
- pe_action_t *mon = NULL;
+ pcmk_action_t *mon = NULL;
bool is_optional = true;
+ const bool is_default_role = (op->role == pcmk_role_unknown);
// We're only interested in recurring actions for active roles
- if (op->role == RSC_ROLE_STOPPED) {
+ if (op->role == pcmk_role_stopped) {
return;
}
is_optional = active_recurring_should_be_optional(rsc, node, op->key,
start);
- if (((op->role != RSC_ROLE_UNKNOWN) && (rsc->next_role != op->role))
- || ((op->role == RSC_ROLE_UNKNOWN)
- && (rsc->next_role == RSC_ROLE_PROMOTED))) {
+ if ((!is_default_role && (rsc->next_role != op->role))
+ || (is_default_role && (rsc->next_role == pcmk_role_promoted))) {
// Configured monitor role doesn't match role resource will have
if (is_optional) { // It's running, so cancel it
char *after_key = NULL;
- pe_action_t *cancel_op = pcmk__new_cancel_action(rsc, op->name,
- op->interval_ms,
- node);
+ pcmk_action_t *cancel_op = pcmk__new_cancel_action(rsc, op->name,
+ op->interval_ms,
+ node);
switch (rsc->role) {
- case RSC_ROLE_UNPROMOTED:
- case RSC_ROLE_STARTED:
- if (rsc->next_role == RSC_ROLE_PROMOTED) {
+ case pcmk_role_unpromoted:
+ case pcmk_role_started:
+ if (rsc->next_role == pcmk_role_promoted) {
after_key = promote_key(rsc);
- } else if (rsc->next_role == RSC_ROLE_STOPPED) {
+ } else if (rsc->next_role == pcmk_role_stopped) {
after_key = stop_key(rsc);
}
break;
- case RSC_ROLE_PROMOTED:
+ case pcmk_role_promoted:
after_key = demote_key(rsc);
break;
default:
@@ -283,7 +289,8 @@ recurring_op_for_active(pe_resource_t *rsc, pe_action_t *start,
if (after_key) {
pcmk__new_ordering(rsc, NULL, cancel_op, rsc, after_key, NULL,
- pe_order_runnable_left, rsc->cluster);
+ pcmk__ar_unrunnable_first_blocks,
+ rsc->cluster);
}
}
@@ -291,7 +298,7 @@ recurring_op_for_active(pe_resource_t *rsc, pe_action_t *start,
"%s recurring action %s because %s configured for %s role "
"(not %s)",
(is_optional? "Cancelling" : "Ignoring"), op->key, op->id,
- role2text((op->role == RSC_ROLE_UNKNOWN)? RSC_ROLE_UNPROMOTED : op->role),
+ role2text(is_default_role? pcmk_role_unpromoted : op->role),
role2text(rsc->next_role));
return;
}
@@ -302,51 +309,55 @@ recurring_op_for_active(pe_resource_t *rsc, pe_action_t *start,
op->id, rsc->id, role2text(rsc->next_role),
pe__node_name(node));
- mon = custom_action(rsc, strdup(op->key), op->name, node, is_optional, TRUE,
+ mon = custom_action(rsc, strdup(op->key), op->name, node, is_optional,
rsc->cluster);
- if (!pcmk_is_set(start->flags, pe_action_runnable)) {
+ if (!pcmk_is_set(start->flags, pcmk_action_runnable)) {
pe_rsc_trace(rsc, "%s is unrunnable because start is", mon->uuid);
- pe__clear_action_flags(mon, pe_action_runnable);
+ pe__clear_action_flags(mon, pcmk_action_runnable);
} else if ((node == NULL) || !node->details->online
|| node->details->unclean) {
pe_rsc_trace(rsc, "%s is unrunnable because no node is available",
mon->uuid);
- pe__clear_action_flags(mon, pe_action_runnable);
+ pe__clear_action_flags(mon, pcmk_action_runnable);
- } else if (!pcmk_is_set(mon->flags, pe_action_optional)) {
+ } else if (!pcmk_is_set(mon->flags, pcmk_action_optional)) {
pe_rsc_info(rsc, "Start %s-interval %s for %s on %s",
pcmk__readable_interval(op->interval_ms), mon->task,
rsc->id, pe__node_name(node));
}
- if (rsc->next_role == RSC_ROLE_PROMOTED) {
+ if (rsc->next_role == pcmk_role_promoted) {
pe__add_action_expected_result(mon, CRM_EX_PROMOTED);
}
// Order monitor relative to other actions
- if ((node == NULL) || pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if ((node == NULL) || pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pcmk__new_ordering(rsc, start_key(rsc), NULL,
NULL, strdup(mon->uuid), mon,
- pe_order_implies_then|pe_order_runnable_left,
+ pcmk__ar_first_implies_then
+ |pcmk__ar_unrunnable_first_blocks,
rsc->cluster);
pcmk__new_ordering(rsc, reload_key(rsc), NULL,
NULL, strdup(mon->uuid), mon,
- pe_order_implies_then|pe_order_runnable_left,
+ pcmk__ar_first_implies_then
+ |pcmk__ar_unrunnable_first_blocks,
rsc->cluster);
- if (rsc->next_role == RSC_ROLE_PROMOTED) {
+ if (rsc->next_role == pcmk_role_promoted) {
pcmk__new_ordering(rsc, promote_key(rsc), NULL,
rsc, NULL, mon,
- pe_order_optional|pe_order_runnable_left,
+ pcmk__ar_ordered
+ |pcmk__ar_unrunnable_first_blocks,
rsc->cluster);
- } else if (rsc->role == RSC_ROLE_PROMOTED) {
+ } else if (rsc->role == pcmk_role_promoted) {
pcmk__new_ordering(rsc, demote_key(rsc), NULL,
rsc, NULL, mon,
- pe_order_optional|pe_order_runnable_left,
+ pcmk__ar_ordered
+ |pcmk__ar_unrunnable_first_blocks,
rsc->cluster);
}
}
@@ -363,11 +374,11 @@ recurring_op_for_active(pe_resource_t *rsc, pe_action_t *start,
* \param[in] interval_ms Action interval (in milliseconds)
*/
static void
-cancel_if_running(pe_resource_t *rsc, const pe_node_t *node, const char *key,
- const char *name, guint interval_ms)
+cancel_if_running(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ const char *key, const char *name, guint interval_ms)
{
GList *possible_matches = find_actions_exact(rsc->actions, key, node);
- pe_action_t *cancel_op = NULL;
+ pcmk_action_t *cancel_op = NULL;
if (possible_matches == NULL) {
return; // Recurring action isn't running on this node
@@ -377,8 +388,8 @@ cancel_if_running(pe_resource_t *rsc, const pe_node_t *node, const char *key,
cancel_op = pcmk__new_cancel_action(rsc, name, interval_ms, node);
switch (rsc->next_role) {
- case RSC_ROLE_STARTED:
- case RSC_ROLE_UNPROMOTED:
+ case pcmk_role_started:
+ case pcmk_role_unpromoted:
/* Order starts after cancel. If the current role is
* stopped, this cancels the monitor before the resource
* starts; if the current role is started, then this cancels
@@ -386,14 +397,14 @@ cancel_if_running(pe_resource_t *rsc, const pe_node_t *node, const char *key,
*/
pcmk__new_ordering(rsc, NULL, cancel_op,
rsc, start_key(rsc), NULL,
- pe_order_runnable_left, rsc->cluster);
+ pcmk__ar_unrunnable_first_blocks, rsc->cluster);
break;
default:
break;
}
pe_rsc_info(rsc,
"Cancelling %s-interval %s action for %s on %s because "
- "configured for " RSC_ROLE_STOPPED_S " role (not %s)",
+ "configured for " PCMK__ROLE_STOPPED " role (not %s)",
pcmk__readable_interval(interval_ms), name, rsc->id,
pe__node_name(node), role2text(rsc->next_role));
}
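cancel_if_running() and pcmk__new_cancel_action() above both identify a recurring operation by its operation key, which pcmk__op_key() builds as <resource>_<action>_<interval-ms>. A small sketch of that key construction; the format shown is the conventional one and should be treated as illustrative rather than a stable API guarantee:

    #include <stdio.h>
    #include <stdlib.h>

    /* Build an operation key like "myrsc_monitor_10000" on the heap;
     * the caller frees the result */
    static char *
    demo_op_key(const char *rsc_id, const char *task, unsigned int interval_ms)
    {
        int len = snprintf(NULL, 0, "%s_%s_%u", rsc_id, task, interval_ms);
        char *key = malloc((size_t) len + 1);

        if (key != NULL) {
            snprintf(key, (size_t) len + 1, "%s_%s_%u",
                     rsc_id, task, interval_ms);
        }
        return key;
    }

    int
    main(void)
    {
        char *key = demo_op_key("myrsc", "monitor", 10000);

        printf("%s\n", (key != NULL)? key : "(allocation failed)");
        free(key);
        return 0;
    }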
@@ -407,14 +418,14 @@ cancel_if_running(pe_resource_t *rsc, const pe_node_t *node, const char *key,
* \param[in,out] action Action to order after probes of \p rsc on \p node
*/
static void
-order_after_probes(pe_resource_t *rsc, const pe_node_t *node,
- pe_action_t *action)
+order_after_probes(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ pcmk_action_t *action)
{
- GList *probes = pe__resource_actions(rsc, node, RSC_STATUS, FALSE);
+ GList *probes = pe__resource_actions(rsc, node, PCMK_ACTION_MONITOR, FALSE);
for (GList *iter = probes; iter != NULL; iter = iter->next) {
- order_actions((pe_action_t *) iter->data, action,
- pe_order_runnable_left);
+ order_actions((pcmk_action_t *) iter->data, action,
+ pcmk__ar_unrunnable_first_blocks);
}
g_list_free(probes);
}
@@ -428,32 +439,33 @@ order_after_probes(pe_resource_t *rsc, const pe_node_t *node,
* \param[in,out] action Action to order after stops of \p rsc on \p node
*/
static void
-order_after_stops(pe_resource_t *rsc, const pe_node_t *node,
- pe_action_t *action)
+order_after_stops(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ pcmk_action_t *action)
{
- GList *stop_ops = pe__resource_actions(rsc, node, RSC_STOP, TRUE);
+ GList *stop_ops = pe__resource_actions(rsc, node, PCMK_ACTION_STOP, TRUE);
for (GList *iter = stop_ops; iter != NULL; iter = iter->next) {
- pe_action_t *stop = (pe_action_t *) iter->data;
+ pcmk_action_t *stop = (pcmk_action_t *) iter->data;
- if (!pcmk_is_set(stop->flags, pe_action_optional)
- && !pcmk_is_set(action->flags, pe_action_optional)
- && !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(stop->flags, pcmk_action_optional)
+ && !pcmk_is_set(action->flags, pcmk_action_optional)
+ && !pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pe_rsc_trace(rsc, "%s optional on %s: unmanaged",
action->uuid, pe__node_name(node));
- pe__set_action_flags(action, pe_action_optional);
+ pe__set_action_flags(action, pcmk_action_optional);
}
- if (!pcmk_is_set(stop->flags, pe_action_runnable)) {
+ if (!pcmk_is_set(stop->flags, pcmk_action_runnable)) {
crm_debug("%s unrunnable on %s: stop is unrunnable",
action->uuid, pe__node_name(node));
- pe__clear_action_flags(action, pe_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
}
- if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pcmk__new_ordering(rsc, stop_key(rsc), stop,
NULL, NULL, action,
- pe_order_implies_then|pe_order_runnable_left,
+ pcmk__ar_first_implies_then
+ |pcmk__ar_unrunnable_first_blocks,
rsc->cluster);
}
}
@@ -469,18 +481,18 @@ order_after_stops(pe_resource_t *rsc, const pe_node_t *node,
* \param[in] op Resource history entry
*/
static void
-recurring_op_for_inactive(pe_resource_t *rsc, const pe_node_t *node,
+recurring_op_for_inactive(pcmk_resource_t *rsc, const pcmk_node_t *node,
const struct op_history *op)
{
GList *possible_matches = NULL;
// We're only interested in recurring actions for the inactive role
- if (op->role != RSC_ROLE_STOPPED) {
+ if (op->role != pcmk_role_stopped) {
return;
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
- crm_notice("Ignoring %s (recurring monitors for " RSC_ROLE_STOPPED_S
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
+ crm_notice("Ignoring %s (recurring monitors for " PCMK__ROLE_STOPPED
" role are not supported for anonymous clones)", op->id);
return; // @TODO add support
}
@@ -489,10 +501,10 @@ recurring_op_for_inactive(pe_resource_t *rsc, const pe_node_t *node,
"where it should not be running", op->id, rsc->id);
for (GList *iter = rsc->cluster->nodes; iter != NULL; iter = iter->next) {
- pe_node_t *stop_node = (pe_node_t *) iter->data;
+ pcmk_node_t *stop_node = (pcmk_node_t *) iter->data;
bool is_optional = true;
- pe_action_t *stopped_mon = NULL;
+ pcmk_action_t *stopped_mon = NULL;
// Cancel action on node where resource will be active
if ((node != NULL)
@@ -509,16 +521,16 @@ recurring_op_for_inactive(pe_resource_t *rsc, const pe_node_t *node,
pe_rsc_trace(rsc,
"Creating %s recurring action %s for %s (%s "
- RSC_ROLE_STOPPED_S " on %s)",
+ PCMK__ROLE_STOPPED " on %s)",
(is_optional? "optional" : "mandatory"),
op->key, op->id, rsc->id, pe__node_name(stop_node));
stopped_mon = custom_action(rsc, strdup(op->key), op->name, stop_node,
- is_optional, TRUE, rsc->cluster);
+ is_optional, rsc->cluster);
pe__add_action_expected_result(stopped_mon, CRM_EX_NOT_RUNNING);
- if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
order_after_probes(rsc, stop_node, stopped_mon);
}
@@ -530,13 +542,13 @@ recurring_op_for_inactive(pe_resource_t *rsc, const pe_node_t *node,
if (!stop_node->details->online || stop_node->details->unclean) {
pe_rsc_debug(rsc, "%s unrunnable on %s: node unavailable)",
stopped_mon->uuid, pe__node_name(stop_node));
- pe__clear_action_flags(stopped_mon, pe_action_runnable);
+ pe__clear_action_flags(stopped_mon, pcmk_action_runnable);
}
- if (pcmk_is_set(stopped_mon->flags, pe_action_runnable)
- && !pcmk_is_set(stopped_mon->flags, pe_action_optional)) {
+ if (pcmk_is_set(stopped_mon->flags, pcmk_action_runnable)
+ && !pcmk_is_set(stopped_mon->flags, pcmk_action_optional)) {
crm_notice("Start recurring %s-interval %s for "
- RSC_ROLE_STOPPED_S " %s on %s",
+ PCMK__ROLE_STOPPED " %s on %s",
pcmk__readable_interval(op->interval_ms),
stopped_mon->task, rsc->id, pe__node_name(stop_node));
}
@@ -550,17 +562,17 @@ recurring_op_for_inactive(pe_resource_t *rsc, const pe_node_t *node,
* \param[in,out] rsc Resource to create recurring actions for
*/
void
-pcmk__create_recurring_actions(pe_resource_t *rsc)
+pcmk__create_recurring_actions(pcmk_resource_t *rsc)
{
- pe_action_t *start = NULL;
+ pcmk_action_t *start = NULL;
- if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) {
pe_rsc_trace(rsc, "Skipping recurring actions for blocked resource %s",
rsc->id);
return;
}
- if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)) {
pe_rsc_trace(rsc, "Skipping recurring actions for %s "
"in maintenance mode", rsc->id);
return;
@@ -575,8 +587,8 @@ pcmk__create_recurring_actions(pe_resource_t *rsc)
"in maintenance mode",
rsc->id, pe__node_name(rsc->allocated_to));
- } else if ((rsc->next_role != RSC_ROLE_STOPPED)
- || !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ } else if ((rsc->next_role != pcmk_role_stopped)
+ || !pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
// Recurring actions for active roles needed
start = start_action(rsc, rsc->allocated_to, TRUE);
}
@@ -612,11 +624,11 @@ pcmk__create_recurring_actions(pe_resource_t *rsc)
*
* \return Created op
*/
-pe_action_t *
-pcmk__new_cancel_action(pe_resource_t *rsc, const char *task, guint interval_ms,
- const pe_node_t *node)
+pcmk_action_t *
+pcmk__new_cancel_action(pcmk_resource_t *rsc, const char *task,
+ guint interval_ms, const pcmk_node_t *node)
{
- pe_action_t *cancel_op = NULL;
+ pcmk_action_t *cancel_op = NULL;
char *key = NULL;
char *interval_ms_s = NULL;
@@ -625,10 +637,10 @@ pcmk__new_cancel_action(pe_resource_t *rsc, const char *task, guint interval_ms,
// @TODO dangerous if possible to schedule another action with this key
key = pcmk__op_key(rsc->id, task, interval_ms);
- cancel_op = custom_action(rsc, key, RSC_CANCEL, node, FALSE, TRUE,
+ cancel_op = custom_action(rsc, key, PCMK_ACTION_CANCEL, node, FALSE,
rsc->cluster);
- pcmk__str_update(&cancel_op->task, RSC_CANCEL);
+ pcmk__str_update(&cancel_op->task, PCMK_ACTION_CANCEL);
pcmk__str_update(&cancel_op->cancel_task, task);
interval_ms_s = crm_strdup_printf("%u", interval_ms);
@@ -648,14 +660,14 @@ pcmk__new_cancel_action(pe_resource_t *rsc, const char *task, guint interval_ms,
* \param[in] task Action name
* \param[in] interval_ms Action interval
* \param[in] node Node that history entry is for
- * \param[in] reason Short description of why action is being cancelled
+ * \param[in] reason Short description of why action is cancelled
*/
void
-pcmk__schedule_cancel(pe_resource_t *rsc, const char *call_id, const char *task,
- guint interval_ms, const pe_node_t *node,
- const char *reason)
+pcmk__schedule_cancel(pcmk_resource_t *rsc, const char *call_id,
+ const char *task, guint interval_ms,
+ const pcmk_node_t *node, const char *reason)
{
- pe_action_t *cancel = NULL;
+ pcmk_action_t *cancel = NULL;
CRM_CHECK((rsc != NULL) && (task != NULL)
&& (node != NULL) && (reason != NULL),
@@ -669,12 +681,12 @@ pcmk__schedule_cancel(pe_resource_t *rsc, const char *call_id, const char *task,
// Cancellations happen after stops
pcmk__new_ordering(rsc, stop_key(rsc), NULL, rsc, NULL, cancel,
- pe_order_optional, rsc->cluster);
+ pcmk__ar_ordered, rsc->cluster);
}
/*!
* \internal
- * \brief Reschedule a recurring action
+ * \brief Create a recurring action marked as needing rescheduling if active
*
* \param[in,out] rsc Resource that action is for
* \param[in] task Name of action being rescheduled
@@ -682,16 +694,16 @@ pcmk__schedule_cancel(pe_resource_t *rsc, const char *call_id, const char *task,
* \param[in,out] node Node where action should be rescheduled
*/
void
-pcmk__reschedule_recurring(pe_resource_t *rsc, const char *task,
- guint interval_ms, pe_node_t *node)
+pcmk__reschedule_recurring(pcmk_resource_t *rsc, const char *task,
+ guint interval_ms, pcmk_node_t *node)
{
- pe_action_t *op = NULL;
+ pcmk_action_t *op = NULL;
trigger_unfencing(rsc, node, "Device parameters changed (reschedule)",
NULL, rsc->cluster);
op = custom_action(rsc, pcmk__op_key(rsc->id, task, interval_ms),
- task, node, TRUE, TRUE, rsc->cluster);
- pe__set_action_flags(op, pe_action_reschedule);
+ task, node, TRUE, rsc->cluster);
+ pe__set_action_flags(op, pcmk_action_reschedule);
}
/*!
@@ -703,7 +715,7 @@ pcmk__reschedule_recurring(pe_resource_t *rsc, const char *task,
* \return true if \p action has a nonzero interval, otherwise false
*/
bool
-pcmk__action_is_recurring(const pe_action_t *action)
+pcmk__action_is_recurring(const pcmk_action_t *action)
{
guint interval_ms = 0;
diff --git a/lib/pacemaker/pcmk_sched_remote.c b/lib/pacemaker/pcmk_sched_remote.c
index 6adb5d4..c915389 100644
--- a/lib/pacemaker/pcmk_sched_remote.c
+++ b/lib/pacemaker/pcmk_sched_remote.c
@@ -50,42 +50,44 @@ state2text(enum remote_connection_state state)
return "impossible";
}
-/* We always use pe_order_preserve with these convenience functions to exempt
- * internally generated constraints from the prohibition of user constraints
- * involving remote connection resources.
+/* We always use pcmk__ar_guest_allowed with these convenience functions to
+ * exempt internally generated constraints from the prohibition of user
+ * constraints involving remote connection resources.
*
- * The start ordering additionally uses pe_order_runnable_left so that the
- * specified action is not runnable if the start is not runnable.
+ * The start ordering additionally uses pcmk__ar_unrunnable_first_blocks so that
+ * the specified action is not runnable if the start is not runnable.
*/
static inline void
-order_start_then_action(pe_resource_t *first_rsc, pe_action_t *then_action,
- uint32_t extra, pe_working_set_t *data_set)
+order_start_then_action(pcmk_resource_t *first_rsc, pcmk_action_t *then_action,
+ uint32_t extra)
{
- if ((first_rsc != NULL) && (then_action != NULL) && (data_set != NULL)) {
+ if ((first_rsc != NULL) && (then_action != NULL)) {
pcmk__new_ordering(first_rsc, start_key(first_rsc), NULL,
then_action->rsc, NULL, then_action,
- pe_order_preserve|pe_order_runnable_left|extra,
- data_set);
+ pcmk__ar_guest_allowed
+ |pcmk__ar_unrunnable_first_blocks
+ |extra,
+ first_rsc->cluster);
}
}
static inline void
-order_action_then_stop(pe_action_t *first_action, pe_resource_t *then_rsc,
- uint32_t extra, pe_working_set_t *data_set)
+order_action_then_stop(pcmk_action_t *first_action, pcmk_resource_t *then_rsc,
+ uint32_t extra)
{
- if ((first_action != NULL) && (then_rsc != NULL) && (data_set != NULL)) {
+ if ((first_action != NULL) && (then_rsc != NULL)) {
pcmk__new_ordering(first_action->rsc, NULL, first_action,
then_rsc, stop_key(then_rsc), NULL,
- pe_order_preserve|extra, data_set);
+ pcmk__ar_guest_allowed|extra, then_rsc->cluster);
}
}
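The two convenience wrappers above now derive the scheduler from the resources themselves (first_rsc->cluster and then_rsc->cluster) instead of taking a data_set argument, and they OR fixed base flags into whatever the caller passes. A tiny sketch of that flag-combination pattern, with hypothetical names standing in for the pcmk__ar_* ordering flags:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical ordering flags; the real pcmk__ar_* values differ */
    #define DEMO_AR_GUEST_ALLOWED            (UINT32_C(1) << 0)
    #define DEMO_AR_UNRUNNABLE_FIRST_BLOCKS  (UINT32_C(1) << 1)
    #define DEMO_AR_FIRST_IMPLIES_THEN       (UINT32_C(1) << 2)

    /* Mirror order_start_then_action(): always-on base flags plus caller
     * extras, combined before the ordering is created */
    static uint32_t
    demo_start_ordering_flags(uint32_t extra)
    {
        return DEMO_AR_GUEST_ALLOWED | DEMO_AR_UNRUNNABLE_FIRST_BLOCKS | extra;
    }

    int
    main(void)
    {
        uint32_t flags = demo_start_ordering_flags(DEMO_AR_FIRST_IMPLIES_THEN);

        printf("flags = 0x%x\n", flags);  /* 0x7 with these demo values */
        return 0;
    }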
static enum remote_connection_state
-get_remote_node_state(const pe_node_t *node)
+get_remote_node_state(const pcmk_node_t *node)
{
- const pe_resource_t *remote_rsc = NULL;
- const pe_node_t *cluster_node = NULL;
+ const pcmk_resource_t *remote_rsc = NULL;
+ const pcmk_node_t *cluster_node = NULL;
CRM_ASSERT(node != NULL);
@@ -98,7 +100,7 @@ get_remote_node_state(const pe_node_t *node)
* is unclean or went offline, we can't process any operations
* on that remote node until after it starts elsewhere.
*/
- if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
+ if ((remote_rsc->next_role == pcmk_role_stopped)
|| (remote_rsc->allocated_to == NULL)) {
// The connection resource is not going to run anywhere
@@ -110,14 +112,14 @@ get_remote_node_state(const pe_node_t *node)
return remote_state_failed;
}
- if (!pcmk_is_set(remote_rsc->flags, pe_rsc_failed)) {
+ if (!pcmk_is_set(remote_rsc->flags, pcmk_rsc_failed)) {
/* Connection resource is cleanly stopped */
return remote_state_stopped;
}
/* Connection resource is failed */
- if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
+ if ((remote_rsc->next_role == pcmk_role_stopped)
&& remote_rsc->remote_reconnect_ms
&& node->details->remote_was_fenced
&& !pe__shutdown_requested(node)) {
@@ -164,13 +166,13 @@ get_remote_node_state(const pe_node_t *node)
* \param[in,out] action An action scheduled on a Pacemaker Remote node
*/
static void
-apply_remote_ordering(pe_action_t *action)
+apply_remote_ordering(pcmk_action_t *action)
{
- pe_resource_t *remote_rsc = NULL;
+ pcmk_resource_t *remote_rsc = NULL;
enum action_tasks task = text2task(action->task);
enum remote_connection_state state = get_remote_node_state(action->node);
- uint32_t order_opts = pe_order_none;
+ uint32_t order_opts = pcmk__ar_none;
if (action->rsc == NULL) {
return;
@@ -183,37 +185,35 @@ apply_remote_ordering(pe_action_t *action)
crm_trace("Order %s action %s relative to %s%s (state: %s)",
action->task, action->uuid,
- pcmk_is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
+ pcmk_is_set(remote_rsc->flags, pcmk_rsc_failed)? "failed " : "",
remote_rsc->id, state2text(state));
- if (pcmk__strcase_any_of(action->task, CRMD_ACTION_MIGRATE,
- CRMD_ACTION_MIGRATED, NULL)) {
- /* Migration ops map to "no_action", but we need to apply the same
- * ordering as for stop or demote (see get_router_node()).
+ if (pcmk__strcase_any_of(action->task, PCMK_ACTION_MIGRATE_TO,
+ PCMK_ACTION_MIGRATE_FROM, NULL)) {
+ /* Migration ops map to pcmk_action_unspecified, but we need to apply
+ * the same ordering as for stop or demote (see get_router_node()).
*/
- task = stop_rsc;
+ task = pcmk_action_stop;
}
switch (task) {
- case start_rsc:
- case action_promote:
- order_opts = pe_order_none;
+ case pcmk_action_start:
+ case pcmk_action_promote:
+ order_opts = pcmk__ar_none;
if (state == remote_state_failed) {
/* Force recovery, by making this action required */
- pe__set_order_flags(order_opts, pe_order_implies_then);
+ pe__set_order_flags(order_opts, pcmk__ar_first_implies_then);
}
/* Ensure connection is up before running this action */
- order_start_then_action(remote_rsc, action, order_opts,
- remote_rsc->cluster);
+ order_start_then_action(remote_rsc, action, order_opts);
break;
- case stop_rsc:
+ case pcmk_action_stop:
if (state == remote_state_alive) {
order_action_then_stop(action, remote_rsc,
- pe_order_implies_first,
- remote_rsc->cluster);
+ pcmk__ar_then_implies_first);
} else if (state == remote_state_failed) {
/* The resource is active on the node, but since we don't have a
@@ -223,28 +223,27 @@ apply_remote_ordering(pe_action_t *action)
* by the fencing.
*/
pe_fence_node(remote_rsc->cluster, action->node,
- "resources are active but connection is unrecoverable",
+ "resources are active but "
+ "connection is unrecoverable",
FALSE);
- } else if (remote_rsc->next_role == RSC_ROLE_STOPPED) {
+ } else if (remote_rsc->next_role == pcmk_role_stopped) {
/* State must be remote_state_unknown or remote_state_stopped.
* Since the connection is not coming back up in this
* transition, stop this resource first.
*/
order_action_then_stop(action, remote_rsc,
- pe_order_implies_first,
- remote_rsc->cluster);
+ pcmk__ar_then_implies_first);
} else {
/* The connection is going to be started somewhere else, so
* stop this resource after that completes.
*/
- order_start_then_action(remote_rsc, action, pe_order_none,
- remote_rsc->cluster);
+ order_start_then_action(remote_rsc, action, pcmk__ar_none);
}
break;
- case action_demote:
+ case pcmk_action_demote:
/* Only order this demote relative to the connection start if the
* connection isn't being torn down. Otherwise, the demote would be
* blocked because the connection start would not be allowed.
@@ -252,8 +251,7 @@ apply_remote_ordering(pe_action_t *action)
if ((state == remote_state_resting)
|| (state == remote_state_unknown)) {
- order_start_then_action(remote_rsc, action, pe_order_none,
- remote_rsc->cluster);
+ order_start_then_action(remote_rsc, action, pcmk__ar_none);
} /* Otherwise we can rely on the stop ordering */
break;
@@ -265,13 +263,12 @@ apply_remote_ordering(pe_action_t *action)
* the connection was re-established
*/
order_start_then_action(remote_rsc, action,
- pe_order_implies_then,
- remote_rsc->cluster);
+ pcmk__ar_first_implies_then);
} else {
- pe_node_t *cluster_node = pe__current_node(remote_rsc);
+ pcmk_node_t *cluster_node = pe__current_node(remote_rsc);
- if ((task == monitor_rsc) && (state == remote_state_failed)) {
+ if ((task == pcmk_action_monitor) && (state == remote_state_failed)) {
/* We would only be here if we do not know the state of the
* resource on the remote node. Since we have no way to find
* out, it is necessary to fence the node.
@@ -287,12 +284,10 @@ apply_remote_ordering(pe_action_t *action)
* stopped _before_ we let the connection get closed.
*/
order_action_then_stop(action, remote_rsc,
- pe_order_runnable_left,
- remote_rsc->cluster);
+ pcmk__ar_unrunnable_first_blocks);
} else {
- order_start_then_action(remote_rsc, action, pe_order_none,
- remote_rsc->cluster);
+ order_start_then_action(remote_rsc, action, pcmk__ar_none);
}
}
break;
@@ -300,7 +295,7 @@ apply_remote_ordering(pe_action_t *action)
}
static void
-apply_container_ordering(pe_action_t *action, pe_working_set_t *data_set)
+apply_container_ordering(pcmk_action_t *action)
{
/* VMs are also classified as containers for these purposes... in
* that they both involve a 'thing' running on a real or remote
@@ -309,8 +304,8 @@ apply_container_ordering(pe_action_t *action, pe_working_set_t *data_set)
* This allows us to be smarter about the type and extent of
* recovery actions required in various scenarios
*/
- pe_resource_t *remote_rsc = NULL;
- pe_resource_t *container = NULL;
+ pcmk_resource_t *remote_rsc = NULL;
+ pcmk_resource_t *container = NULL;
enum action_tasks task = text2task(action->task);
CRM_ASSERT(action->rsc != NULL);
@@ -323,40 +318,40 @@ apply_container_ordering(pe_action_t *action, pe_working_set_t *data_set)
container = remote_rsc->container;
CRM_ASSERT(container != NULL);
- if (pcmk_is_set(container->flags, pe_rsc_failed)) {
- pe_fence_node(data_set, action->node, "container failed", FALSE);
+ if (pcmk_is_set(container->flags, pcmk_rsc_failed)) {
+ pe_fence_node(action->rsc->cluster, action->node, "container failed",
+ FALSE);
}
crm_trace("Order %s action %s relative to %s%s for %s%s",
action->task, action->uuid,
- pcmk_is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
+ pcmk_is_set(remote_rsc->flags, pcmk_rsc_failed)? "failed " : "",
remote_rsc->id,
- pcmk_is_set(container->flags, pe_rsc_failed)? "failed " : "",
+ pcmk_is_set(container->flags, pcmk_rsc_failed)? "failed " : "",
container->id);
- if (pcmk__strcase_any_of(action->task, CRMD_ACTION_MIGRATE,
- CRMD_ACTION_MIGRATED, NULL)) {
- /* Migration ops map to "no_action", but we need to apply the same
- * ordering as for stop or demote (see get_router_node()).
+ if (pcmk__strcase_any_of(action->task, PCMK_ACTION_MIGRATE_TO,
+ PCMK_ACTION_MIGRATE_FROM, NULL)) {
+ /* Migration ops map to pcmk_action_unspecified, but we need to apply
+ * the same ordering as for stop or demote (see get_router_node()).
*/
- task = stop_rsc;
+ task = pcmk_action_stop;
}
switch (task) {
- case start_rsc:
- case action_promote:
+ case pcmk_action_start:
+ case pcmk_action_promote:
// Force resource recovery if the container is recovered
- order_start_then_action(container, action, pe_order_implies_then,
- data_set);
+ order_start_then_action(container, action,
+ pcmk__ar_first_implies_then);
// Wait for the connection resource to be up, too
- order_start_then_action(remote_rsc, action, pe_order_none,
- data_set);
+ order_start_then_action(remote_rsc, action, pcmk__ar_none);
break;
- case stop_rsc:
- case action_demote:
- if (pcmk_is_set(container->flags, pe_rsc_failed)) {
+ case pcmk_action_stop:
+ case pcmk_action_demote:
+ if (pcmk_is_set(container->flags, pcmk_rsc_failed)) {
/* When the container representing a guest node fails, any stop
* or demote actions for resources running on the guest node
* are implied by the container stopping. This is similar to
@@ -372,8 +367,7 @@ apply_container_ordering(pe_action_t *action, pe_working_set_t *data_set)
* stopped (otherwise we re-introduce an ordering loop when the
* connection is restarting).
*/
- order_action_then_stop(action, remote_rsc, pe_order_none,
- data_set);
+ order_action_then_stop(action, remote_rsc, pcmk__ar_none);
}
break;
@@ -384,13 +378,12 @@ apply_container_ordering(pe_action_t *action, pe_working_set_t *data_set)
* recurring monitors to be restarted, even if just
* the connection was re-established
*/
- if(task != no_action) {
+ if (task != pcmk_action_unspecified) {
order_start_then_action(remote_rsc, action,
- pe_order_implies_then, data_set);
+ pcmk__ar_first_implies_then);
}
} else {
- order_start_then_action(remote_rsc, action, pe_order_none,
- data_set);
+ order_start_then_action(remote_rsc, action, pcmk__ar_none);
}
break;
}
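Both apply_remote_ordering() and apply_container_ordering() remap the two migration actions onto the stop case before their switch statements, because a migration needs the same connection/container ordering as a stop or demote. A standalone sketch of that remapping, with an illustrative task enum and plain string names:

    #include <stdio.h>
    #include <string.h>

    enum demo_task { DEMO_TASK_UNSPECIFIED, DEMO_TASK_START, DEMO_TASK_STOP,
                     DEMO_TASK_PROMOTE, DEMO_TASK_DEMOTE, DEMO_TASK_MONITOR };

    /* Map an action name to a task, folding migrate_to/migrate_from into stop
     * the way the hunks above do before switching on the task */
    static enum demo_task
    demo_classify_task(const char *name)
    {
        if (strcmp(name, "start") == 0)   return DEMO_TASK_START;
        if (strcmp(name, "stop") == 0)    return DEMO_TASK_STOP;
        if (strcmp(name, "promote") == 0) return DEMO_TASK_PROMOTE;
        if (strcmp(name, "demote") == 0)  return DEMO_TASK_DEMOTE;
        if (strcmp(name, "monitor") == 0) return DEMO_TASK_MONITOR;
        if ((strcmp(name, "migrate_to") == 0)
            || (strcmp(name, "migrate_from") == 0)) {
            return DEMO_TASK_STOP;        /* ordered like a stop/demote */
        }
        return DEMO_TASK_UNSPECIFIED;
    }

    int
    main(void)
    {
        printf("migrate_to -> %d (stop is %d)\n",
               demo_classify_task("migrate_to"), DEMO_TASK_STOP);
        return 0;
    }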
@@ -400,20 +393,20 @@ apply_container_ordering(pe_action_t *action, pe_working_set_t *data_set)
* \internal
* \brief Order all relevant actions relative to remote connection actions
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__order_remote_connection_actions(pe_working_set_t *data_set)
+pcmk__order_remote_connection_actions(pcmk_scheduler_t *scheduler)
{
- if (!pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_have_remote_nodes)) {
return;
}
crm_trace("Creating remote connection orderings");
- for (GList *gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
- pe_resource_t *remote = NULL;
+ for (GList *iter = scheduler->actions; iter != NULL; iter = iter->next) {
+ pcmk_action_t *action = iter->data;
+ pcmk_resource_t *remote = NULL;
// We are only interested in resource actions
if (action->rsc == NULL) {
@@ -425,16 +418,18 @@ pcmk__order_remote_connection_actions(pe_working_set_t *data_set)
* any start of the resource in this transition.
*/
if (action->rsc->is_remote_node &&
- pcmk__str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT, pcmk__str_casei)) {
+ pcmk__str_eq(action->task, PCMK_ACTION_CLEAR_FAILCOUNT,
+ pcmk__str_none)) {
pcmk__new_ordering(action->rsc, NULL, action, action->rsc,
- pcmk__op_key(action->rsc->id, RSC_START, 0),
- NULL, pe_order_optional, data_set);
+ pcmk__op_key(action->rsc->id, PCMK_ACTION_START,
+ 0),
+ NULL, pcmk__ar_ordered, scheduler);
continue;
}
- // We are only interested in actions allocated to a node
+ // We are only interested in actions assigned to a node
if (action->node == NULL) {
continue;
}
@@ -449,7 +444,7 @@ pcmk__order_remote_connection_actions(pe_working_set_t *data_set)
* real actions and vice versa later in update_actions() at the end of
* pcmk__apply_orderings().
*/
- if (pcmk_is_set(action->flags, pe_action_pseudo)) {
+ if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
continue;
}
@@ -464,16 +459,17 @@ pcmk__order_remote_connection_actions(pe_working_set_t *data_set)
* remote connection. This ensures that if the connection fails to
* start, we leave the resource running on the original node.
*/
- if (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)) {
+ if (pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_none)) {
for (GList *item = action->rsc->actions; item != NULL;
item = item->next) {
- pe_action_t *rsc_action = item->data;
+ pcmk_action_t *rsc_action = item->data;
- if ((rsc_action->node->details != action->node->details)
- && pcmk__str_eq(rsc_action->task, RSC_STOP, pcmk__str_casei)) {
+ if (!pe__same_node(rsc_action->node, action->node)
+ && pcmk__str_eq(rsc_action->task, PCMK_ACTION_STOP,
+ pcmk__str_none)) {
pcmk__new_ordering(remote, start_key(remote), NULL,
action->rsc, NULL, rsc_action,
- pe_order_optional, data_set);
+ pcmk__ar_ordered, scheduler);
}
}
}
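The loop above now compares nodes with pe__same_node() instead of comparing node->details pointers directly; the intent is the same (same underlying cluster node), with the helper also guarding against NULL arguments. A minimal sketch of that identity check on a hypothetical node type, where several node "views" can share one details record:

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_node_details { const char *uname; };
    struct demo_node { struct demo_node_details *details; int assign_score; };

    /* Same underlying cluster node, regardless of which view we hold */
    static bool
    demo_same_node(const struct demo_node *a, const struct demo_node *b)
    {
        return (a != NULL) && (b != NULL) && (a->details == b->details);
    }

    int
    main(void)
    {
        struct demo_node_details shared = { "node1" };
        struct demo_node view1 = { &shared, 10 };
        struct demo_node view2 = { &shared, -5 };

        printf("same? %d\n", demo_same_node(&view1, &view2));  /* 1 */
        printf("NULL? %d\n", demo_same_node(&view1, NULL));    /* 0 */
        return 0;
    }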
@@ -489,7 +485,7 @@ pcmk__order_remote_connection_actions(pe_working_set_t *data_set)
*/
if (remote->container) {
crm_trace("Container ordering for %s", action->uuid);
- apply_container_ordering(action, data_set);
+ apply_container_ordering(action);
} else {
crm_trace("Remote ordering for %s", action->uuid);
@@ -507,7 +503,7 @@ pcmk__order_remote_connection_actions(pe_working_set_t *data_set)
* \return true if \p node is a failed remote node, false otherwise
*/
bool
-pcmk__is_failed_remote_node(const pe_node_t *node)
+pcmk__is_failed_remote_node(const pcmk_node_t *node)
{
return pe__is_remote_node(node) && (node->details->remote_rsc != NULL)
&& (get_remote_node_state(node) == remote_state_failed);
@@ -524,7 +520,8 @@ pcmk__is_failed_remote_node(const pe_node_t *node)
* resource, otherwise false
*/
bool
-pcmk__rsc_corresponds_to_guest(const pe_resource_t *rsc, const pe_node_t *node)
+pcmk__rsc_corresponds_to_guest(const pcmk_resource_t *rsc,
+ const pcmk_node_t *node)
{
return (rsc != NULL) && (rsc->fillers != NULL) && (node != NULL)
&& (node->details->remote_rsc != NULL)
@@ -545,15 +542,15 @@ pcmk__rsc_corresponds_to_guest(const pe_resource_t *rsc, const pe_node_t *node)
* \return Connection host that action should be routed through if remote,
* otherwise NULL
*/
-pe_node_t *
-pcmk__connection_host_for_action(const pe_action_t *action)
+pcmk_node_t *
+pcmk__connection_host_for_action(const pcmk_action_t *action)
{
- pe_node_t *began_on = NULL;
- pe_node_t *ended_on = NULL;
+ pcmk_node_t *began_on = NULL;
+ pcmk_node_t *ended_on = NULL;
bool partial_migration = false;
const char *task = action->task;
- if (pcmk__str_eq(task, CRM_OP_FENCE, pcmk__str_casei)
+ if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_none)
|| !pe__is_guest_or_remote_node(action->node)) {
return NULL;
}
@@ -586,7 +583,7 @@ pcmk__connection_host_for_action(const pe_action_t *action)
return began_on;
}
- if (began_on->details == ended_on->details) {
+ if (pe__same_node(began_on, ended_on)) {
crm_trace("Routing %s for %s through remote connection's "
"current node %s (not moving)%s",
action->task, (action->rsc? action->rsc->id : "no resource"),
@@ -602,7 +599,7 @@ pcmk__connection_host_for_action(const pe_action_t *action)
* on.
*/
- if (pcmk__str_eq(task, "notify", pcmk__str_casei)) {
+ if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_none)) {
task = g_hash_table_lookup(action->meta, "notify_operation");
}
@@ -618,8 +615,10 @@ pcmk__connection_host_for_action(const pe_action_t *action)
* the connection's pseudo-start on the migration target, so the target is
* the router node.
*/
- if (pcmk__strcase_any_of(task, "cancel", "stop", "demote", "migrate_from",
- "migrate_to", NULL) && !partial_migration) {
+ if (pcmk__strcase_any_of(task, PCMK_ACTION_CANCEL, PCMK_ACTION_STOP,
+ PCMK_ACTION_DEMOTE, PCMK_ACTION_MIGRATE_FROM,
+ PCMK_ACTION_MIGRATE_TO, NULL)
+ && !partial_migration) {
crm_trace("Routing %s for %s through remote connection's "
"current node %s (moving)%s",
action->task, (action->rsc? action->rsc->id : "no resource"),
@@ -653,7 +652,7 @@ pcmk__connection_host_for_action(const pe_action_t *action)
* \param[in,out] params Resource parameters evaluated per node
*/
void
-pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params)
+pcmk__substitute_remote_addr(pcmk_resource_t *rsc, GHashTable *params)
{
const char *remote_addr = g_hash_table_lookup(params,
XML_RSC_ATTR_REMOTE_RA_ADDR);
@@ -681,36 +680,37 @@ pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params)
* \param[in] action Action to check
*/
void
-pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, const pe_action_t *action)
+pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, const pcmk_action_t *action)
{
- const pe_node_t *host = NULL;
+ const pcmk_node_t *guest = action->node;
+ const pcmk_node_t *host = NULL;
enum action_tasks task;
- if (!pe__is_guest_node(action->node)) {
+ if (!pe__is_guest_node(guest)) {
return;
}
task = text2task(action->task);
- if ((task == action_notify) || (task == action_notified)) {
+ if ((task == pcmk_action_notify) || (task == pcmk_action_notified)) {
task = text2task(g_hash_table_lookup(action->meta, "notify_operation"));
}
switch (task) {
- case stop_rsc:
- case stopped_rsc:
- case action_demote:
- case action_demoted:
+ case pcmk_action_stop:
+ case pcmk_action_stopped:
+ case pcmk_action_demote:
+ case pcmk_action_demoted:
// "Down" actions take place on guest's current host
- host = pe__current_node(action->node->details->remote_rsc->container);
+ host = pe__current_node(guest->details->remote_rsc->container);
break;
- case start_rsc:
- case started_rsc:
- case monitor_rsc:
- case action_promote:
- case action_promoted:
+ case pcmk_action_start:
+ case pcmk_action_started:
+ case pcmk_action_monitor:
+ case pcmk_action_promote:
+ case pcmk_action_promoted:
// "Up" actions take place on guest's next host
- host = action->node->details->remote_rsc->container->allocated_to;
+ host = guest->details->remote_rsc->container->allocated_to;
break;
default:
diff --git a/lib/pacemaker/pcmk_sched_resource.c b/lib/pacemaker/pcmk_sched_resource.c
index b855499..908c434 100644
--- a/lib/pacemaker/pcmk_sched_resource.c
+++ b/lib/pacemaker/pcmk_sched_resource.c
@@ -16,8 +16,8 @@
#include "libpacemaker_private.h"
-// Resource allocation methods that vary by resource variant
-static resource_alloc_functions_t allocation_methods[] = {
+// Resource assignment methods by resource variant
+static pcmk_assignment_methods_t assignment_methods[] = {
{
pcmk__primitive_assign,
pcmk__primitive_create_actions,
@@ -58,25 +58,25 @@ static resource_alloc_functions_t allocation_methods[] = {
},
{
pcmk__clone_assign,
- clone_create_actions,
- clone_create_probe,
- clone_internal_constraints,
+ pcmk__clone_create_actions,
+ pcmk__clone_create_probe,
+ pcmk__clone_internal_constraints,
pcmk__clone_apply_coloc_score,
pcmk__colocated_resources,
pcmk__with_clone_colocations,
pcmk__clone_with_colocations,
pcmk__add_colocated_node_scores,
- clone_rsc_location,
- clone_action_flags,
+ pcmk__clone_apply_location,
+ pcmk__clone_action_flags,
pcmk__instance_update_ordered_actions,
pcmk__output_resource_actions,
- clone_expand,
- clone_append_meta,
+ pcmk__clone_add_actions_to_graph,
+ pcmk__clone_add_graph_meta,
pcmk__clone_add_utilization,
pcmk__clone_shutdown_lock,
},
{
- pcmk__bundle_allocate,
+ pcmk__bundle_assign,
pcmk__bundle_create_actions,
pcmk__bundle_create_probe,
pcmk__bundle_internal_constraints,
@@ -85,11 +85,11 @@ static resource_alloc_functions_t allocation_methods[] = {
pcmk__with_bundle_colocations,
pcmk__bundle_with_colocations,
pcmk__add_colocated_node_scores,
- pcmk__bundle_rsc_location,
+ pcmk__bundle_apply_location,
pcmk__bundle_action_flags,
pcmk__instance_update_ordered_actions,
pcmk__output_bundle_actions,
- pcmk__bundle_expand,
+ pcmk__bundle_add_actions_to_graph,
pcmk__noop_add_graph_meta,
pcmk__bundle_add_utilization,
pcmk__bundle_shutdown_lock,
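The renamed assignment_methods[] table above is a per-variant virtual-method table: each resource points its cmds member at the row for its variant, and callers dispatch through rsc->cmds->... without caring whether the resource is a primitive, group, clone, or bundle. A condensed sketch of the pattern with made-up method names:

    #include <stdio.h>

    enum demo_variant { DEMO_PRIMITIVE, DEMO_GROUP, DEMO_CLONE, DEMO_BUNDLE,
                        DEMO_VARIANT_MAX };

    struct demo_rsc;

    /* One row of variant-specific methods (heavily trimmed) */
    struct demo_methods {
        void (*assign)(struct demo_rsc *rsc);
        void (*create_actions)(struct demo_rsc *rsc);
    };

    struct demo_rsc {
        const char *id;
        enum demo_variant variant;
        const struct demo_methods *cmds;
    };

    static void primitive_assign(struct demo_rsc *r)
    { printf("%s: primitive assign\n", r->id); }
    static void primitive_actions(struct demo_rsc *r)
    { printf("%s: primitive actions\n", r->id); }
    static void clone_assign(struct demo_rsc *r)
    { printf("%s: clone assign\n", r->id); }
    static void clone_actions(struct demo_rsc *r)
    { printf("%s: clone actions\n", r->id); }

    /* Rows indexed by variant, like assignment_methods[] in the hunk above */
    static const struct demo_methods demo_methods_by_variant[DEMO_VARIANT_MAX] = {
        [DEMO_PRIMITIVE] = { primitive_assign, primitive_actions },
        [DEMO_GROUP]     = { primitive_assign, primitive_actions },
        [DEMO_CLONE]     = { clone_assign, clone_actions },
        [DEMO_BUNDLE]    = { clone_assign, clone_actions },
    };

    int
    main(void)
    {
        struct demo_rsc rsc = { "dummy-clone", DEMO_CLONE, NULL };

        rsc.cmds = &demo_methods_by_variant[rsc.variant];
        rsc.cmds->assign(&rsc);
        rsc.cmds->create_actions(&rsc);
        return 0;
    }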
@@ -108,7 +108,7 @@ static resource_alloc_functions_t allocation_methods[] = {
* \return true if agent for \p rsc changed, otherwise false
*/
bool
-pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node,
+pcmk__rsc_agent_changed(pcmk_resource_t *rsc, pcmk_node_t *node,
const xmlNode *rsc_entry, bool active_on_node)
{
bool changed = false;
@@ -136,9 +136,9 @@ pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node,
}
if (changed && active_on_node) {
// Make sure the resource is restarted
- custom_action(rsc, stop_key(rsc), CRMD_ACTION_STOP, node, FALSE, TRUE,
+ custom_action(rsc, stop_key(rsc), PCMK_ACTION_STOP, node, FALSE,
rsc->cluster);
- pe__set_resource_flags(rsc, pe_rsc_start_pending);
+ pe__set_resource_flags(rsc, pcmk_rsc_start_pending);
}
return changed;
}
@@ -154,14 +154,14 @@ pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node,
* \return (Possibly new) head of list
*/
static GList *
-add_rsc_if_matching(GList *result, pe_resource_t *rsc, const char *id)
+add_rsc_if_matching(GList *result, pcmk_resource_t *rsc, const char *id)
{
if ((strcmp(rsc->id, id) == 0)
|| ((rsc->clone_name != NULL) && (strcmp(rsc->clone_name, id) == 0))) {
result = g_list_prepend(result, rsc);
}
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *child = (pe_resource_t *) iter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) iter->data;
result = add_rsc_if_matching(result, child, id);
}
@@ -172,55 +172,75 @@ add_rsc_if_matching(GList *result, pe_resource_t *rsc, const char *id)
* \internal
* \brief Find all resources matching a given ID by either ID or clone name
*
- * \param[in] id Resource ID to check
- * \param[in] data_set Cluster working set
+ * \param[in] id Resource ID to check
+ * \param[in] scheduler Scheduler data
*
* \return List of all resources that match \p id
* \note The caller is responsible for freeing the return value with
* g_list_free().
*/
GList *
-pcmk__rscs_matching_id(const char *id, const pe_working_set_t *data_set)
+pcmk__rscs_matching_id(const char *id, const pcmk_scheduler_t *scheduler)
{
GList *result = NULL;
- CRM_CHECK((id != NULL) && (data_set != NULL), return NULL);
- for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
- result = add_rsc_if_matching(result, (pe_resource_t *) iter->data, id);
+ CRM_CHECK((id != NULL) && (scheduler != NULL), return NULL);
+ for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ result = add_rsc_if_matching(result, (pcmk_resource_t *) iter->data,
+ id);
}
return result;
}
/*!
* \internal
- * \brief Set the variant-appropriate allocation methods for a resource
+ * \brief Set the variant-appropriate assignment methods for a resource
*
- * \param[in,out] rsc Resource to set allocation methods for
- * \param[in] ignored Here so function can be used with g_list_foreach()
+ * \param[in,out] data Resource to set assignment methods for
+ * \param[in] user_data Ignored
*/
static void
-set_allocation_methods_for_rsc(pe_resource_t *rsc, void *ignored)
+set_assignment_methods_for_rsc(gpointer data, gpointer user_data)
{
- rsc->cmds = &allocation_methods[rsc->variant];
- g_list_foreach(rsc->children, (GFunc) set_allocation_methods_for_rsc, NULL);
+ pcmk_resource_t *rsc = data;
+
+ rsc->cmds = &assignment_methods[rsc->variant];
+ g_list_foreach(rsc->children, set_assignment_methods_for_rsc, NULL);
}
/*!
* \internal
- * \brief Set the variant-appropriate allocation methods for all resources
+ * \brief Set the variant-appropriate assignment methods for all resources
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__set_allocation_methods(pe_working_set_t *data_set)
+pcmk__set_assignment_methods(pcmk_scheduler_t *scheduler)
+{
+ g_list_foreach(scheduler->resources, set_assignment_methods_for_rsc, NULL);
+}
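pcmk__set_assignment_methods() above walks every top-level resource with g_list_foreach(), and the per-resource callback recurses into children, so the whole resource tree ends up pointing at the right method table. A small GLib-based sketch of that recursive traversal; GLib is already a dependency of this code, and everything else here (names, structure) is illustrative:

    #include <glib.h>
    #include <stdio.h>

    struct demo_rsc {
        const char *id;
        GList *children;    /* list of struct demo_rsc * */
    };

    /* GFunc callback: visit one resource, then recurse into its children,
     * mirroring set_assignment_methods_for_rsc() */
    static void
    demo_visit(gpointer data, gpointer user_data)
    {
        struct demo_rsc *rsc = data;

        printf("setting methods for %s\n", rsc->id);
        g_list_foreach(rsc->children, demo_visit, user_data);
    }

    int
    main(void)
    {
        struct demo_rsc child1 = { "clone:0", NULL };
        struct demo_rsc child2 = { "clone:1", NULL };
        struct demo_rsc clone = { "clone", NULL };
        GList *resources = NULL;

        clone.children = g_list_append(clone.children, &child1);
        clone.children = g_list_append(clone.children, &child2);
        resources = g_list_append(resources, &clone);

        g_list_foreach(resources, demo_visit, NULL);

        g_list_free(clone.children);
        g_list_free(resources);
        return 0;
    }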
+
+/*!
+ * \internal
+ * \brief Wrapper for colocated_resources() method for readability
+ *
+ * \param[in] rsc Resource to add to colocated list
+ * \param[in] orig_rsc Resource originally requested
+ * \param[in,out] list Pointer to list to add to
+ *
+ * \note Updates \p *list to the (possibly new) head of the list
+ */
+static inline void
+add_colocated_resources(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc, GList **list)
{
- g_list_foreach(data_set->resources, (GFunc) set_allocation_methods_for_rsc,
- NULL);
+ *list = rsc->cmds->colocated_resources(rsc, orig_rsc, *list);
}
-// Shared implementation of resource_alloc_functions_t:colocated_resources()
+// Shared implementation of pcmk_assignment_methods_t:colocated_resources()
GList *
-pcmk__colocated_resources(const pe_resource_t *rsc, const pe_resource_t *orig_rsc,
+pcmk__colocated_resources(const pcmk_resource_t *rsc,
+ const pcmk_resource_t *orig_rsc,
GList *colocated_rscs)
{
const GList *iter = NULL;
@@ -242,7 +262,7 @@ pcmk__colocated_resources(const pe_resource_t *rsc, const pe_resource_t *orig_rs
colocations = pcmk__this_with_colocations(rsc);
for (iter = colocations; iter != NULL; iter = iter->next) {
const pcmk__colocation_t *constraint = iter->data;
- const pe_resource_t *primary = constraint->primary;
+ const pcmk_resource_t *primary = constraint->primary;
if (primary == orig_rsc) {
continue; // Break colocation loop
@@ -251,10 +271,7 @@ pcmk__colocated_resources(const pe_resource_t *rsc, const pe_resource_t *orig_rs
if ((constraint->score == INFINITY) &&
(pcmk__colocation_affects(rsc, primary, constraint,
true) == pcmk__coloc_affects_location)) {
-
- colocated_rscs = primary->cmds->colocated_resources(primary,
- orig_rsc,
- colocated_rscs);
+ add_colocated_resources(primary, orig_rsc, &colocated_rscs);
}
}
g_list_free(colocations);
@@ -263,7 +280,7 @@ pcmk__colocated_resources(const pe_resource_t *rsc, const pe_resource_t *orig_rs
colocations = pcmk__with_this_colocations(rsc);
for (iter = colocations; iter != NULL; iter = iter->next) {
const pcmk__colocation_t *constraint = iter->data;
- const pe_resource_t *dependent = constraint->dependent;
+ const pcmk_resource_t *dependent = constraint->dependent;
if (dependent == orig_rsc) {
continue; // Break colocation loop
@@ -276,10 +293,7 @@ pcmk__colocated_resources(const pe_resource_t *rsc, const pe_resource_t *orig_rs
if ((constraint->score == INFINITY) &&
(pcmk__colocation_affects(dependent, rsc, constraint,
true) == pcmk__coloc_affects_location)) {
-
- colocated_rscs = dependent->cmds->colocated_resources(dependent,
- orig_rsc,
- colocated_rscs);
+ add_colocated_resources(dependent, orig_rsc, &colocated_rscs);
}
}
g_list_free(colocations);
@@ -289,21 +303,29 @@ pcmk__colocated_resources(const pe_resource_t *rsc, const pe_resource_t *orig_rs
// No-op function for variants that don't need to implement add_graph_meta()
void
-pcmk__noop_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml)
+pcmk__noop_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml)
{
}
+/*!
+ * \internal
+ * \brief Output a summary of scheduled actions for a resource
+ *
+ * \param[in,out] rsc Resource to output actions for
+ */
void
-pcmk__output_resource_actions(pe_resource_t *rsc)
+pcmk__output_resource_actions(pcmk_resource_t *rsc)
{
- pcmk__output_t *out = rsc->cluster->priv;
+ pcmk_node_t *next = NULL;
+ pcmk_node_t *current = NULL;
+ pcmk__output_t *out = NULL;
- pe_node_t *next = NULL;
- pe_node_t *current = NULL;
+ CRM_ASSERT(rsc != NULL);
+ out = rsc->cluster->priv;
if (rsc->children != NULL) {
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *child = (pe_resource_t *) iter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) iter->data;
child->cmds->output_actions(child);
}
@@ -313,15 +335,15 @@ pcmk__output_resource_actions(pe_resource_t *rsc)
next = rsc->allocated_to;
if (rsc->running_on) {
current = pe__current_node(rsc);
- if (rsc->role == RSC_ROLE_STOPPED) {
+ if (rsc->role == pcmk_role_stopped) {
/* This can occur when resources are being recovered because
* the current role can change in pcmk__primitive_create_actions()
*/
- rsc->role = RSC_ROLE_STARTED;
+ rsc->role = pcmk_role_started;
}
}
- if ((current == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ if ((current == NULL) && pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
/* Don't log stopped orphans */
return;
}
@@ -331,175 +353,207 @@ pcmk__output_resource_actions(pe_resource_t *rsc)
/*!
* \internal
- * \brief Assign a specified primitive resource to a node
+ * \brief Add a resource to a node's list of assigned resources
+ *
+ * \param[in,out] node Node to add resource to
+ * \param[in] rsc Resource to add
+ */
+static inline void
+add_assigned_resource(pcmk_node_t *node, pcmk_resource_t *rsc)
+{
+ node->details->allocated_rsc = g_list_prepend(node->details->allocated_rsc,
+ rsc);
+}
+
+/*!
+ * \internal
+ * \brief Assign a specified resource (of any variant) to a node
+ *
+ * Assign a specified resource and its children (if any) to a specified node, if
+ * the node can run the resource (or unconditionally, if \p force is true). Mark
+ * the resources as no longer provisional.
*
- * Assign a specified primitive resource to a specified node, if the node can
- * run the resource (or unconditionally, if \p force is true). Mark the resource
- * as no longer provisional. If the primitive can't be assigned (or \p chosen is
- * NULL), unassign any previous assignment for it, set its next role to stopped,
- * and update any existing actions scheduled for it. This is not done
- * recursively for children, so it should be called only for primitives.
+ * If a resource can't be assigned (or \p node is \c NULL), unassign any
+ * previous assignment. If \p stop_if_fail is \c true, set next role to stopped
+ * and update any existing actions scheduled for the resource.
*
- * \param[in,out] rsc Resource to assign
- * \param[in,out] chosen Node to assign \p rsc to
- * \param[in] force If true, assign to \p chosen even if unavailable
+ * \param[in,out] rsc Resource to assign
+ * \param[in,out] node Node to assign \p rsc to
+ * \param[in] force If true, assign to \p node even if unavailable
+ * \param[in] stop_if_fail If \c true and either \p rsc can't be assigned
+ * or \p node is \c NULL, set next role to
+ * stopped and update existing actions (if \p rsc
+ * is not a primitive, this applies to its
+ * primitive descendants instead)
*
- * \return true if \p rsc could be assigned, otherwise false
+ * \return \c true if the assignment of \p rsc changed, or \c false otherwise
*
* \note Assigning a resource to the NULL node using this function is different
- * from calling pcmk__unassign_resource(), in that it will also update any
+ * from calling pcmk__unassign_resource(), in that it may also update any
* actions created for the resource.
+ * \note The \c pcmk_assignment_methods_t:assign() method is preferred, unless
+ * a resource should be assigned to the \c NULL node or every resource in
+ * a tree should be assigned to the same node.
+ * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
+ * completely undo the assignment. A successful assignment can be either
+ * undone or left alone as final. A failed assignment has the same effect
+ * as calling pcmk__unassign_resource(); there are no side effects on
+ * roles or actions.
*/
bool
-pcmk__finalize_assignment(pe_resource_t *rsc, pe_node_t *chosen, bool force)
+pcmk__assign_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool force,
+ bool stop_if_fail)
{
- pcmk__output_t *out = rsc->cluster->priv;
-
- CRM_ASSERT(rsc->variant == pe_native);
-
- if (!force && (chosen != NULL)) {
- if ((chosen->weight < 0)
- // Allow the graph to assume that guest node connections will come up
- || (!pcmk__node_available(chosen, true, false)
- && !pe__is_guest_node(chosen))) {
-
- crm_debug("All nodes for resource %s are unavailable, unclean or "
- "shutting down (%s can%s run resources, with weight %d)",
- rsc->id, pe__node_name(chosen),
- (pcmk__node_available(chosen, true, false)? "" : "not"),
- chosen->weight);
- pe__set_next_role(rsc, RSC_ROLE_STOPPED, "node availability");
- chosen = NULL;
+ bool changed = false;
+
+ CRM_ASSERT(rsc != NULL);
+
+ if (rsc->children != NULL) {
+ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *child_rsc = iter->data;
+
+ changed |= pcmk__assign_resource(child_rsc, node, force,
+ stop_if_fail);
}
+ return changed;
}
+ // Assigning a primitive
+
+ if (!force && (node != NULL)
+ && ((node->weight < 0)
+ // Allow graph to assume that guest node connections will come up
+ || (!pcmk__node_available(node, true, false)
+ && !pe__is_guest_node(node)))) {
+
+ pe_rsc_debug(rsc,
+ "All nodes for resource %s are unavailable, unclean or "
+ "shutting down (%s can%s run resources, with score %s)",
+ rsc->id, pe__node_name(node),
+ (pcmk__node_available(node, true, false)? "" : "not"),
+ pcmk_readable_score(node->weight));
+
+ if (stop_if_fail) {
+ pe__set_next_role(rsc, pcmk_role_stopped, "node availability");
+ }
+ node = NULL;
+ }
+
+ if (rsc->allocated_to != NULL) {
+ changed = !pe__same_node(rsc->allocated_to, node);
+ } else {
+ changed = (node != NULL);
+ }
pcmk__unassign_resource(rsc);
- pe__clear_resource_flags(rsc, pe_rsc_provisional);
+ pe__clear_resource_flags(rsc, pcmk_rsc_unassigned);
- if (chosen == NULL) {
- crm_debug("Could not allocate a node for %s", rsc->id);
- pe__set_next_role(rsc, RSC_ROLE_STOPPED, "unable to allocate");
+ if (node == NULL) {
+ char *rc_stopped = NULL;
+
+ pe_rsc_debug(rsc, "Could not assign %s to a node", rsc->id);
+
+ if (!stop_if_fail) {
+ return changed;
+ }
+ pe__set_next_role(rsc, pcmk_role_stopped, "unable to assign");
for (GList *iter = rsc->actions; iter != NULL; iter = iter->next) {
- pe_action_t *op = (pe_action_t *) iter->data;
+ pcmk_action_t *op = (pcmk_action_t *) iter->data;
- crm_debug("Updating %s for allocation failure", op->uuid);
+ pe_rsc_debug(rsc, "Updating %s for %s assignment failure",
+ op->uuid, rsc->id);
- if (pcmk__str_eq(op->task, RSC_STOP, pcmk__str_casei)) {
- pe__clear_action_flags(op, pe_action_optional);
+ if (pcmk__str_eq(op->task, PCMK_ACTION_STOP, pcmk__str_none)) {
+ pe__clear_action_flags(op, pcmk_action_optional);
- } else if (pcmk__str_eq(op->task, RSC_START, pcmk__str_casei)) {
- pe__clear_action_flags(op, pe_action_runnable);
- //pe__set_resource_flags(rsc, pe_rsc_block);
+ } else if (pcmk__str_eq(op->task, PCMK_ACTION_START,
+ pcmk__str_none)) {
+ pe__clear_action_flags(op, pcmk_action_runnable);
} else {
// Cancel recurring actions, unless for stopped state
const char *interval_ms_s = NULL;
const char *target_rc_s = NULL;
- char *rc_stopped = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
interval_ms_s = g_hash_table_lookup(op->meta,
XML_LRM_ATTR_INTERVAL_MS);
target_rc_s = g_hash_table_lookup(op->meta,
XML_ATTR_TE_TARGET_RC);
- if ((interval_ms_s != NULL)
- && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_none)
+ if (rc_stopped == NULL) {
+ rc_stopped = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
+ }
+
+ if (!pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches)
&& !pcmk__str_eq(rc_stopped, target_rc_s, pcmk__str_none)) {
- pe__clear_action_flags(op, pe_action_runnable);
+
+ pe__clear_action_flags(op, pcmk_action_runnable);
}
- free(rc_stopped);
}
}
- return false;
- }
-
- crm_debug("Assigning %s to %s", rsc->id, pe__node_name(chosen));
- rsc->allocated_to = pe__copy_node(chosen);
-
- chosen->details->allocated_rsc = g_list_prepend(chosen->details->allocated_rsc,
- rsc);
- chosen->details->num_resources++;
- chosen->count++;
- pcmk__consume_node_capacity(chosen->details->utilization, rsc);
-
- if (pcmk_is_set(rsc->cluster->flags, pe_flag_show_utilization)) {
- out->message(out, "resource-util", rsc, chosen, __func__);
+ free(rc_stopped);
+ return changed;
}
- return true;
-}
-/*!
- * \internal
- * \brief Assign a specified resource (of any variant) to a node
- *
- * Assign a specified resource and its children (if any) to a specified node, if
- * the node can run the resource (or unconditionally, if \p force is true). Mark
- * the resources as no longer provisional. If the resources can't be assigned
- * (or \p chosen is NULL), unassign any previous assignments, set next role to
- * stopped, and update any existing actions scheduled for them.
- *
- * \param[in,out] rsc Resource to assign
- * \param[in,out] chosen Node to assign \p rsc to
- * \param[in] force If true, assign to \p chosen even if unavailable
- *
- * \return true if \p rsc could be assigned, otherwise false
- *
- * \note Assigning a resource to the NULL node using this function is different
- * from calling pcmk__unassign_resource(), in that it will also update any
- * actions created for the resource.
- */
-bool
-pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force)
-{
- bool changed = false;
+ pe_rsc_debug(rsc, "Assigning %s to %s", rsc->id, pe__node_name(node));
+ rsc->allocated_to = pe__copy_node(node);
- if (rsc->children == NULL) {
- if (rsc->allocated_to != NULL) {
- changed = true;
- }
- pcmk__finalize_assignment(rsc, node, force);
+ add_assigned_resource(node, rsc);
+ node->details->num_resources++;
+ node->count++;
+ pcmk__consume_node_capacity(node->details->utilization, rsc);
- } else {
- for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) iter->data;
+ if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_show_utilization)) {
+ pcmk__output_t *out = rsc->cluster->priv;
- changed |= pcmk__assign_resource(child_rsc, node, force);
- }
+ out->message(out, "resource-util", rsc, node, __func__);
}
return changed;
}
/*!
* \internal
- * \brief Remove any assignment of a specified resource to a node
+ * \brief Remove any node assignment from a specified resource and its children
*
* If a specified resource has been assigned to a node, remove that assignment
- * and mark the resource as provisional again. This is not done recursively for
- * children, so it should be called only for primitives.
+ * and mark the resource as provisional again.
*
* \param[in,out] rsc Resource to unassign
+ *
+ * \note This function is called recursively on \p rsc and its children.
*/
void
-pcmk__unassign_resource(pe_resource_t *rsc)
+pcmk__unassign_resource(pcmk_resource_t *rsc)
{
- pe_node_t *old = rsc->allocated_to;
+ pcmk_node_t *old = rsc->allocated_to;
if (old == NULL) {
- return;
+ crm_info("Unassigning %s", rsc->id);
+ } else {
+ crm_info("Unassigning %s from %s", rsc->id, pe__node_name(old));
}
- crm_info("Unassigning %s from %s", rsc->id, pe__node_name(old));
- pe__set_resource_flags(rsc, pe_rsc_provisional);
- rsc->allocated_to = NULL;
+ pe__set_resource_flags(rsc, pcmk_rsc_unassigned);
- /* We're going to free the pe_node_t, but its details member is shared and
- * will remain, so update that appropriately first.
- */
- old->details->allocated_rsc = g_list_remove(old->details->allocated_rsc,
- rsc);
- old->details->num_resources--;
- pcmk__release_node_capacity(old->details->utilization, rsc);
- free(old);
+ if (rsc->children == NULL) {
+ if (old == NULL) {
+ return;
+ }
+ rsc->allocated_to = NULL;
+
+ /* We're going to free the pcmk_node_t, but its details member is shared
+ * and will remain, so update that appropriately first.
+ */
+ old->details->allocated_rsc = g_list_remove(old->details->allocated_rsc,
+ rsc);
+ old->details->num_resources--;
+ pcmk__release_node_capacity(old->details->utilization, rsc);
+ free(old);
+ return;
+ }
+
+ for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+ pcmk__unassign_resource((pcmk_resource_t *) iter->data);
+ }
}
/*!
@@ -514,11 +568,11 @@ pcmk__unassign_resource(pe_resource_t *rsc)
* \return true if the migration threshold has been reached, false otherwise
*/
bool
-pcmk__threshold_reached(pe_resource_t *rsc, const pe_node_t *node,
- pe_resource_t **failed)
+pcmk__threshold_reached(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ pcmk_resource_t **failed)
{
int fail_count, remaining_tries;
- pe_resource_t *rsc_to_ban = rsc;
+ pcmk_resource_t *rsc_to_ban = rsc;
// Migration threshold of 0 means never force away
if (rsc->migration_threshold == 0) {
@@ -526,19 +580,19 @@ pcmk__threshold_reached(pe_resource_t *rsc, const pe_node_t *node,
}
// If we're ignoring failures, also ignore the migration threshold
- if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_ignore_failure)) {
return false;
}
// If there are no failures, there's no need to force away
fail_count = pe_get_failcount(node, rsc, NULL,
- pe_fc_effective|pe_fc_fillers, NULL);
+ pcmk__fc_effective|pcmk__fc_fillers, NULL);
if (fail_count <= 0) {
return false;
}
// If failed resource is anonymous clone instance, we'll force clone away
- if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
rsc_to_ban = uber_parent(rsc);
}
@@ -564,69 +618,66 @@ pcmk__threshold_reached(pe_resource_t *rsc, const pe_node_t *node,
return false;
}
-static void *
-convert_const_pointer(const void *ptr)
-{
- /* Worst function ever */
- return (void *)ptr;
-}
-
/*!
* \internal
- * \brief Get a node's weight
+ * \brief Get a node's score
*
- * \param[in] node Unweighted node to check (for node ID)
- * \param[in] nodes List of weighted nodes to look for \p node in
+ * \param[in] node Node with ID to check
+ * \param[in] nodes List of nodes to look for \p node score in
*
- * \return Node's weight, or -INFINITY if not found
+ * \return Node's score, or -INFINITY if not found
*/
static int
-get_node_weight(const pe_node_t *node, GHashTable *nodes)
+get_node_score(const pcmk_node_t *node, GHashTable *nodes)
{
- pe_node_t *weighted_node = NULL;
+ pcmk_node_t *found_node = NULL;
if ((node != NULL) && (nodes != NULL)) {
- weighted_node = g_hash_table_lookup(nodes, node->details->id);
+ found_node = g_hash_table_lookup(nodes, node->details->id);
}
- return (weighted_node == NULL)? -INFINITY : weighted_node->weight;
+ return (found_node == NULL)? -INFINITY : found_node->weight;
}
/*!
* \internal
- * \brief Compare two resources according to which should be allocated first
+ * \brief Compare two resources according to which should be assigned first
*
* \param[in] a First resource to compare
* \param[in] b Second resource to compare
* \param[in] data Sorted list of all nodes in cluster
*
- * \return -1 if \p a should be allocated before \b, 0 if they are equal,
- * or +1 if \p a should be allocated after \b
+ * \return -1 if \p a should be assigned before \p b, 0 if they are equal,
+ * or +1 if \p a should be assigned after \p b
*/
static gint
cmp_resources(gconstpointer a, gconstpointer b, gpointer data)
{
- const pe_resource_t *resource1 = a;
- const pe_resource_t *resource2 = b;
- const GList *nodes = (const GList *) data;
+ /* GLib insists that this function require gconstpointer arguments, but we
+ * make a small, temporary change to each argument (setting the
+ * pe_rsc_merging flag) during comparison
+ */
+ pcmk_resource_t *resource1 = (pcmk_resource_t *) a;
+ pcmk_resource_t *resource2 = (pcmk_resource_t *) b;
+ const GList *nodes = data;
int rc = 0;
- int r1_weight = -INFINITY;
- int r2_weight = -INFINITY;
- pe_node_t *r1_node = NULL;
- pe_node_t *r2_node = NULL;
+ int r1_score = -INFINITY;
+ int r2_score = -INFINITY;
+ pcmk_node_t *r1_node = NULL;
+ pcmk_node_t *r2_node = NULL;
GHashTable *r1_nodes = NULL;
GHashTable *r2_nodes = NULL;
const char *reason = NULL;
- // Resources with highest priority should be allocated first
+ // Resources with highest priority should be assigned first
reason = "priority";
- r1_weight = resource1->priority;
- r2_weight = resource2->priority;
- if (r1_weight > r2_weight) {
+ r1_score = resource1->priority;
+ r2_score = resource2->priority;
+ if (r1_score > r2_score) {
rc = -1;
goto done;
}
- if (r1_weight < r2_weight) {
+ if (r1_score < r2_score) {
rc = 1;
goto done;
}
@@ -637,17 +688,17 @@ cmp_resources(gconstpointer a, gconstpointer b, gpointer data)
goto done;
}
- // Calculate and log node weights
- resource1->cmds->add_colocated_node_scores(convert_const_pointer(resource1),
- resource1->id, &r1_nodes, NULL,
- 1, pcmk__coloc_select_this_with);
- resource2->cmds->add_colocated_node_scores(convert_const_pointer(resource2),
- resource2->id, &r2_nodes, NULL,
- 1, pcmk__coloc_select_this_with);
- pe__show_node_weights(true, NULL, resource1->id, r1_nodes,
- resource1->cluster);
- pe__show_node_weights(true, NULL, resource2->id, r2_nodes,
- resource2->cluster);
+ // Calculate and log node scores
+ resource1->cmds->add_colocated_node_scores(resource1, NULL, resource1->id,
+ &r1_nodes, NULL, 1,
+ pcmk__coloc_select_this_with);
+ resource2->cmds->add_colocated_node_scores(resource2, NULL, resource2->id,
+ &r2_nodes, NULL, 1,
+ pcmk__coloc_select_this_with);
+ pe__show_node_scores(true, NULL, resource1->id, r1_nodes,
+ resource1->cluster);
+ pe__show_node_scores(true, NULL, resource2->id, r2_nodes,
+ resource2->cluster);
// The resource with highest score on its current node goes first
reason = "current location";
@@ -657,29 +708,29 @@ cmp_resources(gconstpointer a, gconstpointer b, gpointer data)
if (resource2->running_on != NULL) {
r2_node = pe__current_node(resource2);
}
- r1_weight = get_node_weight(r1_node, r1_nodes);
- r2_weight = get_node_weight(r2_node, r2_nodes);
- if (r1_weight > r2_weight) {
+ r1_score = get_node_score(r1_node, r1_nodes);
+ r2_score = get_node_score(r2_node, r2_nodes);
+ if (r1_score > r2_score) {
rc = -1;
goto done;
}
- if (r1_weight < r2_weight) {
+ if (r1_score < r2_score) {
rc = 1;
goto done;
}
- // Otherwise a higher weight on any node will do
+ // Otherwise a higher score on any node will do
reason = "score";
for (const GList *iter = nodes; iter != NULL; iter = iter->next) {
- const pe_node_t *node = (const pe_node_t *) iter->data;
+ const pcmk_node_t *node = (const pcmk_node_t *) iter->data;
- r1_weight = get_node_weight(node, r1_nodes);
- r2_weight = get_node_weight(node, r2_nodes);
- if (r1_weight > r2_weight) {
+ r1_score = get_node_score(node, r1_nodes);
+ r2_score = get_node_score(node, r2_nodes);
+ if (r1_score > r2_score) {
rc = -1;
goto done;
}
- if (r1_weight < r2_weight) {
+ if (r1_score < r2_score) {
rc = 1;
goto done;
}
@@ -687,11 +738,11 @@ cmp_resources(gconstpointer a, gconstpointer b, gpointer data)
done:
crm_trace("%s (%d)%s%s %c %s (%d)%s%s: %s",
- resource1->id, r1_weight,
+ resource1->id, r1_score,
((r1_node == NULL)? "" : " on "),
((r1_node == NULL)? "" : r1_node->details->id),
((rc < 0)? '>' : ((rc > 0)? '<' : '=')),
- resource2->id, r2_weight,
+ resource2->id, r2_score,
((r2_node == NULL)? "" : " on "),
((r2_node == NULL)? "" : r2_node->details->id),
reason);
@@ -706,17 +757,17 @@ done:
/*!
* \internal
- * \brief Sort resources in the order they should be allocated to nodes
+ * \brief Sort resources in the order they should be assigned to nodes
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__sort_resources(pe_working_set_t *data_set)
+pcmk__sort_resources(pcmk_scheduler_t *scheduler)
{
- GList *nodes = g_list_copy(data_set->nodes);
+ GList *nodes = g_list_copy(scheduler->nodes);
nodes = pcmk__sort_nodes(nodes, NULL);
- data_set->resources = g_list_sort_with_data(data_set->resources,
- cmp_resources, nodes);
+ scheduler->resources = g_list_sort_with_data(scheduler->resources,
+ cmp_resources, nodes);
g_list_free(nodes);
}
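
The cmp_resources()/pcmk__sort_resources() changes above keep the usual GLib sorting pattern under the new names: g_list_sort_with_data() takes a GCompareDataFunc that returns negative/zero/positive plus an extra user-data pointer (the scheduler passes its sorted node list). A minimal sketch of that pattern in plain GLib, with illustrative names only and not Pacemaker code:

#include <glib.h>
#include <string.h>

/* Three-way comparator: shorter strings sort first, then alphabetical */
static gint
cmp_len_then_alpha(gconstpointer a, gconstpointer b, gpointer user_data)
{
    const char *s1 = a;
    const char *s2 = b;
    size_t len1 = strlen(s1);
    size_t len2 = strlen(s2);

    (void) user_data;   // unused here; the scheduler passes its node list

    if (len1 != len2) {
        return (len1 < len2)? -1 : 1;
    }
    return strcmp(s1, s2);
}

int
main(void)
{
    GList *items = NULL;

    items = g_list_prepend(items, (gpointer) "banana");
    items = g_list_prepend(items, (gpointer) "fig");
    items = g_list_prepend(items, (gpointer) "apple");

    items = g_list_sort_with_data(items, cmp_len_then_alpha, NULL);

    for (GList *iter = items; iter != NULL; iter = iter->next) {
        g_print("%s\n", (const char *) iter->data);
    }
    g_list_free(items);
    return 0;
}
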
diff --git a/lib/pacemaker/pcmk_sched_tickets.c b/lib/pacemaker/pcmk_sched_tickets.c
index 30206d7..f61b371 100644
--- a/lib/pacemaker/pcmk_sched_tickets.c
+++ b/lib/pacemaker/pcmk_sched_tickets.c
@@ -13,6 +13,7 @@
#include <glib.h>
#include <crm/crm.h>
+#include <crm/common/scheduler_internal.h>
#include <crm/pengine/status.h>
#include <pacemaker-internal.h>
@@ -27,8 +28,8 @@ enum loss_ticket_policy {
typedef struct {
const char *id;
- pe_resource_t *rsc;
- pe_ticket_t *ticket;
+ pcmk_resource_t *rsc;
+ pcmk_ticket_t *ticket;
enum loss_ticket_policy loss_policy;
int role;
} rsc_ticket_t;
@@ -43,9 +44,9 @@ typedef struct {
* constraint's, otherwise false
*/
static bool
-ticket_role_matches(const pe_resource_t *rsc, const rsc_ticket_t *rsc_ticket)
+ticket_role_matches(const pcmk_resource_t *rsc, const rsc_ticket_t *rsc_ticket)
{
- if ((rsc_ticket->role == RSC_ROLE_UNKNOWN)
+ if ((rsc_ticket->role == pcmk_role_unknown)
|| (rsc_ticket->role == rsc->role)) {
return true;
}
@@ -59,13 +60,11 @@ ticket_role_matches(const pe_resource_t *rsc, const rsc_ticket_t *rsc_ticket)
*
* \param[in,out] rsc Resource affected by ticket
* \param[in] rsc_ticket Ticket
- * \param[in,out] data_set Cluster working set
*/
static void
-constraints_for_ticket(pe_resource_t *rsc, const rsc_ticket_t *rsc_ticket,
- pe_working_set_t *data_set)
+constraints_for_ticket(pcmk_resource_t *rsc, const rsc_ticket_t *rsc_ticket)
{
- GList *gIter = NULL;
+ GList *iter = NULL;
CRM_CHECK((rsc != NULL) && (rsc_ticket != NULL), return);
@@ -75,9 +74,8 @@ constraints_for_ticket(pe_resource_t *rsc, const rsc_ticket_t *rsc_ticket,
if (rsc->children) {
pe_rsc_trace(rsc, "Processing ticket dependencies from %s", rsc->id);
- for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- constraints_for_ticket((pe_resource_t *) gIter->data, rsc_ticket,
- data_set);
+ for (iter = rsc->children; iter != NULL; iter = iter->next) {
+ constraints_for_ticket((pcmk_resource_t *) iter->data, rsc_ticket);
}
return;
}
@@ -91,14 +89,14 @@ constraints_for_ticket(pe_resource_t *rsc, const rsc_ticket_t *rsc_ticket,
switch (rsc_ticket->loss_policy) {
case loss_ticket_stop:
resource_location(rsc, NULL, -INFINITY, "__loss_of_ticket__",
- data_set);
+ rsc->cluster);
break;
case loss_ticket_demote:
// Promotion score will be set to -INFINITY in promotion_order()
- if (rsc_ticket->role != RSC_ROLE_PROMOTED) {
+ if (rsc_ticket->role != pcmk_role_promoted) {
resource_location(rsc, NULL, -INFINITY,
- "__loss_of_ticket__", data_set);
+ "__loss_of_ticket__", rsc->cluster);
}
break;
@@ -108,11 +106,10 @@ constraints_for_ticket(pe_resource_t *rsc, const rsc_ticket_t *rsc_ticket,
}
resource_location(rsc, NULL, -INFINITY, "__loss_of_ticket__",
- data_set);
+ rsc->cluster);
- for (gIter = rsc->running_on; gIter != NULL;
- gIter = gIter->next) {
- pe_fence_node(data_set, (pe_node_t *) gIter->data,
+ for (iter = rsc->running_on; iter != NULL; iter = iter->next) {
+ pe_fence_node(rsc->cluster, (pcmk_node_t *) iter->data,
"deadman ticket was lost", FALSE);
}
break;
@@ -122,34 +119,33 @@ constraints_for_ticket(pe_resource_t *rsc, const rsc_ticket_t *rsc_ticket,
return;
}
if (rsc->running_on != NULL) {
- pe__clear_resource_flags(rsc, pe_rsc_managed);
- pe__set_resource_flags(rsc, pe_rsc_block);
+ pe__clear_resource_flags(rsc, pcmk_rsc_managed);
+ pe__set_resource_flags(rsc, pcmk_rsc_blocked);
}
break;
}
} else if (!rsc_ticket->ticket->granted) {
- if ((rsc_ticket->role != RSC_ROLE_PROMOTED)
+ if ((rsc_ticket->role != pcmk_role_promoted)
|| (rsc_ticket->loss_policy == loss_ticket_stop)) {
resource_location(rsc, NULL, -INFINITY, "__no_ticket__",
- data_set);
+ rsc->cluster);
}
} else if (rsc_ticket->ticket->standby) {
- if ((rsc_ticket->role != RSC_ROLE_PROMOTED)
+ if ((rsc_ticket->role != pcmk_role_promoted)
|| (rsc_ticket->loss_policy == loss_ticket_stop)) {
resource_location(rsc, NULL, -INFINITY, "__ticket_standby__",
- data_set);
+ rsc->cluster);
}
}
}
static void
-rsc_ticket_new(const char *id, pe_resource_t *rsc, pe_ticket_t *ticket,
- const char *state, const char *loss_policy,
- pe_working_set_t *data_set)
+rsc_ticket_new(const char *id, pcmk_resource_t *rsc, pcmk_ticket_t *ticket,
+ const char *state, const char *loss_policy)
{
rsc_ticket_t *new_rsc_ticket = NULL;
@@ -164,9 +160,9 @@ rsc_ticket_new(const char *id, pe_resource_t *rsc, pe_ticket_t *ticket,
return;
}
- if (pcmk__str_eq(state, RSC_ROLE_STARTED_S,
+ if (pcmk__str_eq(state, PCMK__ROLE_STARTED,
pcmk__str_null_matches|pcmk__str_casei)) {
- state = RSC_ROLE_UNKNOWN_S;
+ state = PCMK__ROLE_UNKNOWN;
}
new_rsc_ticket->id = id;
@@ -175,7 +171,7 @@ rsc_ticket_new(const char *id, pe_resource_t *rsc, pe_ticket_t *ticket,
new_rsc_ticket->role = text2role(state);
if (pcmk__str_eq(loss_policy, "fence", pcmk__str_casei)) {
- if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) {
new_rsc_ticket->loss_policy = loss_ticket_fence;
} else {
pcmk__config_err("Resetting '" XML_TICKET_ATTR_LOSS_POLICY
@@ -196,7 +192,7 @@ rsc_ticket_new(const char *id, pe_resource_t *rsc, pe_ticket_t *ticket,
role2text(new_rsc_ticket->role));
new_rsc_ticket->loss_policy = loss_ticket_freeze;
- } else if (pcmk__str_eq(loss_policy, "demote", pcmk__str_casei)) {
+ } else if (pcmk__str_eq(loss_policy, PCMK_ACTION_DEMOTE, pcmk__str_casei)) {
crm_debug("On loss of ticket '%s': Demote %s (%s)",
new_rsc_ticket->ticket->id, new_rsc_ticket->rsc->id,
role2text(new_rsc_ticket->role));
@@ -209,7 +205,7 @@ rsc_ticket_new(const char *id, pe_resource_t *rsc, pe_ticket_t *ticket,
new_rsc_ticket->loss_policy = loss_ticket_stop;
} else {
- if (new_rsc_ticket->role == RSC_ROLE_PROMOTED) {
+ if (new_rsc_ticket->role == pcmk_role_promoted) {
crm_debug("On loss of ticket '%s': Default to demote %s (%s)",
new_rsc_ticket->ticket->id, new_rsc_ticket->rsc->id,
role2text(new_rsc_ticket->role));
@@ -228,18 +224,18 @@ rsc_ticket_new(const char *id, pe_resource_t *rsc, pe_ticket_t *ticket,
rsc->rsc_tickets = g_list_append(rsc->rsc_tickets, new_rsc_ticket);
- data_set->ticket_constraints = g_list_append(data_set->ticket_constraints,
- new_rsc_ticket);
+ rsc->cluster->ticket_constraints = g_list_append(
+ rsc->cluster->ticket_constraints, new_rsc_ticket);
if (!(new_rsc_ticket->ticket->granted) || new_rsc_ticket->ticket->standby) {
- constraints_for_ticket(rsc, new_rsc_ticket, data_set);
+ constraints_for_ticket(rsc, new_rsc_ticket);
}
}
// \return Standard Pacemaker return code
static int
-unpack_rsc_ticket_set(xmlNode *set, pe_ticket_t *ticket,
- const char *loss_policy, pe_working_set_t *data_set)
+unpack_rsc_ticket_set(xmlNode *set, pcmk_ticket_t *ticket,
+ const char *loss_policy, pcmk_scheduler_t *scheduler)
{
const char *set_id = NULL;
const char *role = NULL;
@@ -259,9 +255,9 @@ unpack_rsc_ticket_set(xmlNode *set, pe_ticket_t *ticket,
for (xmlNode *xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF);
xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
- pe_resource_t *resource = NULL;
+ pcmk_resource_t *resource = NULL;
- resource = pcmk__find_constraint_resource(data_set->resources,
+ resource = pcmk__find_constraint_resource(scheduler->resources,
ID(xml_rsc));
if (resource == NULL) {
pcmk__config_err("%s: No resource found for %s",
@@ -270,21 +266,21 @@ unpack_rsc_ticket_set(xmlNode *set, pe_ticket_t *ticket,
}
pe_rsc_trace(resource, "Resource '%s' depends on ticket '%s'",
resource->id, ticket->id);
- rsc_ticket_new(set_id, resource, ticket, role, loss_policy, data_set);
+ rsc_ticket_new(set_id, resource, ticket, role, loss_policy);
}
return pcmk_rc_ok;
}
static void
-unpack_simple_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
+unpack_simple_rsc_ticket(xmlNode *xml_obj, pcmk_scheduler_t *scheduler)
{
const char *id = NULL;
const char *ticket_str = crm_element_value(xml_obj, XML_TICKET_ATTR_TICKET);
const char *loss_policy = crm_element_value(xml_obj,
XML_TICKET_ATTR_LOSS_POLICY);
- pe_ticket_t *ticket = NULL;
+ pcmk_ticket_t *ticket = NULL;
const char *rsc_id = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE);
const char *state = crm_element_value(xml_obj,
@@ -294,10 +290,10 @@ unpack_simple_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
const char *instance = crm_element_value(xml_obj,
XML_COLOC_ATTR_SOURCE_INSTANCE);
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
if (instance != NULL) {
- pe_warn_once(pe_wo_coloc_inst,
+ pe_warn_once(pcmk__wo_coloc_inst,
"Support for " XML_COLOC_ATTR_SOURCE_INSTANCE " is "
"deprecated and will be removed in a future release.");
}
@@ -307,7 +303,7 @@ unpack_simple_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
id = ID(xml_obj);
if (id == NULL) {
pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
- crm_element_name(xml_obj));
+ xml_obj->name);
return;
}
@@ -316,7 +312,7 @@ unpack_simple_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
id);
return;
} else {
- ticket = g_hash_table_lookup(data_set->tickets, ticket_str);
+ ticket = g_hash_table_lookup(scheduler->tickets, ticket_str);
}
if (ticket == NULL) {
@@ -329,7 +325,7 @@ unpack_simple_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
pcmk__config_err("Ignoring constraint '%s' without resource", id);
return;
} else {
- rsc = pcmk__find_constraint_resource(data_set->resources, rsc_id);
+ rsc = pcmk__find_constraint_resource(scheduler->resources, rsc_id);
}
if (rsc == NULL) {
@@ -354,20 +350,20 @@ unpack_simple_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
}
}
- rsc_ticket_new(id, rsc, ticket, state, loss_policy, data_set);
+ rsc_ticket_new(id, rsc, ticket, state, loss_policy);
}
// \return Standard Pacemaker return code
static int
unpack_rsc_ticket_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
const char *id = NULL;
const char *rsc_id = NULL;
const char *state = NULL;
- pe_resource_t *rsc = NULL;
- pe_tag_t *tag = NULL;
+ pcmk_resource_t *rsc = NULL;
+ pcmk_tag_t *tag = NULL;
xmlNode *rsc_set = NULL;
@@ -378,12 +374,12 @@ unpack_rsc_ticket_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
id = ID(xml_obj);
if (id == NULL) {
pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
- crm_element_name(xml_obj));
+ xml_obj->name);
return pcmk_rc_unpack_error;
}
// Check whether there are any resource sets with template or tag references
- *expanded_xml = pcmk__expand_tags_in_sets(xml_obj, data_set);
+ *expanded_xml = pcmk__expand_tags_in_sets(xml_obj, scheduler);
if (*expanded_xml != NULL) {
crm_log_xml_trace(*expanded_xml, "Expanded rsc_ticket");
return pcmk_rc_ok;
@@ -394,7 +390,7 @@ unpack_rsc_ticket_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
return pcmk_rc_ok;
}
- if (!pcmk__valid_resource_or_tag(data_set, rsc_id, &rsc, &tag)) {
+ if (!pcmk__valid_resource_or_tag(scheduler, rsc_id, &rsc, &tag)) {
pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
"valid resource or tag", id, rsc_id);
return pcmk_rc_unpack_error;
@@ -408,9 +404,9 @@ unpack_rsc_ticket_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
*expanded_xml = copy_xml(xml_obj);
- // Convert template/tag reference in "rsc" into resource_set under rsc_ticket
+ // Convert any template or tag reference in "rsc" into ticket resource_set
if (!pcmk__tag_to_set(*expanded_xml, &rsc_set, XML_COLOC_ATTR_SOURCE,
- false, data_set)) {
+ false, scheduler)) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
return pcmk_rc_unpack_error;
@@ -432,16 +428,15 @@ unpack_rsc_ticket_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
}
void
-pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
+pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pcmk_scheduler_t *scheduler)
{
xmlNode *set = NULL;
bool any_sets = false;
const char *id = NULL;
- const char *ticket_str = crm_element_value(xml_obj, XML_TICKET_ATTR_TICKET);
- const char *loss_policy = crm_element_value(xml_obj, XML_TICKET_ATTR_LOSS_POLICY);
+ const char *ticket_str = NULL;
- pe_ticket_t *ticket = NULL;
+ pcmk_ticket_t *ticket = NULL;
xmlNode *orig_xml = NULL;
xmlNode *expanded_xml = NULL;
@@ -451,30 +446,31 @@ pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
id = ID(xml_obj);
if (id == NULL) {
pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
- crm_element_name(xml_obj));
+ xml_obj->name);
return;
}
- if (data_set->tickets == NULL) {
- data_set->tickets = pcmk__strkey_table(free, destroy_ticket);
+ if (scheduler->tickets == NULL) {
+ scheduler->tickets = pcmk__strkey_table(free, destroy_ticket);
}
+ ticket_str = crm_element_value(xml_obj, XML_TICKET_ATTR_TICKET);
if (ticket_str == NULL) {
pcmk__config_err("Ignoring constraint '%s' without ticket", id);
return;
} else {
- ticket = g_hash_table_lookup(data_set->tickets, ticket_str);
+ ticket = g_hash_table_lookup(scheduler->tickets, ticket_str);
}
if (ticket == NULL) {
- ticket = ticket_new(ticket_str, data_set);
+ ticket = ticket_new(ticket_str, scheduler);
if (ticket == NULL) {
return;
}
}
if (unpack_rsc_ticket_tags(xml_obj, &expanded_xml,
- data_set) != pcmk_rc_ok) {
+ scheduler) != pcmk_rc_ok) {
return;
}
if (expanded_xml != NULL) {
@@ -485,11 +481,15 @@ pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
for (set = first_named_child(xml_obj, XML_CONS_TAG_RSC_SET); set != NULL;
set = crm_next_same_xml(set)) {
+ const char *loss_policy = NULL;
+
any_sets = true;
- set = expand_idref(set, data_set->input);
+ set = expand_idref(set, scheduler->input);
+ loss_policy = crm_element_value(xml_obj, XML_TICKET_ATTR_LOSS_POLICY);
+
if ((set == NULL) // Configuration error, message already logged
|| (unpack_rsc_ticket_set(set, ticket, loss_policy,
- data_set) != pcmk_rc_ok)) {
+ scheduler) != pcmk_rc_ok)) {
if (expanded_xml != NULL) {
free_xml(expanded_xml);
}
@@ -503,7 +503,7 @@ pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
}
if (!any_sets) {
- unpack_simple_rsc_ticket(xml_obj, data_set);
+ unpack_simple_rsc_ticket(xml_obj, scheduler);
}
}
@@ -517,12 +517,12 @@ pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
* \param[in,out] rsc Resource to check
*/
void
-pcmk__require_promotion_tickets(pe_resource_t *rsc)
+pcmk__require_promotion_tickets(pcmk_resource_t *rsc)
{
for (GList *item = rsc->rsc_tickets; item != NULL; item = item->next) {
rsc_ticket_t *rsc_ticket = (rsc_ticket_t *) item->data;
- if ((rsc_ticket->role == RSC_ROLE_PROMOTED)
+ if ((rsc_ticket->role == pcmk_role_promoted)
&& (!rsc_ticket->ticket->granted || rsc_ticket->ticket->standby)) {
resource_location(rsc, NULL, -INFINITY,
"__stateful_without_ticket__", rsc->cluster);
diff --git a/lib/pacemaker/pcmk_sched_utilization.c b/lib/pacemaker/pcmk_sched_utilization.c
index 0a4bec3..962a94c 100644
--- a/lib/pacemaker/pcmk_sched_utilization.c
+++ b/lib/pacemaker/pcmk_sched_utilization.c
@@ -13,9 +13,6 @@
#include "libpacemaker_private.h"
-// Name for a pseudo-op to use in ordering constraints for utilization
-#define LOAD_STOPPED "load_stopped"
-
/*!
* \internal
* \brief Get integer utilization from a string
@@ -46,8 +43,8 @@ utilization_value(const char *s)
*/
struct compare_data {
- const pe_node_t *node1;
- const pe_node_t *node2;
+ const pcmk_node_t *node1;
+ const pcmk_node_t *node2;
bool node2_only;
int result;
};
@@ -56,8 +53,8 @@ struct compare_data {
* \internal
* \brief Compare a single utilization attribute for two nodes
*
- * Compare one utilization attribute for two nodes, incrementing the result if
- * the first node has greater capacity, and decrementing it if the second node
+ * Compare one utilization attribute for two nodes, decrementing the result if
+ * the first node has greater capacity, and incrementing it if the second node
* has greater capacity.
*
* \param[in] key Utilization attribute name to compare
@@ -102,7 +99,8 @@ compare_utilization_value(gpointer key, gpointer value, gpointer user_data)
* if node2 has more free capacity
*/
int
-pcmk__compare_node_capacities(const pe_node_t *node1, const pe_node_t *node2)
+pcmk__compare_node_capacities(const pcmk_node_t *node1,
+ const pcmk_node_t *node2)
{
struct compare_data data = {
.node1 = node1,
@@ -167,7 +165,7 @@ update_utilization_value(gpointer key, gpointer value, gpointer user_data)
*/
void
pcmk__consume_node_capacity(GHashTable *current_utilization,
- const pe_resource_t *rsc)
+ const pcmk_resource_t *rsc)
{
struct calculate_data data = {
.current_utilization = current_utilization,
@@ -186,7 +184,7 @@ pcmk__consume_node_capacity(GHashTable *current_utilization,
*/
void
pcmk__release_node_capacity(GHashTable *current_utilization,
- const pe_resource_t *rsc)
+ const pcmk_resource_t *rsc)
{
struct calculate_data data = {
.current_utilization = current_utilization,
@@ -202,7 +200,7 @@ pcmk__release_node_capacity(GHashTable *current_utilization,
*/
struct capacity_data {
- const pe_node_t *node;
+ const pcmk_node_t *node;
const char *rsc_id;
bool is_enough;
};
@@ -248,7 +246,7 @@ check_capacity(gpointer key, gpointer value, gpointer user_data)
* \return true if node has sufficient capacity for resource, otherwise false
*/
static bool
-have_enough_capacity(const pe_node_t *node, const char *rsc_id,
+have_enough_capacity(const pcmk_node_t *node, const char *rsc_id,
GHashTable *utilization)
{
struct capacity_data data = {
@@ -265,7 +263,7 @@ have_enough_capacity(const pe_node_t *node, const char *rsc_id,
* \internal
* \brief Sum the utilization requirements of a list of resources
*
- * \param[in] orig_rsc Resource being allocated (for logging purposes)
+ * \param[in] orig_rsc Resource being assigned (for logging purposes)
* \param[in] rscs Resources whose utilization should be summed
*
* \return Newly allocated hash table with sum of all utilization values
@@ -273,12 +271,12 @@ have_enough_capacity(const pe_node_t *node, const char *rsc_id,
* g_hash_table_destroy().
*/
static GHashTable *
-sum_resource_utilization(const pe_resource_t *orig_rsc, GList *rscs)
+sum_resource_utilization(const pcmk_resource_t *orig_rsc, GList *rscs)
{
GHashTable *utilization = pcmk__strkey_table(free, free);
for (GList *iter = rscs; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
rsc->cmds->add_utilization(rsc, orig_rsc, rscs, utilization);
}
@@ -294,15 +292,15 @@ sum_resource_utilization(const pe_resource_t *orig_rsc, GList *rscs)
* \return Allowed node for \p rsc with most spare capacity, if there are no
* nodes with enough capacity for \p rsc and all its colocated resources
*/
-const pe_node_t *
-pcmk__ban_insufficient_capacity(pe_resource_t *rsc)
+const pcmk_node_t *
+pcmk__ban_insufficient_capacity(pcmk_resource_t *rsc)
{
bool any_capable = false;
char *rscs_id = NULL;
- pe_node_t *node = NULL;
- const pe_node_t *most_capable_node = NULL;
+ pcmk_node_t *node = NULL;
+ const pcmk_node_t *most_capable_node = NULL;
GList *colocated_rscs = NULL;
- GHashTable *unallocated_utilization = NULL;
+ GHashTable *unassigned_utilization = NULL;
GHashTableIter iter;
CRM_CHECK(rsc != NULL, return NULL);
@@ -326,8 +324,8 @@ pcmk__ban_insufficient_capacity(pe_resource_t *rsc)
colocated_rscs = g_list_append(colocated_rscs, rsc);
}
- // Sum utilization of colocated resources that haven't been allocated yet
- unallocated_utilization = sum_resource_utilization(rsc, colocated_rscs);
+ // Sum utilization of colocated resources that haven't been assigned yet
+ unassigned_utilization = sum_resource_utilization(rsc, colocated_rscs);
// Check whether any node has enough capacity for all the resources
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
@@ -336,7 +334,7 @@ pcmk__ban_insufficient_capacity(pe_resource_t *rsc)
continue;
}
- if (have_enough_capacity(node, rscs_id, unallocated_utilization)) {
+ if (have_enough_capacity(node, rscs_id, unassigned_utilization)) {
any_capable = true;
}
@@ -353,7 +351,7 @@ pcmk__ban_insufficient_capacity(pe_resource_t *rsc)
while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
if (pcmk__node_available(node, true, false)
&& !have_enough_capacity(node, rscs_id,
- unallocated_utilization)) {
+ unassigned_utilization)) {
pe_rsc_debug(rsc, "%s does not have enough capacity for %s",
pe__node_name(node), rscs_id);
resource_location(rsc, node, -INFINITY, "__limit_utilization__",
@@ -376,12 +374,12 @@ pcmk__ban_insufficient_capacity(pe_resource_t *rsc)
}
}
- g_hash_table_destroy(unallocated_utilization);
+ g_hash_table_destroy(unassigned_utilization);
g_list_free(colocated_rscs);
free(rscs_id);
- pe__show_node_weights(true, rsc, "Post-utilization",
- rsc->allowed_nodes, rsc->cluster);
+ pe__show_node_scores(true, rsc, "Post-utilization", rsc->allowed_nodes,
+ rsc->cluster);
return most_capable_node;
}
@@ -389,21 +387,21 @@ pcmk__ban_insufficient_capacity(pe_resource_t *rsc)
* \internal
* \brief Create a new load_stopped pseudo-op for a node
*
- * \param[in] node Node to create op for
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] node Node to create op for
*
* \return Newly created load_stopped op
*/
-static pe_action_t *
-new_load_stopped_op(const pe_node_t *node, pe_working_set_t *data_set)
+static pcmk_action_t *
+new_load_stopped_op(pcmk_node_t *node)
{
- char *load_stopped_task = crm_strdup_printf(LOAD_STOPPED "_%s",
+ char *load_stopped_task = crm_strdup_printf(PCMK_ACTION_LOAD_STOPPED "_%s",
node->details->uname);
- pe_action_t *load_stopped = get_pseudo_op(load_stopped_task, data_set);
+ pcmk_action_t *load_stopped = get_pseudo_op(load_stopped_task,
+ node->details->data_set);
if (load_stopped->node == NULL) {
load_stopped->node = pe__copy_node(node);
- pe__clear_action_flags(load_stopped, pe_action_optional);
+ pe__clear_action_flags(load_stopped, pcmk_action_optional);
}
free(load_stopped_task);
return load_stopped;
@@ -417,33 +415,32 @@ new_load_stopped_op(const pe_node_t *node, pe_working_set_t *data_set)
* \param[in] allowed_nodes List of allowed next nodes for \p rsc
*/
void
-pcmk__create_utilization_constraints(pe_resource_t *rsc,
+pcmk__create_utilization_constraints(pcmk_resource_t *rsc,
const GList *allowed_nodes)
{
const GList *iter = NULL;
- const pe_node_t *node = NULL;
- pe_action_t *load_stopped = NULL;
+ pcmk_action_t *load_stopped = NULL;
pe_rsc_trace(rsc, "Creating utilization constraints for %s - strategy: %s",
rsc->id, rsc->cluster->placement_strategy);
// "stop rsc then load_stopped" constraints for current nodes
for (iter = rsc->running_on; iter != NULL; iter = iter->next) {
- node = (const pe_node_t *) iter->data;
- load_stopped = new_load_stopped_op(node, rsc->cluster);
+ load_stopped = new_load_stopped_op(iter->data);
pcmk__new_ordering(rsc, stop_key(rsc), NULL, NULL, NULL, load_stopped,
- pe_order_load, rsc->cluster);
+ pcmk__ar_if_on_same_node_or_target, rsc->cluster);
}
// "load_stopped then start/migrate_to rsc" constraints for allowed nodes
for (iter = allowed_nodes; iter; iter = iter->next) {
- node = (const pe_node_t *) iter->data;
- load_stopped = new_load_stopped_op(node, rsc->cluster);
+ load_stopped = new_load_stopped_op(iter->data);
pcmk__new_ordering(NULL, NULL, load_stopped, rsc, start_key(rsc), NULL,
- pe_order_load, rsc->cluster);
+ pcmk__ar_if_on_same_node_or_target, rsc->cluster);
pcmk__new_ordering(NULL, NULL, load_stopped,
- rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0), NULL,
- pe_order_load, rsc->cluster);
+ rsc,
+ pcmk__op_key(rsc->id, PCMK_ACTION_MIGRATE_TO, 0),
+ NULL,
+ pcmk__ar_if_on_same_node_or_target, rsc->cluster);
}
}
@@ -451,18 +448,19 @@ pcmk__create_utilization_constraints(pe_resource_t *rsc,
* \internal
* \brief Output node capacities if enabled
*
- * \param[in] desc Prefix for output
- * \param[in,out] data_set Cluster working set
+ * \param[in] desc Prefix for output
+ * \param[in,out] scheduler Scheduler data
*/
void
-pcmk__show_node_capacities(const char *desc, pe_working_set_t *data_set)
+pcmk__show_node_capacities(const char *desc, pcmk_scheduler_t *scheduler)
{
- if (!pcmk_is_set(data_set->flags, pe_flag_show_utilization)) {
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_show_utilization)) {
return;
}
- for (const GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
- const pe_node_t *node = (const pe_node_t *) iter->data;
- pcmk__output_t *out = data_set->priv;
+ for (const GList *iter = scheduler->nodes;
+ iter != NULL; iter = iter->next) {
+ const pcmk_node_t *node = (const pcmk_node_t *) iter->data;
+ pcmk__output_t *out = scheduler->priv;
out->message(out, "node-capacity", node, desc);
}
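
The compare_data/compare_utilization_value changes earlier in this file rely on the standard GLib accumulator idiom: g_hash_table_foreach() invokes a GHFunc once per key/value pair, and the callback adjusts a result field in a caller-owned struct passed as user_data. A minimal sketch with made-up attribute names, not Pacemaker code:

#include <glib.h>

struct sum_data {
    int total;
};

/* GHFunc: called once per key/value pair; accumulates into user_data */
static void
add_value(gpointer key, gpointer value, gpointer user_data)
{
    struct sum_data *data = user_data;

    (void) key;   // key not needed for the sum
    data->total += GPOINTER_TO_INT(value);
}

int
main(void)
{
    struct sum_data data = { .total = 0 };
    GHashTable *table = g_hash_table_new(g_str_hash, g_str_equal);

    g_hash_table_insert(table, (gpointer) "cpu", GINT_TO_POINTER(4));
    g_hash_table_insert(table, (gpointer) "memory", GINT_TO_POINTER(8));

    g_hash_table_foreach(table, add_value, &data);
    g_print("total: %d\n", data.total);

    g_hash_table_destroy(table);
    return 0;
}
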
diff --git a/lib/pacemaker/pcmk_scheduler.c b/lib/pacemaker/pcmk_scheduler.c
index b4e670d..31b2c36 100644
--- a/lib/pacemaker/pcmk_scheduler.c
+++ b/lib/pacemaker/pcmk_scheduler.c
@@ -14,6 +14,7 @@
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <crm/common/xml_internal.h>
+#include <crm/common/scheduler_internal.h>
#include <glib.h>
@@ -25,7 +26,7 @@ CRM_TRACE_INIT_DATA(pacemaker);
/*!
* \internal
- * \brief Do deferred action checks after allocation
+ * \brief Do deferred action checks after assignment
*
* When unpacking the resource history, the scheduler checks for resource
* configurations that have changed since an action was run. However, at that
@@ -39,30 +40,31 @@ CRM_TRACE_INIT_DATA(pacemaker);
* \param[in] check Type of deferred check to do
*/
static void
-check_params(pe_resource_t *rsc, pe_node_t *node, const xmlNode *rsc_op,
- enum pe_check_parameters check)
+check_params(pcmk_resource_t *rsc, pcmk_node_t *node, const xmlNode *rsc_op,
+ enum pcmk__check_parameters check)
{
const char *reason = NULL;
op_digest_cache_t *digest_data = NULL;
switch (check) {
- case pe_check_active:
+ case pcmk__check_active:
if (pcmk__check_action_config(rsc, node, rsc_op)
- && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL)) {
+ && pe_get_failcount(node, rsc, NULL, pcmk__fc_effective,
+ NULL)) {
reason = "action definition changed";
}
break;
- case pe_check_last_failure:
+ case pcmk__check_last_failure:
digest_data = rsc_action_digest_cmp(rsc, rsc_op, node,
rsc->cluster);
switch (digest_data->rc) {
- case RSC_DIGEST_UNKNOWN:
+ case pcmk__digest_unknown:
crm_trace("Resource %s history entry %s on %s has "
"no digest to compare",
rsc->id, ID(rsc_op), node->details->id);
break;
- case RSC_DIGEST_MATCH:
+ case pcmk__digest_match:
break;
default:
reason = "resource parameters have changed";
@@ -86,9 +88,11 @@ check_params(pe_resource_t *rsc, pe_node_t *node, const xmlNode *rsc_op,
* otherwise false
*/
static bool
-failcount_clear_action_exists(const pe_node_t *node, const pe_resource_t *rsc)
+failcount_clear_action_exists(const pcmk_node_t *node,
+ const pcmk_resource_t *rsc)
{
- GList *list = pe__resource_actions(rsc, node, CRM_OP_CLEAR_FAILCOUNT, TRUE);
+ GList *list = pe__resource_actions(rsc, node, PCMK_ACTION_CLEAR_FAILCOUNT,
+ TRUE);
if (list != NULL) {
g_list_free(list);
@@ -101,19 +105,22 @@ failcount_clear_action_exists(const pe_node_t *node, const pe_resource_t *rsc)
* \internal
* \brief Ban a resource from a node if it reached its failure threshold there
*
- * \param[in,out] rsc Resource to check failure threshold for
- * \param[in] node Node to check \p rsc on
+ * \param[in,out] data Resource to check failure threshold for
+ * \param[in] user_data Node to check resource on
*/
static void
-check_failure_threshold(pe_resource_t *rsc, const pe_node_t *node)
+check_failure_threshold(gpointer data, gpointer user_data)
{
+ pcmk_resource_t *rsc = data;
+ const pcmk_node_t *node = user_data;
+
// If this is a collective resource, apply recursively to children instead
if (rsc->children != NULL) {
- g_list_foreach(rsc->children, (GFunc) check_failure_threshold,
- (gpointer) node);
+ g_list_foreach(rsc->children, check_failure_threshold, user_data);
return;
+ }
- } else if (failcount_clear_action_exists(node, rsc)) {
+ if (!failcount_clear_action_exists(node, rsc)) {
/* Don't force the resource away from this node due to a failcount
* that's going to be cleared.
*
@@ -124,10 +131,7 @@ check_failure_threshold(pe_resource_t *rsc, const pe_node_t *node)
* threshold when we shouldn't. Worst case, we stop or move the
* resource, then move it back in the next transition.
*/
- return;
-
- } else {
- pe_resource_t *failed = NULL;
+ pcmk_resource_t *failed = NULL;
if (pcmk__threshold_reached(rsc, node, &failed)) {
resource_location(failed, node, -INFINITY, "__fail_limit__",
@@ -145,23 +149,25 @@ check_failure_threshold(pe_resource_t *rsc, const pe_node_t *node)
* exclusive, probes will only be done on nodes listed in exclusive constraints.
* This function bans the resource from the node if the node is not listed.
*
- * \param[in,out] rsc Resource to check
- * \param[in] node Node to check \p rsc on
+ * \param[in,out] data Resource to check
+ * \param[in] user_data Node to check resource on
*/
static void
-apply_exclusive_discovery(pe_resource_t *rsc, const pe_node_t *node)
+apply_exclusive_discovery(gpointer data, gpointer user_data)
{
+ pcmk_resource_t *rsc = data;
+ const pcmk_node_t *node = user_data;
+
if (rsc->exclusive_discover
|| pe__const_top_resource(rsc, false)->exclusive_discover) {
- pe_node_t *match = NULL;
+ pcmk_node_t *match = NULL;
// If this is a collective resource, apply recursively to children
- g_list_foreach(rsc->children, (GFunc) apply_exclusive_discovery,
- (gpointer) node);
+ g_list_foreach(rsc->children, apply_exclusive_discovery, user_data);
match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if ((match != NULL)
- && (match->rsc_discover_mode != pe_discover_exclusive)) {
+ && (match->rsc_discover_mode != pcmk_probe_exclusive)) {
match->weight = -INFINITY;
}
}
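
check_failure_threshold() and apply_exclusive_discovery() above now take (gpointer data, gpointer user_data) so they match GFunc exactly and can be passed to g_list_foreach() without the old (GFunc) casts. A minimal sketch of that callback shape in plain GLib, with illustrative values and not Pacemaker code:

#include <glib.h>

/* GFunc: void (*)(gpointer data, gpointer user_data) */
static void
print_item(gpointer data, gpointer user_data)
{
    const char *item = data;          // cast back to the real type inside
    const char *prefix = user_data;

    g_print("%s%s\n", prefix, item);
}

int
main(void)
{
    GList *items = NULL;

    items = g_list_append(items, (gpointer) "alpha");
    items = g_list_append(items, (gpointer) "beta");

    g_list_foreach(items, print_item, (gpointer) "item: ");  // no (GFunc) cast
    g_list_free(items);
    return 0;
}
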
@@ -171,24 +177,25 @@ apply_exclusive_discovery(pe_resource_t *rsc, const pe_node_t *node)
* \internal
* \brief Apply stickiness to a resource if appropriate
*
- * \param[in,out] rsc Resource to check for stickiness
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] data Resource to check for stickiness
+ * \param[in] user_data Ignored
*/
static void
-apply_stickiness(pe_resource_t *rsc, pe_working_set_t *data_set)
+apply_stickiness(gpointer data, gpointer user_data)
{
- pe_node_t *node = NULL;
+ pcmk_resource_t *rsc = data;
+ pcmk_node_t *node = NULL;
// If this is a collective resource, apply recursively to children instead
if (rsc->children != NULL) {
- g_list_foreach(rsc->children, (GFunc) apply_stickiness, data_set);
+ g_list_foreach(rsc->children, apply_stickiness, NULL);
return;
}
/* A resource is sticky if it is managed, has stickiness configured, and is
* active on a single node.
*/
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)
|| (rsc->stickiness < 1) || !pcmk__list_of_1(rsc->running_on)) {
return;
}
@@ -200,9 +207,9 @@ apply_stickiness(pe_resource_t *rsc, pe_working_set_t *data_set)
* allowed on the node, so we don't keep the resource somewhere it is no
* longer explicitly enabled.
*/
- if (!pcmk_is_set(rsc->cluster->flags, pe_flag_symmetric_cluster)
- && (pe_hash_table_lookup(rsc->allowed_nodes,
- node->details->id) == NULL)) {
+ if (!pcmk_is_set(rsc->cluster->flags, pcmk_sched_symmetric_cluster)
+ && (g_hash_table_lookup(rsc->allowed_nodes,
+ node->details->id) == NULL)) {
pe_rsc_debug(rsc,
"Ignoring %s stickiness because the cluster is "
"asymmetric and %s is not explicitly allowed",
@@ -212,23 +219,23 @@ apply_stickiness(pe_resource_t *rsc, pe_working_set_t *data_set)
pe_rsc_debug(rsc, "Resource %s has %d stickiness on %s",
rsc->id, rsc->stickiness, pe__node_name(node));
- resource_location(rsc, node, rsc->stickiness, "stickiness", data_set);
+ resource_location(rsc, node, rsc->stickiness, "stickiness", rsc->cluster);
}
/*!
* \internal
* \brief Apply shutdown locks for all resources as appropriate
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
static void
-apply_shutdown_locks(pe_working_set_t *data_set)
+apply_shutdown_locks(pcmk_scheduler_t *scheduler)
{
- if (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
return;
}
- for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
rsc->cmds->shutdown_lock(rsc);
}
@@ -238,25 +245,25 @@ apply_shutdown_locks(pe_working_set_t *data_set)
* \internal
* \brief Calculate the number of available nodes in the cluster
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
static void
-count_available_nodes(pe_working_set_t *data_set)
+count_available_nodes(pcmk_scheduler_t *scheduler)
{
- if (pcmk_is_set(data_set->flags, pe_flag_no_compat)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_no_compat)) {
return;
}
// @COMPAT for API backward compatibility only (cluster does not use value)
- for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) iter->data;
+ for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
+ pcmk_node_t *node = (pcmk_node_t *) iter->data;
if ((node != NULL) && (node->weight >= 0) && node->details->online
&& (node->details->type != node_ping)) {
- data_set->max_valid_nodes++;
+ scheduler->max_valid_nodes++;
}
}
- crm_trace("Online node count: %d", data_set->max_valid_nodes);
+ crm_trace("Online node count: %d", scheduler->max_valid_nodes);
}
/*
@@ -268,112 +275,113 @@ count_available_nodes(pe_working_set_t *data_set)
* migration thresholds, and exclusive resource discovery.
*/
static void
-apply_node_criteria(pe_working_set_t *data_set)
+apply_node_criteria(pcmk_scheduler_t *scheduler)
{
crm_trace("Applying node-specific scheduling criteria");
- apply_shutdown_locks(data_set);
- count_available_nodes(data_set);
- pcmk__apply_locations(data_set);
- g_list_foreach(data_set->resources, (GFunc) apply_stickiness, data_set);
+ apply_shutdown_locks(scheduler);
+ count_available_nodes(scheduler);
+ pcmk__apply_locations(scheduler);
+ g_list_foreach(scheduler->resources, apply_stickiness, NULL);
- for (GList *node_iter = data_set->nodes; node_iter != NULL;
+ for (GList *node_iter = scheduler->nodes; node_iter != NULL;
node_iter = node_iter->next) {
- for (GList *rsc_iter = data_set->resources; rsc_iter != NULL;
+ for (GList *rsc_iter = scheduler->resources; rsc_iter != NULL;
rsc_iter = rsc_iter->next) {
- pe_node_t *node = (pe_node_t *) node_iter->data;
- pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data;
-
- check_failure_threshold(rsc, node);
- apply_exclusive_discovery(rsc, node);
+ check_failure_threshold(rsc_iter->data, node_iter->data);
+ apply_exclusive_discovery(rsc_iter->data, node_iter->data);
}
}
}
/*!
* \internal
- * \brief Allocate resources to nodes
+ * \brief Assign resources to nodes
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
static void
-allocate_resources(pe_working_set_t *data_set)
+assign_resources(pcmk_scheduler_t *scheduler)
{
GList *iter = NULL;
- crm_trace("Allocating resources to nodes");
+ crm_trace("Assigning resources to nodes");
- if (!pcmk__str_eq(data_set->placement_strategy, "default", pcmk__str_casei)) {
- pcmk__sort_resources(data_set);
+ if (!pcmk__str_eq(scheduler->placement_strategy, "default",
+ pcmk__str_casei)) {
+ pcmk__sort_resources(scheduler);
}
- pcmk__show_node_capacities("Original", data_set);
+ pcmk__show_node_capacities("Original", scheduler);
- if (pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
- /* Allocate remote connection resources first (which will also allocate
- * any colocation dependencies). If the connection is migrating, always
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_have_remote_nodes)) {
+ /* Assign remote connection resources first (which will also assign any
+ * colocation dependencies). If the connection is migrating, always
* prefer the partial migration target.
*/
- for (iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (rsc->is_remote_node) {
- pe_rsc_trace(rsc, "Allocating remote connection resource '%s'",
+ pe_rsc_trace(rsc, "Assigning remote connection resource '%s'",
rsc->id);
- rsc->cmds->assign(rsc, rsc->partial_migration_target);
+ rsc->cmds->assign(rsc, rsc->partial_migration_target, true);
}
}
}
/* now do the rest of the resources */
- for (iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (!rsc->is_remote_node) {
- pe_rsc_trace(rsc, "Allocating %s resource '%s'",
- crm_element_name(rsc->xml), rsc->id);
- rsc->cmds->assign(rsc, NULL);
+ pe_rsc_trace(rsc, "Assigning %s resource '%s'",
+ rsc->xml->name, rsc->id);
+ rsc->cmds->assign(rsc, NULL, true);
}
}
- pcmk__show_node_capacities("Remaining", data_set);
+ pcmk__show_node_capacities("Remaining", scheduler);
}
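
As the comment in assign_resources() notes, remote connection resources are assigned in a first pass (so their colocation dependencies land early), and everything else follows in a second pass. A self-contained sketch of that two-pass shape over a GList, with illustrative demo_* types that are not Pacemaker structures:

#include <glib.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in, not a Pacemaker type */
struct demo_rsc {
    const char *id;
    bool is_remote_node;
};

static void
assign_one(struct demo_rsc *rsc)
{
    printf("Assigning %s\n", rsc->id);
}

static void
assign_all(GList *resources)
{
    /* First pass: remote connection resources only */
    for (GList *iter = resources; iter != NULL; iter = iter->next) {
        struct demo_rsc *rsc = iter->data;

        if (rsc->is_remote_node) {
            assign_one(rsc);
        }
    }

    /* Second pass: everything else */
    for (GList *iter = resources; iter != NULL; iter = iter->next) {
        struct demo_rsc *rsc = iter->data;

        if (!rsc->is_remote_node) {
            assign_one(rsc);
        }
    }
}

int
main(void)
{
    struct demo_rsc remote = { "remote-node-1", true };
    struct demo_rsc plain = { "dummy", false };
    GList *resources = NULL;

    resources = g_list_append(resources, &plain);
    resources = g_list_append(resources, &remote);
    assign_all(resources);   /* remote-node-1 is assigned before dummy */
    g_list_free(resources);
    return 0;
}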
/*!
* \internal
* \brief Schedule fail count clearing on online nodes if resource is orphaned
*
- * \param[in,out] rsc Resource to check
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] data Resource to check
+ * \param[in] user_data Ignored
*/
static void
-clear_failcounts_if_orphaned(pe_resource_t *rsc, pe_working_set_t *data_set)
+clear_failcounts_if_orphaned(gpointer data, gpointer user_data)
{
- if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ pcmk_resource_t *rsc = data;
+
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
return;
}
crm_trace("Clear fail counts for orphaned resource %s", rsc->id);
/* There's no need to recurse into rsc->children because those
- * should just be unallocated clone instances.
+ * should just be unassigned clone instances.
*/
- for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) iter->data;
- pe_action_t *clear_op = NULL;
+ for (GList *iter = rsc->cluster->nodes; iter != NULL; iter = iter->next) {
+ pcmk_node_t *node = (pcmk_node_t *) iter->data;
+ pcmk_action_t *clear_op = NULL;
if (!node->details->online) {
continue;
}
- if (pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL) == 0) {
+ if (pe_get_failcount(node, rsc, NULL, pcmk__fc_effective, NULL) == 0) {
continue;
}
- clear_op = pe__clear_failcount(rsc, node, "it is orphaned", data_set);
+ clear_op = pe__clear_failcount(rsc, node, "it is orphaned",
+ rsc->cluster);
/* We can't use order_action_then_stop() here because its
- * pe_order_preserve breaks things
+ * pcmk__ar_guest_allowed breaks things
*/
pcmk__new_ordering(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc),
- NULL, pe_order_optional, data_set);
+ NULL, pcmk__ar_ordered, rsc->cluster);
}
}
@@ -381,28 +389,28 @@ clear_failcounts_if_orphaned(pe_resource_t *rsc, pe_working_set_t *data_set)
* \internal
* \brief Schedule any resource actions needed
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
static void
-schedule_resource_actions(pe_working_set_t *data_set)
+schedule_resource_actions(pcmk_scheduler_t *scheduler)
{
// Process deferred action checks
- pe__foreach_param_check(data_set, check_params);
- pe__free_param_checks(data_set);
+ pe__foreach_param_check(scheduler, check_params);
+ pe__free_param_checks(scheduler);
- if (pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_probe_resources)) {
crm_trace("Scheduling probes");
- pcmk__schedule_probes(data_set);
+ pcmk__schedule_probes(scheduler);
}
- if (pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
- g_list_foreach(data_set->resources,
- (GFunc) clear_failcounts_if_orphaned, data_set);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) {
+ g_list_foreach(scheduler->resources, clear_failcounts_if_orphaned,
+ NULL);
}
crm_trace("Scheduling resource actions");
- for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
rsc->cmds->create_actions(rsc);
}
@@ -417,13 +425,13 @@ schedule_resource_actions(pe_working_set_t *data_set)
* \return true if resource or any descendant is managed, otherwise false
*/
static bool
-is_managed(const pe_resource_t *rsc)
+is_managed(const pcmk_resource_t *rsc)
{
- if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
return true;
}
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- if (is_managed((pe_resource_t *) iter->data)) {
+ if (is_managed((pcmk_resource_t *) iter->data)) {
return true;
}
}
@@ -434,16 +442,16 @@ is_managed(const pe_resource_t *rsc)
* \internal
* \brief Check whether any resources in the cluster are managed
*
- * \param[in] data_set Cluster working set
+ * \param[in] scheduler Scheduler data
*
* \return true if any resource is managed, otherwise false
*/
static bool
-any_managed_resources(const pe_working_set_t *data_set)
+any_managed_resources(const pcmk_scheduler_t *scheduler)
{
- for (const GList *iter = data_set->resources;
+ for (const GList *iter = scheduler->resources;
iter != NULL; iter = iter->next) {
- if (is_managed((const pe_resource_t *) iter->data)) {
+ if (is_managed((const pcmk_resource_t *) iter->data)) {
return true;
}
}
@@ -456,16 +464,14 @@ any_managed_resources(const pe_working_set_t *data_set)
*
* \param[in] node Node to check
* \param[in] have_managed Whether any resource in cluster is managed
- * \param[in] data_set Cluster working set
*
* \return true if \p node should be fenced, otherwise false
*/
static bool
-needs_fencing(const pe_node_t *node, bool have_managed,
- const pe_working_set_t *data_set)
+needs_fencing(const pcmk_node_t *node, bool have_managed)
{
return have_managed && node->details->unclean
- && pe_can_fence(data_set, node);
+ && pe_can_fence(node->details->data_set, node);
}
/*!
@@ -477,7 +483,7 @@ needs_fencing(const pe_node_t *node, bool have_managed,
* \return true if \p node should be shut down, otherwise false
*/
static bool
-needs_shutdown(const pe_node_t *node)
+needs_shutdown(const pcmk_node_t *node)
{
if (pe__is_guest_or_remote_node(node)) {
/* Do not send shutdown actions for Pacemaker Remote nodes.
@@ -492,24 +498,24 @@ needs_shutdown(const pe_node_t *node)
* \internal
* \brief Track and order non-DC fencing
*
- * \param[in,out] list List of existing non-DC fencing actions
- * \param[in,out] action Fencing action to prepend to \p list
- * \param[in] data_set Cluster working set
+ * \param[in,out] list List of existing non-DC fencing actions
+ * \param[in,out] action Fencing action to prepend to \p list
+ * \param[in] scheduler Scheduler data
*
* \return (Possibly new) head of \p list
*/
static GList *
-add_nondc_fencing(GList *list, pe_action_t *action,
- const pe_working_set_t *data_set)
+add_nondc_fencing(GList *list, pcmk_action_t *action,
+ const pcmk_scheduler_t *scheduler)
{
- if (!pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_concurrent_fencing)
&& (list != NULL)) {
/* Concurrent fencing is disabled, so order each non-DC
* fencing in a chain. If there is any DC fencing or
* shutdown, it will be ordered after the last action in the
* chain later.
*/
- order_actions((pe_action_t *) list->data, action, pe_order_optional);
+ order_actions((pcmk_action_t *) list->data, action, pcmk__ar_ordered);
}
return g_list_prepend(list, action);
}
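
With concurrent fencing disabled, each new non-DC fencing action is ordered after the current head of the list and then prepended, which serializes the fencing operations into a chain. A standalone sketch of that chaining idiom; the demo_* names and order_after() are illustrative, not Pacemaker APIs:

#include <glib.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_action {
    const char *name;
};

static void
order_after(struct demo_action *first, struct demo_action *then)
{
    printf("Order: %s -> %s\n", first->name, then->name);
}

/* Mirror of the chaining logic: order the new action after the current
 * head of the list, then make the new action the head.
 */
static GList *
add_chained(GList *list, struct demo_action *action, bool concurrent)
{
    if (!concurrent && (list != NULL)) {
        order_after(list->data, action);
    }
    return g_list_prepend(list, action);
}

int
main(void)
{
    struct demo_action f1 = { "fence-node1" };
    struct demo_action f2 = { "fence-node2" };
    struct demo_action f3 = { "fence-node3" };
    GList *chain = NULL;

    chain = add_chained(chain, &f1, false);
    chain = add_chained(chain, &f2, false);  /* fence-node1 -> fence-node2 */
    chain = add_chained(chain, &f3, false);  /* fence-node2 -> fence-node3 */

    g_list_free(chain);
    return 0;
}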
@@ -519,16 +525,15 @@ add_nondc_fencing(GList *list, pe_action_t *action,
* \brief Schedule a node for fencing
*
* \param[in,out] node Node that requires fencing
- * \param[in,out] data_set Cluster working set
*/
-static pe_action_t *
-schedule_fencing(pe_node_t *node, pe_working_set_t *data_set)
+static pcmk_action_t *
+schedule_fencing(pcmk_node_t *node)
{
- pe_action_t *fencing = pe_fence_op(node, NULL, FALSE, "node is unclean",
- FALSE, data_set);
+ pcmk_action_t *fencing = pe_fence_op(node, NULL, FALSE, "node is unclean",
+ FALSE, node->details->data_set);
pe_warn("Scheduling node %s for fencing", pe__node_name(node));
- pcmk__order_vs_fence(fencing, data_set);
+ pcmk__order_vs_fence(fencing, node->details->data_set);
return fencing;
}
@@ -536,50 +541,52 @@ schedule_fencing(pe_node_t *node, pe_working_set_t *data_set)
* \internal
* \brief Create and order node fencing and shutdown actions
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
static void
-schedule_fencing_and_shutdowns(pe_working_set_t *data_set)
+schedule_fencing_and_shutdowns(pcmk_scheduler_t *scheduler)
{
- pe_action_t *dc_down = NULL;
+ pcmk_action_t *dc_down = NULL;
bool integrity_lost = false;
- bool have_managed = any_managed_resources(data_set);
+ bool have_managed = any_managed_resources(scheduler);
GList *fencing_ops = NULL;
GList *shutdown_ops = NULL;
crm_trace("Scheduling fencing and shutdowns as needed");
if (!have_managed) {
- crm_notice("No fencing will be done until there are resources to manage");
+ crm_notice("No fencing will be done until there are resources "
+ "to manage");
}
// Check each node for whether it needs fencing or shutdown
- for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) iter->data;
- pe_action_t *fencing = NULL;
+ for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
+ pcmk_node_t *node = (pcmk_node_t *) iter->data;
+ pcmk_action_t *fencing = NULL;
/* Guest nodes are "fenced" by recovering their container resource,
* so handle them separately.
*/
if (pe__is_guest_node(node)) {
if (node->details->remote_requires_reset && have_managed
- && pe_can_fence(data_set, node)) {
+ && pe_can_fence(scheduler, node)) {
pcmk__fence_guest(node);
}
continue;
}
- if (needs_fencing(node, have_managed, data_set)) {
- fencing = schedule_fencing(node, data_set);
+ if (needs_fencing(node, have_managed)) {
+ fencing = schedule_fencing(node);
// Track DC and non-DC fence actions separately
if (node->details->is_dc) {
dc_down = fencing;
} else {
- fencing_ops = add_nondc_fencing(fencing_ops, fencing, data_set);
+ fencing_ops = add_nondc_fencing(fencing_ops, fencing,
+ scheduler);
}
} else if (needs_shutdown(node)) {
- pe_action_t *down_op = pcmk__new_shutdown_action(node);
+ pcmk_action_t *down_op = pcmk__new_shutdown_action(node);
// Track DC and non-DC shutdown actions separately
if (node->details->is_dc) {
@@ -597,12 +604,12 @@ schedule_fencing_and_shutdowns(pe_working_set_t *data_set)
}
if (integrity_lost) {
- if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
pe_warn("Resource functionality and data integrity cannot be "
"guaranteed (configure, enable, and test fencing to "
"correct this)");
- } else if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
+ } else if (!pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
crm_notice("Unclean nodes will not be fenced until quorum is "
"attained or no-quorum-policy is set to ignore");
}
@@ -616,13 +623,14 @@ schedule_fencing_and_shutdowns(pe_working_set_t *data_set)
* clone stop that's also ordered before the shutdowns, thus leading to
* a graph loop.
*/
- if (pcmk__str_eq(dc_down->task, CRM_OP_SHUTDOWN, pcmk__str_none)) {
+ if (pcmk__str_eq(dc_down->task, PCMK_ACTION_DO_SHUTDOWN,
+ pcmk__str_none)) {
pcmk__order_after_each(dc_down, shutdown_ops);
}
// Order any non-DC fencing before any DC fencing or shutdown
- if (pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_concurrent_fencing)) {
/* With concurrent fencing, order each non-DC fencing action
* separately before any DC fencing or shutdown.
*/
@@ -633,8 +641,8 @@ schedule_fencing_and_shutdowns(pe_working_set_t *data_set)
* the DC fencing after the last action in the chain (which is the
* first item in the list).
*/
- order_actions((pe_action_t *) fencing_ops->data, dc_down,
- pe_order_optional);
+ order_actions((pcmk_action_t *) fencing_ops->data, dc_down,
+ pcmk__ar_ordered);
}
}
g_list_free(fencing_ops);
@@ -642,24 +650,23 @@ schedule_fencing_and_shutdowns(pe_working_set_t *data_set)
}
static void
-log_resource_details(pe_working_set_t *data_set)
+log_resource_details(pcmk_scheduler_t *scheduler)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
GList *all = NULL;
- /* We need a list of nodes that we are allowed to output information for.
- * This is necessary because out->message for all the resource-related
- * messages expects such a list, due to the `crm_mon --node=` feature. Here,
- * we just make it a list of all the nodes.
+ /* Due to the `crm_mon --node=` feature, out->message() for all the
+ * resource-related messages expects a list of nodes that we are allowed to
+ * output information for. Here, we create a wildcard to match all nodes.
*/
all = g_list_prepend(all, (gpointer) "*");
- for (GList *item = data_set->resources; item != NULL; item = item->next) {
- pe_resource_t *rsc = (pe_resource_t *) item->data;
+ for (GList *item = scheduler->resources; item != NULL; item = item->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) item->data;
// Log all resources except inactive orphans
- if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)
- || (rsc->role != RSC_ROLE_STOPPED)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)
+ || (rsc->role != pcmk_role_stopped)) {
out->message(out, crm_map_element_name(rsc->xml), 0, rsc, all, all);
}
}
@@ -668,12 +675,12 @@ log_resource_details(pe_working_set_t *data_set)
}
static void
-log_all_actions(pe_working_set_t *data_set)
+log_all_actions(pcmk_scheduler_t *scheduler)
{
/* This only ever outputs to the log, so ignore whatever output object was
* previously set and just log instead.
*/
- pcmk__output_t *prev_out = data_set->priv;
+ pcmk__output_t *prev_out = scheduler->priv;
pcmk__output_t *out = NULL;
if (pcmk__log_output_new(&out) != pcmk_rc_ok) {
@@ -683,33 +690,35 @@ log_all_actions(pe_working_set_t *data_set)
pe__register_messages(out);
pcmk__register_lib_messages(out);
pcmk__output_set_log_level(out, LOG_NOTICE);
- data_set->priv = out;
+ scheduler->priv = out;
out->begin_list(out, NULL, NULL, "Actions");
- pcmk__output_actions(data_set);
+ pcmk__output_actions(scheduler);
out->end_list(out);
out->finish(out, CRM_EX_OK, true, NULL);
pcmk__output_free(out);
- data_set->priv = prev_out;
+ scheduler->priv = prev_out;
}
/*!
* \internal
* \brief Log all required but unrunnable actions at trace level
*
- * \param[in] data_set Cluster working set
+ * \param[in] scheduler Scheduler data
*/
static void
-log_unrunnable_actions(const pe_working_set_t *data_set)
+log_unrunnable_actions(const pcmk_scheduler_t *scheduler)
{
- const uint64_t flags = pe_action_optional|pe_action_runnable|pe_action_pseudo;
+ const uint64_t flags = pcmk_action_optional
+ |pcmk_action_runnable
+ |pcmk_action_pseudo;
crm_trace("Required but unrunnable actions:");
- for (const GList *iter = data_set->actions;
+ for (const GList *iter = scheduler->actions;
iter != NULL; iter = iter->next) {
- const pe_action_t *action = (const pe_action_t *) iter->data;
+ const pcmk_action_t *action = (const pcmk_action_t *) iter->data;
if (!pcmk_any_flags_set(action->flags, flags)) {
pcmk__log_action("\t", action, true);
@@ -721,23 +730,23 @@ log_unrunnable_actions(const pe_working_set_t *data_set)
* \internal
* \brief Unpack the CIB for scheduling
*
- * \param[in,out] cib CIB XML to unpack (may be NULL if already unpacked)
- * \param[in] flags Working set flags to set in addition to defaults
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] cib CIB XML to unpack (may be NULL if already unpacked)
+ * \param[in] flags Scheduler flags to set in addition to defaults
+ * \param[in,out] scheduler Scheduler data
*/
static void
-unpack_cib(xmlNode *cib, unsigned long long flags, pe_working_set_t *data_set)
+unpack_cib(xmlNode *cib, unsigned long long flags, pcmk_scheduler_t *scheduler)
{
const char* localhost_save = NULL;
- if (pcmk_is_set(data_set->flags, pe_flag_have_status)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_have_status)) {
crm_trace("Reusing previously calculated cluster status");
- pe__set_working_set_flags(data_set, flags);
+ pe__set_working_set_flags(scheduler, flags);
return;
}
- if (data_set->localhost) {
- localhost_save = data_set->localhost;
+ if (scheduler->localhost) {
+ localhost_save = scheduler->localhost;
}
CRM_ASSERT(cib != NULL);
@@ -745,67 +754,67 @@ unpack_cib(xmlNode *cib, unsigned long long flags, pe_working_set_t *data_set)
/* This will zero the entire struct without freeing anything first, so
* callers should never call pcmk__schedule_actions() with a populated data
- * set unless pe_flag_have_status is set (i.e. cluster_status() was
+ * set unless pcmk_sched_have_status is set (i.e. cluster_status() was
* previously called, whether directly or via pcmk__schedule_actions()).
*/
- set_working_set_defaults(data_set);
+ set_working_set_defaults(scheduler);
if (localhost_save) {
- data_set->localhost = localhost_save;
+ scheduler->localhost = localhost_save;
}
- pe__set_working_set_flags(data_set, flags);
- data_set->input = cib;
- cluster_status(data_set); // Sets pe_flag_have_status
+ pe__set_working_set_flags(scheduler, flags);
+ scheduler->input = cib;
+ cluster_status(scheduler); // Sets pcmk_sched_have_status
}
/*!
* \internal
* \brief Run the scheduler for a given CIB
*
- * \param[in,out] cib CIB XML to use as scheduler input
- * \param[in] flags Working set flags to set in addition to defaults
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] cib CIB XML to use as scheduler input
+ * \param[in] flags Scheduler flags to set in addition to defaults
+ * \param[in,out] scheduler Scheduler data
*/
void
pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
- unpack_cib(cib, flags, data_set);
- pcmk__set_allocation_methods(data_set);
- pcmk__apply_node_health(data_set);
- pcmk__unpack_constraints(data_set);
- if (pcmk_is_set(data_set->flags, pe_flag_check_config)) {
+ unpack_cib(cib, flags, scheduler);
+ pcmk__set_assignment_methods(scheduler);
+ pcmk__apply_node_health(scheduler);
+ pcmk__unpack_constraints(scheduler);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_validate_only)) {
return;
}
- if (!pcmk_is_set(data_set->flags, pe_flag_quick_location) &&
- pcmk__is_daemon) {
- log_resource_details(data_set);
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_location_only)
+ && pcmk__is_daemon) {
+ log_resource_details(scheduler);
}
- apply_node_criteria(data_set);
+ apply_node_criteria(scheduler);
- if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
return;
}
- pcmk__create_internal_constraints(data_set);
- pcmk__handle_rsc_config_changes(data_set);
- allocate_resources(data_set);
- schedule_resource_actions(data_set);
+ pcmk__create_internal_constraints(scheduler);
+ pcmk__handle_rsc_config_changes(scheduler);
+ assign_resources(scheduler);
+ schedule_resource_actions(scheduler);
/* Remote ordering constraints need to happen prior to calculating fencing
* because it is one more place we can mark nodes as needing fencing.
*/
- pcmk__order_remote_connection_actions(data_set);
+ pcmk__order_remote_connection_actions(scheduler);
- schedule_fencing_and_shutdowns(data_set);
- pcmk__apply_orderings(data_set);
- log_all_actions(data_set);
- pcmk__create_graph(data_set);
+ schedule_fencing_and_shutdowns(scheduler);
+ pcmk__apply_orderings(scheduler);
+ log_all_actions(scheduler);
+ pcmk__create_graph(scheduler);
if (get_crm_log_level() == LOG_TRACE) {
- log_unrunnable_actions(data_set);
+ log_unrunnable_actions(scheduler);
}
}
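
Putting the pieces together, a rough sketch of one scheduler run using only entry points that appear in this diff (pe_new_working_set(), pcmk__schedule_actions(), pe_free_working_set()). It assumes the internal headers used by pcmk_simulate.c above and is only meaningful inside the Pacemaker source tree; treat it as an illustration of call order, not a supported API:

#include <crm/pengine/status.h>   /* pe_new_working_set(), pe_free_working_set() */
#include <pacemaker-internal.h>   /* pcmk__schedule_actions() */

static void
run_scheduler_once(xmlNode *cib)
{
    pcmk_scheduler_t *scheduler = pe_new_working_set();

    CRM_ASSERT(scheduler != NULL);

    /* unpack_cib() zeroes the struct first, so a populated scheduler should
     * only be reused when pcmk_sched_have_status is already set (see the
     * comment in unpack_cib() above).
     */
    pcmk__schedule_actions(cib, pcmk_sched_no_compat, scheduler);

    /* scheduler->graph now holds the computed transition graph */
    pe_free_working_set(scheduler);
}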
diff --git a/lib/pacemaker/pcmk_simulate.c b/lib/pacemaker/pcmk_simulate.c
index 165c7d3..167f8a5 100644
--- a/lib/pacemaker/pcmk_simulate.c
+++ b/lib/pacemaker/pcmk_simulate.c
@@ -11,7 +11,7 @@
#include <crm/cib/internal.h>
#include <crm/common/output.h>
#include <crm/common/results.h>
-#include <crm/pengine/pe_types.h>
+#include <crm/common/scheduler.h>
#include <pacemaker-internal.h>
#include <pacemaker.h>
@@ -27,7 +27,7 @@ static cib_t *fake_cib = NULL;
static GList *fake_resource_list = NULL;
static const GList *fake_op_fail_list = NULL;
-static void set_effective_date(pe_working_set_t *data_set, bool print_original,
+static void set_effective_date(pcmk_scheduler_t *scheduler, bool print_original,
const char *use_date);
/*!
@@ -41,7 +41,7 @@ static void set_effective_date(pe_working_set_t *data_set, bool print_original,
* \note It is the caller's responsibility to free the result.
*/
static char *
-create_action_name(const pe_action_t *action, bool verbose)
+create_action_name(const pcmk_action_t *action, bool verbose)
{
char *action_name = NULL;
const char *prefix = "";
@@ -51,11 +51,11 @@ create_action_name(const pe_action_t *action, bool verbose)
if (action->node != NULL) {
action_host = action->node->details->uname;
- } else if (!pcmk_is_set(action->flags, pe_action_pseudo)) {
+ } else if (!pcmk_is_set(action->flags, pcmk_action_pseudo)) {
action_host = "<none>";
}
- if (pcmk__str_eq(action->task, RSC_CANCEL, pcmk__str_none)) {
+ if (pcmk__str_eq(action->task, PCMK_ACTION_CANCEL, pcmk__str_none)) {
prefix = "Cancel ";
task = action->cancel_task;
}
@@ -74,8 +74,8 @@ create_action_name(const pe_action_t *action, bool verbose)
interval_ms = 0;
}
- if (pcmk__strcase_any_of(action->task, RSC_NOTIFY, RSC_NOTIFIED,
- NULL)) {
+ if (pcmk__strcase_any_of(action->task, PCMK_ACTION_NOTIFY,
+ PCMK_ACTION_NOTIFIED, NULL)) {
const char *n_type = g_hash_table_lookup(action->meta,
"notify_key_type");
const char *n_task = g_hash_table_lookup(action->meta,
@@ -96,7 +96,8 @@ create_action_name(const pe_action_t *action, bool verbose)
}
free(key);
- } else if (pcmk__str_eq(action->task, CRM_OP_FENCE, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH,
+ pcmk__str_none)) {
const char *op = g_hash_table_lookup(action->meta, "stonith_action");
action_name = crm_strdup_printf("%s%s '%s' %s",
@@ -127,17 +128,18 @@ create_action_name(const pe_action_t *action, bool verbose)
* \internal
* \brief Display the status of a cluster
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
* \param[in] show_opts How to modify display (as pcmk_show_opt_e flags)
* \param[in] section_opts Sections to display (as pcmk_section_e flags)
* \param[in] title What to use as list title
* \param[in] print_spacer Whether to display a spacer first
*/
static void
-print_cluster_status(pe_working_set_t *data_set, uint32_t show_opts,
- uint32_t section_opts, const char *title, bool print_spacer)
+print_cluster_status(pcmk_scheduler_t *scheduler, uint32_t show_opts,
+ uint32_t section_opts, const char *title,
+ bool print_spacer)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
GList *all = NULL;
crm_exit_t stonith_rc = 0;
enum pcmk_pacemakerd_state state = pcmk_pacemakerd_state_invalid;
@@ -150,7 +152,7 @@ print_cluster_status(pe_working_set_t *data_set, uint32_t show_opts,
PCMK__OUTPUT_SPACER_IF(out, print_spacer);
out->begin_list(out, NULL, NULL, "%s", title);
out->message(out, "cluster-status",
- data_set, state, stonith_rc, NULL,
+ scheduler, state, stonith_rc, NULL,
false, section_opts, show_opts, NULL, all, all);
out->end_list(out);
@@ -161,45 +163,45 @@ print_cluster_status(pe_working_set_t *data_set, uint32_t show_opts,
* \internal
* \brief Display a summary of all actions scheduled in a transition
*
- * \param[in,out] data_set Cluster working set (fully scheduled)
+ * \param[in,out] scheduler Scheduler data (fully scheduled)
* \param[in] print_spacer Whether to display a spacer first
*/
static void
-print_transition_summary(pe_working_set_t *data_set, bool print_spacer)
+print_transition_summary(pcmk_scheduler_t *scheduler, bool print_spacer)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
PCMK__OUTPUT_SPACER_IF(out, print_spacer);
out->begin_list(out, NULL, NULL, "Transition Summary");
- pcmk__output_actions(data_set);
+ pcmk__output_actions(scheduler);
out->end_list(out);
}
/*!
* \internal
- * \brief Reset a cluster working set's input, output, date, and flags
+ * \brief Reset scheduler input, output, date, and flags
*
- * \param[in,out] data_set Cluster working set
- * \param[in] input What to set as cluster input
- * \param[in] out What to set as cluster output object
- * \param[in] use_date What to set as cluster's current timestamp
- * \param[in] flags Cluster flags to add (pe_flag_*)
+ * \param[in,out] scheduler Scheduler data
+ * \param[in] input What to set as cluster input
+ * \param[in] out What to set as cluster output object
+ * \param[in] use_date What to set as cluster's current timestamp
+ * \param[in] flags Group of enum pcmk_scheduler_flags to set
*/
static void
-reset(pe_working_set_t *data_set, xmlNodePtr input, pcmk__output_t *out,
+reset(pcmk_scheduler_t *scheduler, xmlNodePtr input, pcmk__output_t *out,
const char *use_date, unsigned int flags)
{
- data_set->input = input;
- data_set->priv = out;
- set_effective_date(data_set, true, use_date);
+ scheduler->input = input;
+ scheduler->priv = out;
+ set_effective_date(scheduler, true, use_date);
if (pcmk_is_set(flags, pcmk_sim_sanitized)) {
- pe__set_working_set_flags(data_set, pe_flag_sanitized);
+ pe__set_working_set_flags(scheduler, pcmk_sched_sanitized);
}
if (pcmk_is_set(flags, pcmk_sim_show_scores)) {
- pe__set_working_set_flags(data_set, pe_flag_show_scores);
+ pe__set_working_set_flags(scheduler, pcmk_sched_output_scores);
}
if (pcmk_is_set(flags, pcmk_sim_show_utilization)) {
- pe__set_working_set_flags(data_set, pe_flag_show_utilization);
+ pe__set_working_set_flags(scheduler, pcmk_sched_show_utilization);
}
}
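
reset() translates the user-facing pcmk_sim_* options into pcmk_sched_* scheduler flags one bit at a time. A generic, standalone version of that flag-translation pattern, with an illustrative DEMO_IS_SET() macro and demo_* enums standing in for pcmk_is_set() and the real flag sets:

#include <stdint.h>
#include <stdio.h>

#define DEMO_IS_SET(word, flag)  (((word) & (flag)) == (flag))

enum demo_sim_flags {
    demo_sim_show_scores      = (1 << 0),
    demo_sim_show_utilization = (1 << 1),
};

enum demo_sched_flags {
    demo_sched_output_scores    = (1 << 4),
    demo_sched_show_utilization = (1 << 5),
};

/* Translate user-facing simulation options into scheduler flags */
static uint64_t
translate_flags(unsigned int sim_flags)
{
    uint64_t sched_flags = 0;

    if (DEMO_IS_SET(sim_flags, demo_sim_show_scores)) {
        sched_flags |= demo_sched_output_scores;
    }
    if (DEMO_IS_SET(sim_flags, demo_sim_show_utilization)) {
        sched_flags |= demo_sched_show_utilization;
    }
    return sched_flags;
}

int
main(void)
{
    uint64_t flags = translate_flags(demo_sim_show_scores);

    printf("scheduler flags: 0x%llx\n", (unsigned long long) flags);
    return 0;
}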
@@ -207,7 +209,7 @@ reset(pe_working_set_t *data_set, xmlNodePtr input, pcmk__output_t *out,
* \brief Write out a file in dot(1) format describing the actions that will
* be taken by the scheduler in response to an input CIB file.
*
- * \param[in,out] data_set Working set for the cluster
+ * \param[in,out] scheduler Scheduler data
* \param[in] dot_file The filename to write
* \param[in] all_actions Write all actions, even those that are optional
* or are on unmanaged resources
@@ -217,10 +219,10 @@ reset(pe_working_set_t *data_set, xmlNodePtr input, pcmk__output_t *out,
* \return Standard Pacemaker return code
*/
static int
-write_sim_dotfile(pe_working_set_t *data_set, const char *dot_file,
+write_sim_dotfile(pcmk_scheduler_t *scheduler, const char *dot_file,
bool all_actions, bool verbose)
{
- GList *gIter = NULL;
+ GList *iter = NULL;
FILE *dot_strm = fopen(dot_file, "w");
if (dot_strm == NULL) {
@@ -228,30 +230,30 @@ write_sim_dotfile(pe_working_set_t *data_set, const char *dot_file,
}
fprintf(dot_strm, " digraph \"g\" {\n");
- for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ for (iter = scheduler->actions; iter != NULL; iter = iter->next) {
+ pcmk_action_t *action = (pcmk_action_t *) iter->data;
const char *style = "dashed";
const char *font = "black";
const char *color = "black";
char *action_name = create_action_name(action, verbose);
- if (pcmk_is_set(action->flags, pe_action_pseudo)) {
+ if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
font = "orange";
}
- if (pcmk_is_set(action->flags, pe_action_dumped)) {
+ if (pcmk_is_set(action->flags, pcmk_action_added_to_graph)) {
style = "bold";
color = "green";
} else if ((action->rsc != NULL)
- && !pcmk_is_set(action->rsc->flags, pe_rsc_managed)) {
+ && !pcmk_is_set(action->rsc->flags, pcmk_rsc_managed)) {
color = "red";
font = "purple";
if (!all_actions) {
goto do_not_write;
}
- } else if (pcmk_is_set(action->flags, pe_action_optional)) {
+ } else if (pcmk_is_set(action->flags, pcmk_action_optional)) {
color = "blue";
if (!all_actions) {
goto do_not_write;
@@ -259,23 +261,23 @@ write_sim_dotfile(pe_working_set_t *data_set, const char *dot_file,
} else {
color = "red";
- CRM_LOG_ASSERT(!pcmk_is_set(action->flags, pe_action_runnable));
+ CRM_LOG_ASSERT(!pcmk_is_set(action->flags, pcmk_action_runnable));
}
- pe__set_action_flags(action, pe_action_dumped);
+ pe__set_action_flags(action, pcmk_action_added_to_graph);
fprintf(dot_strm, "\"%s\" [ style=%s color=\"%s\" fontcolor=\"%s\"]\n",
action_name, style, color, font);
do_not_write:
free(action_name);
}
- for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ for (iter = scheduler->actions; iter != NULL; iter = iter->next) {
+ pcmk_action_t *action = (pcmk_action_t *) iter->data;
- GList *gIter2 = NULL;
+ for (GList *before_iter = action->actions_before;
+ before_iter != NULL; before_iter = before_iter->next) {
- for (gIter2 = action->actions_before; gIter2 != NULL; gIter2 = gIter2->next) {
- pe_action_wrapper_t *before = (pe_action_wrapper_t *) gIter2->data;
+ pcmk__related_action_t *before = before_iter->data;
char *before_name = NULL;
char *after_name = NULL;
@@ -285,11 +287,12 @@ write_sim_dotfile(pe_working_set_t *data_set, const char *dot_file,
if (before->state == pe_link_dumped) {
optional = false;
style = "bold";
- } else if (before->type == pe_order_none) {
+ } else if ((uint32_t) before->type == pcmk__ar_none) {
continue;
- } else if (pcmk_is_set(before->action->flags, pe_action_dumped)
- && pcmk_is_set(action->flags, pe_action_dumped)
- && before->type != pe_order_load) {
+ } else if (pcmk_is_set(before->action->flags,
+ pcmk_action_added_to_graph)
+ && pcmk_is_set(action->flags, pcmk_action_added_to_graph)
+ && (uint32_t) before->type != pcmk__ar_if_on_same_node_or_target) {
optional = false;
}
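
write_sim_dotfile() emits one quoted node line per action, with style attributes reflecting whether the action is pseudo, added to the graph, unmanaged, or optional, followed by quoted edge lines for the orderings. A standalone sketch that produces the same dot(1) shape, with made-up action names and attribute values:

#include <stdio.h>

int
main(void)
{
    FILE *dot_strm = fopen("demo.dot", "w");

    if (dot_strm == NULL) {
        perror("fopen");
        return 1;
    }

    /* Node lines: quoted action name plus style/color/fontcolor attributes */
    fprintf(dot_strm, " digraph \"g\" {\n");
    fprintf(dot_strm,
            "\"rsc1_start_0 node1\" [ style=bold color=\"green\" fontcolor=\"black\"]\n");
    fprintf(dot_strm,
            "\"rsc1_monitor_10000 node1\" [ style=bold color=\"green\" fontcolor=\"black\"]\n");

    /* Edge line: one ordering between the two actions */
    fprintf(dot_strm,
            "\"rsc1_start_0 node1\" -> \"rsc1_monitor_10000 node1\" [ style = bold]\n");
    fprintf(dot_strm, "}\n");

    fclose(dot_strm);
    return 0;
}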
@@ -314,23 +317,23 @@ write_sim_dotfile(pe_working_set_t *data_set, const char *dot_file,
* \brief Profile the configuration updates and scheduler actions in a single
* CIB file, printing the profiling timings.
*
- * \note \p data_set->priv must have been set to a valid \p pcmk__output_t
+ * \note \p scheduler->priv must have been set to a valid \p pcmk__output_t
* object before this function is called.
*
- * \param[in] xml_file The CIB file to profile
- * \param[in] repeat Number of times to run
- * \param[in,out] data_set Working set for the cluster
- * \param[in] use_date The date to set the cluster's time to (may be NULL)
+ * \param[in] xml_file The CIB file to profile
+ * \param[in] repeat Number of times to run
+ * \param[in,out] scheduler Scheduler data
+ * \param[in] use_date The date to set the cluster's time to (may be NULL)
*/
static void
-profile_file(const char *xml_file, long long repeat, pe_working_set_t *data_set,
- const char *use_date)
+profile_file(const char *xml_file, long long repeat,
+ pcmk_scheduler_t *scheduler, const char *use_date)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
xmlNode *cib_object = NULL;
clock_t start = 0;
clock_t end;
- unsigned long long data_set_flags = pe_flag_no_compat;
+ unsigned long long scheduler_flags = pcmk_sched_no_compat;
CRM_ASSERT(out != NULL);
@@ -351,20 +354,20 @@ profile_file(const char *xml_file, long long repeat, pe_working_set_t *data_set,
return;
}
- if (pcmk_is_set(data_set->flags, pe_flag_show_scores)) {
- data_set_flags |= pe_flag_show_scores;
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_output_scores)) {
+ scheduler_flags |= pcmk_sched_output_scores;
}
- if (pcmk_is_set(data_set->flags, pe_flag_show_utilization)) {
- data_set_flags |= pe_flag_show_utilization;
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_show_utilization)) {
+ scheduler_flags |= pcmk_sched_show_utilization;
}
for (int i = 0; i < repeat; ++i) {
xmlNode *input = (repeat == 1)? cib_object : copy_xml(cib_object);
- data_set->input = input;
- set_effective_date(data_set, false, use_date);
- pcmk__schedule_actions(input, data_set_flags, data_set);
- pe_reset_working_set(data_set);
+ scheduler->input = input;
+ set_effective_date(scheduler, false, use_date);
+ pcmk__schedule_actions(input, scheduler_flags, scheduler);
+ pe_reset_working_set(scheduler);
}
end = clock();
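
profile_file() brackets its scheduling loop with clock() calls and reports the elapsed CPU time. The same timing pattern in standalone form, with do_work() as a placeholder for a single scheduler run:

#include <stdio.h>
#include <time.h>

/* Placeholder workload standing in for one scheduler run */
static void
do_work(void)
{
    volatile unsigned long sum = 0;

    for (unsigned long i = 0; i < 1000000UL; i++) {
        sum += i;
    }
}

int
main(void)
{
    const int repeat = 10;
    clock_t start = clock();
    clock_t end;

    for (int i = 0; i < repeat; i++) {
        do_work();
    }
    end = clock();

    printf("%d runs in %.3f seconds\n", repeat,
           (double) (end - start) / CLOCKS_PER_SEC);
    return 0;
}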
@@ -372,10 +375,10 @@ profile_file(const char *xml_file, long long repeat, pe_working_set_t *data_set,
}
void
-pcmk__profile_dir(const char *dir, long long repeat, pe_working_set_t *data_set,
- const char *use_date)
+pcmk__profile_dir(const char *dir, long long repeat,
+ pcmk_scheduler_t *scheduler, const char *use_date)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
struct dirent **namelist;
int file_num = scandir(dir, &namelist, 0, alphasort);
@@ -398,9 +401,10 @@ pcmk__profile_dir(const char *dir, long long repeat, pe_working_set_t *data_set,
free(namelist[file_num]);
continue;
}
- snprintf(buffer, sizeof(buffer), "%s/%s", dir, namelist[file_num]->d_name);
+ snprintf(buffer, sizeof(buffer), "%s/%s",
+ dir, namelist[file_num]->d_name);
if (stat(buffer, &prop) == 0 && S_ISREG(prop.st_mode)) {
- profile_file(buffer, repeat, data_set, use_date);
+ profile_file(buffer, repeat, scheduler, use_date);
}
free(namelist[file_num]);
}
@@ -414,37 +418,37 @@ pcmk__profile_dir(const char *dir, long long repeat, pe_working_set_t *data_set,
* \brief Set the date of the cluster, either to the value given by
* \p use_date, or to the "execution-date" value in the CIB.
*
- * \note \p data_set->priv must have been set to a valid \p pcmk__output_t
+ * \note \p scheduler->priv must have been set to a valid \p pcmk__output_t
* object before this function is called.
*
- * \param[in,out] data_set Working set for the cluster
+ * \param[in,out] scheduler Scheduler data
* \param[in] print_original If \p true, the "execution-date" should
* also be printed
* \param[in] use_date The date to set the cluster's time to
* (may be NULL)
*/
static void
-set_effective_date(pe_working_set_t *data_set, bool print_original,
+set_effective_date(pcmk_scheduler_t *scheduler, bool print_original,
const char *use_date)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
time_t original_date = 0;
CRM_ASSERT(out != NULL);
- crm_element_value_epoch(data_set->input, "execution-date", &original_date);
+ crm_element_value_epoch(scheduler->input, "execution-date", &original_date);
if (use_date) {
- data_set->now = crm_time_new(use_date);
+ scheduler->now = crm_time_new(use_date);
out->info(out, "Setting effective cluster time: %s", use_date);
- crm_time_log(LOG_NOTICE, "Pretending 'now' is", data_set->now,
+ crm_time_log(LOG_NOTICE, "Pretending 'now' is", scheduler->now,
crm_time_log_date | crm_time_log_timeofday);
} else if (original_date != 0) {
- data_set->now = pcmk__copy_timet(original_date);
+ scheduler->now = pcmk__copy_timet(original_date);
if (print_original) {
- char *when = crm_time_as_string(data_set->now,
+ char *when = crm_time_as_string(scheduler->now,
crm_time_log_date|crm_time_log_timeofday);
out->info(out, "Using the original execution date of: %s", when);
@@ -543,7 +547,8 @@ simulate_resource_action(pcmk__graph_t *graph, pcmk__graph_action_t *action)
}
// Certain actions need to be displayed but don't need history entries
- if (pcmk__strcase_any_of(operation, "delete", RSC_METADATA, NULL)) {
+ if (pcmk__strcase_any_of(operation, PCMK_ACTION_DELETE,
+ PCMK_ACTION_META_DATA, NULL)) {
out->message(out, "inject-rsc-action", resource, operation, node,
(guint) 0);
goto done; // Confirm action and update graph
@@ -684,7 +689,7 @@ simulate_fencing_action(pcmk__graph_t *graph, pcmk__graph_action_t *action)
out->message(out, "inject-fencing-action", target, op);
- if (!pcmk__str_eq(op, "on", pcmk__str_casei)) {
+ if (!pcmk__str_eq(op, PCMK_ACTION_ON, pcmk__str_casei)) {
int rc = pcmk_ok;
GString *xpath = g_string_sized_new(512);
@@ -725,7 +730,7 @@ simulate_fencing_action(pcmk__graph_t *graph, pcmk__graph_action_t *action)
}
enum pcmk__graph_status
-pcmk__simulate_transition(pe_working_set_t *data_set, cib_t *cib,
+pcmk__simulate_transition(pcmk_scheduler_t *scheduler, cib_t *cib,
const GList *op_fail_list)
{
pcmk__graph_t *transition = NULL;
@@ -738,7 +743,7 @@ pcmk__simulate_transition(pe_working_set_t *data_set, cib_t *cib,
simulate_fencing_action,
};
- out = data_set->priv;
+ out = scheduler->priv;
fake_cib = cib;
fake_op_fail_list = op_fail_list;
@@ -748,10 +753,10 @@ pcmk__simulate_transition(pe_working_set_t *data_set, cib_t *cib,
}
pcmk__set_graph_functions(&simulation_fns);
- transition = pcmk__unpack_graph(data_set->graph, crm_system_name);
+ transition = pcmk__unpack_graph(scheduler->graph, crm_system_name);
pcmk__log_graph(LOG_DEBUG, transition);
- fake_resource_list = data_set->resources;
+ fake_resource_list = scheduler->resources;
do {
graph_rc = pcmk__execute_graph(transition);
} while (graph_rc == pcmk__graph_active);
@@ -772,15 +777,15 @@ pcmk__simulate_transition(pe_working_set_t *data_set, cib_t *cib,
cib_sync_call|cib_scope_local);
CRM_ASSERT(rc == pcmk_ok);
- pe_reset_working_set(data_set);
- data_set->input = cib_object;
+ pe_reset_working_set(scheduler);
+ scheduler->input = cib_object;
out->end_list(out);
}
return graph_rc;
}
int
-pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out,
+pcmk__simulate(pcmk_scheduler_t *scheduler, pcmk__output_t *out,
const pcmk_injections_t *injections, unsigned int flags,
uint32_t section_opts, const char *use_date,
const char *input_file, const char *graph_file,
@@ -796,8 +801,8 @@ pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out,
goto simulate_done;
}
- reset(data_set, input, out, use_date, flags);
- cluster_status(data_set);
+ reset(scheduler, input, out, use_date, flags);
+ cluster_status(scheduler);
if ((cib->variant == cib_native)
&& pcmk_is_set(section_opts, pcmk_section_times)) {
@@ -805,29 +810,30 @@ pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out,
// Currently used only in the times section
pcmk__query_node_name(out, 0, &pcmk__our_nodename, 0);
}
- data_set->localhost = pcmk__our_nodename;
+ scheduler->localhost = pcmk__our_nodename;
}
if (!out->is_quiet(out)) {
- if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
- printed = out->message(out, "maint-mode", data_set->flags);
+ const bool show_pending = pcmk_is_set(flags, pcmk_sim_show_pending);
+
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_in_maintenance)) {
+ printed = out->message(out, "maint-mode", scheduler->flags);
}
- if (data_set->disabled_resources || data_set->blocked_resources) {
+ if (scheduler->disabled_resources || scheduler->blocked_resources) {
PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok);
printed = out->info(out,
"%d of %d resource instances DISABLED and "
"%d BLOCKED from further action due to failure",
- data_set->disabled_resources,
- data_set->ninstances,
- data_set->blocked_resources);
+ scheduler->disabled_resources,
+ scheduler->ninstances,
+ scheduler->blocked_resources);
}
/* Most formatted output headers use caps for each word, but this one
* only has the first word capitalized for compatibility with pcs.
*/
- print_cluster_status(data_set,
- pcmk_is_set(flags, pcmk_sim_show_pending)? pcmk_show_pending : 0,
+ print_cluster_status(scheduler, (show_pending? pcmk_show_pending : 0),
section_opts, "Current cluster status",
(printed == pcmk_rc_ok));
printed = pcmk_rc_ok;
@@ -845,7 +851,7 @@ pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out,
|| (injections->watchdog != NULL)) {
PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok);
- pcmk__inject_scheduler_input(data_set, cib, injections);
+ pcmk__inject_scheduler_input(scheduler, cib, injections);
printed = pcmk_rc_ok;
rc = cib->cmds->query(cib, NULL, &input, cib_sync_call);
@@ -854,9 +860,9 @@ pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out,
goto simulate_done;
}
- cleanup_calculations(data_set);
- reset(data_set, input, out, use_date, flags);
- cluster_status(data_set);
+ cleanup_calculations(scheduler);
+ reset(scheduler, input, out, use_date, flags);
+ cluster_status(scheduler);
}
if (input_file != NULL) {
@@ -869,28 +875,29 @@ pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out,
if (pcmk_any_flags_set(flags, pcmk_sim_process | pcmk_sim_simulate)) {
pcmk__output_t *logger_out = NULL;
- unsigned long long data_set_flags = pe_flag_no_compat;
+ unsigned long long scheduler_flags = pcmk_sched_no_compat;
- if (pcmk_is_set(data_set->flags, pe_flag_show_scores)) {
- data_set_flags |= pe_flag_show_scores;
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_output_scores)) {
+ scheduler_flags |= pcmk_sched_output_scores;
}
- if (pcmk_is_set(data_set->flags, pe_flag_show_utilization)) {
- data_set_flags |= pe_flag_show_utilization;
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_show_utilization)) {
+ scheduler_flags |= pcmk_sched_show_utilization;
}
- if (pcmk_all_flags_set(data_set->flags,
- pe_flag_show_scores|pe_flag_show_utilization)) {
+ if (pcmk_all_flags_set(scheduler->flags,
+ pcmk_sched_output_scores
+ |pcmk_sched_show_utilization)) {
PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok);
out->begin_list(out, NULL, NULL,
- "Allocation Scores and Utilization Information");
+ "Assignment Scores and Utilization Information");
printed = pcmk_rc_ok;
- } else if (pcmk_is_set(data_set->flags, pe_flag_show_scores)) {
+ } else if (pcmk_is_set(scheduler->flags, pcmk_sched_output_scores)) {
PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok);
- out->begin_list(out, NULL, NULL, "Allocation Scores");
+ out->begin_list(out, NULL, NULL, "Assignment Scores");
printed = pcmk_rc_ok;
- } else if (pcmk_is_set(data_set->flags, pe_flag_show_utilization)) {
+ } else if (pcmk_is_set(scheduler->flags, pcmk_sched_show_utilization)) {
PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok);
out->begin_list(out, NULL, NULL, "Utilization Information");
printed = pcmk_rc_ok;
@@ -902,23 +909,23 @@ pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out,
}
pe__register_messages(logger_out);
pcmk__register_lib_messages(logger_out);
- data_set->priv = logger_out;
+ scheduler->priv = logger_out;
}
- pcmk__schedule_actions(input, data_set_flags, data_set);
+ pcmk__schedule_actions(input, scheduler_flags, scheduler);
if (logger_out == NULL) {
out->end_list(out);
} else {
logger_out->finish(logger_out, CRM_EX_OK, true, NULL);
pcmk__output_free(logger_out);
- data_set->priv = out;
+ scheduler->priv = out;
}
input = NULL; /* Don't try and free it twice */
if (graph_file != NULL) {
- rc = write_xml_file(data_set->graph, graph_file, FALSE);
+ rc = write_xml_file(scheduler->graph, graph_file, FALSE);
if (rc < 0) {
rc = pcmk_rc_graph_error;
goto simulate_done;
@@ -926,7 +933,7 @@ pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out,
}
if (dot_file != NULL) {
- rc = write_sim_dotfile(data_set, dot_file,
+ rc = write_sim_dotfile(scheduler, dot_file,
pcmk_is_set(flags, pcmk_sim_all_actions),
pcmk_is_set(flags, pcmk_sim_verbose));
if (rc != pcmk_rc_ok) {
@@ -936,7 +943,7 @@ pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out,
}
if (!out->is_quiet(out)) {
- print_transition_summary(data_set, printed == pcmk_rc_ok);
+ print_transition_summary(scheduler, printed == pcmk_rc_ok);
}
}
@@ -947,8 +954,8 @@ pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out,
}
PCMK__OUTPUT_SPACER_IF(out, printed == pcmk_rc_ok);
- if (pcmk__simulate_transition(data_set, cib,
- injections->op_fail) != pcmk__graph_complete) {
+ if (pcmk__simulate_transition(scheduler, cib, injections->op_fail)
+ != pcmk__graph_complete) {
rc = pcmk_rc_invalid_transition;
}
@@ -956,17 +963,17 @@ pcmk__simulate(pe_working_set_t *data_set, pcmk__output_t *out,
goto simulate_done;
}
- set_effective_date(data_set, true, use_date);
+ set_effective_date(scheduler, true, use_date);
if (pcmk_is_set(flags, pcmk_sim_show_scores)) {
- pe__set_working_set_flags(data_set, pe_flag_show_scores);
+ pe__set_working_set_flags(scheduler, pcmk_sched_output_scores);
}
if (pcmk_is_set(flags, pcmk_sim_show_utilization)) {
- pe__set_working_set_flags(data_set, pe_flag_show_utilization);
+ pe__set_working_set_flags(scheduler, pcmk_sched_show_utilization);
}
- cluster_status(data_set);
- print_cluster_status(data_set, 0, section_opts, "Revised Cluster Status",
+ cluster_status(scheduler);
+ print_cluster_status(scheduler, 0, section_opts, "Revised Cluster Status",
true);
simulate_done:
@@ -975,7 +982,7 @@ simulate_done:
}
int
-pcmk_simulate(xmlNodePtr *xml, pe_working_set_t *data_set,
+pcmk_simulate(xmlNodePtr *xml, pcmk_scheduler_t *scheduler,
const pcmk_injections_t *injections, unsigned int flags,
unsigned int section_opts, const char *use_date,
const char *input_file, const char *graph_file,
@@ -992,7 +999,7 @@ pcmk_simulate(xmlNodePtr *xml, pe_working_set_t *data_set,
pe__register_messages(out);
pcmk__register_lib_messages(out);
- rc = pcmk__simulate(data_set, out, injections, flags, section_opts,
+ rc = pcmk__simulate(scheduler, out, injections, flags, section_opts,
use_date, input_file, graph_file, dot_file);
pcmk__xml_output_finish(out, xml);
return rc;
diff --git a/lib/pacemaker/pcmk_status.c b/lib/pacemaker/pcmk_status.c
index 0e82633..77b6c90 100644
--- a/lib/pacemaker/pcmk_status.c
+++ b/lib/pacemaker/pcmk_status.c
@@ -17,6 +17,7 @@
#include <crm/common/output.h>
#include <crm/common/results.h>
#include <crm/fencing/internal.h>
+#include <crm/pengine/internal.h>
#include <crm/stonith-ng.h>
#include <pacemaker.h>
#include <pacemaker-internal.h>
@@ -79,7 +80,7 @@ pcmk__output_cluster_status(pcmk__output_t *out, stonith_t *stonith, cib_t *cib,
xmlNode *cib_copy = copy_xml(current_cib);
stonith_history_t *stonith_history = NULL;
int history_rc = 0;
- pe_working_set_t *data_set = NULL;
+ pcmk_scheduler_t *scheduler = NULL;
GList *unames = NULL;
GList *resources = NULL;
@@ -99,42 +100,43 @@ pcmk__output_cluster_status(pcmk__output_t *out, stonith_t *stonith, cib_t *cib,
fence_history);
}
- data_set = pe_new_working_set();
- CRM_ASSERT(data_set != NULL);
- pe__set_working_set_flags(data_set, pe_flag_no_compat);
+ scheduler = pe_new_working_set();
+ CRM_ASSERT(scheduler != NULL);
+ pe__set_working_set_flags(scheduler, pcmk_sched_no_compat);
- data_set->input = cib_copy;
- data_set->priv = out;
- cluster_status(data_set);
+ scheduler->input = cib_copy;
+ scheduler->priv = out;
+ cluster_status(scheduler);
if ((cib->variant == cib_native) && pcmk_is_set(show, pcmk_section_times)) {
if (pcmk__our_nodename == NULL) {
// Currently used only in the times section
pcmk__query_node_name(out, 0, &pcmk__our_nodename, 0);
}
- data_set->localhost = pcmk__our_nodename;
+ scheduler->localhost = pcmk__our_nodename;
}
/* Unpack constraints if any section will need them
* (tickets may be referenced in constraints but not granted yet,
* and bans need negative location constraints) */
- if (pcmk_is_set(show, pcmk_section_bans) || pcmk_is_set(show, pcmk_section_tickets)) {
- pcmk__unpack_constraints(data_set);
+ if (pcmk_is_set(show, pcmk_section_bans)
+ || pcmk_is_set(show, pcmk_section_tickets)) {
+ pcmk__unpack_constraints(scheduler);
}
- unames = pe__build_node_name_list(data_set, only_node);
- resources = pe__build_rsc_list(data_set, only_rsc);
+ unames = pe__build_node_name_list(scheduler, only_node);
+ resources = pe__build_rsc_list(scheduler, only_rsc);
/* Always print DC if NULL. */
- if (data_set->dc_node == NULL) {
+ if (scheduler->dc_node == NULL) {
show |= pcmk_section_dc;
}
if (simple_output) {
- rc = pcmk__output_simple_status(out, data_set);
+ rc = pcmk__output_simple_status(out, scheduler);
} else {
out->message(out, "cluster-status",
- data_set, pcmkd_state, pcmk_rc2exitc(history_rc),
+ scheduler, pcmkd_state, pcmk_rc2exitc(history_rc),
stonith_history, fence_history, show, show_opts,
neg_location_prefix, unames, resources);
}
@@ -144,7 +146,7 @@ pcmk__output_cluster_status(pcmk__output_t *out, stonith_t *stonith, cib_t *cib,
stonith_history_free(stonith_history);
stonith_history = NULL;
- pe_free_working_set(data_set);
+ pe_free_working_set(scheduler);
return rc;
}
@@ -155,7 +157,9 @@ pcmk_status(xmlNodePtr *xml)
pcmk__output_t *out = NULL;
int rc = pcmk_rc_ok;
- uint32_t show_opts = pcmk_show_pending | pcmk_show_inactive_rscs | pcmk_show_timing;
+ uint32_t show_opts = pcmk_show_pending
+ |pcmk_show_inactive_rscs
+ |pcmk_show_timing;
cib = cib_new();
@@ -286,33 +290,41 @@ done:
return pcmk_rc_ok;
}
-/* This is an internal-only function that is planned to be deprecated and removed.
- * It should only ever be called from crm_mon.
+/*!
+ * \internal
+ * \brief Output cluster status in Nagios Plugin format
+ *
+ * \param[in,out] out Output object
+ * \param[in] scheduler Scheduler data
+ *
+ * \return Standard Pacemaker return code
+ * \note This is for a deprecated crm_mon option and should be called only for
+ * that.
*/
int
pcmk__output_simple_status(pcmk__output_t *out,
- const pe_working_set_t *data_set)
+ const pcmk_scheduler_t *scheduler)
{
int nodes_online = 0;
int nodes_standby = 0;
- int nodes_maintenance = 0;
+ int nodes_maint = 0;
GString *offline_nodes = NULL;
bool no_dc = false;
bool offline = false;
bool has_warnings = false;
- if (data_set->dc_node == NULL) {
+ if (scheduler->dc_node == NULL) {
has_warnings = true;
no_dc = true;
}
- for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) iter->data;
+ for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
+ pcmk_node_t *node = (pcmk_node_t *) iter->data;
if (node->details->standby && node->details->online) {
nodes_standby++;
} else if (node->details->maintenance && node->details->online) {
- nodes_maintenance++;
+ nodes_maint++;
} else if (node->details->online) {
nodes_online++;
} else {
@@ -338,14 +350,15 @@ pcmk__output_simple_status(pcmk__output_t *out,
char *nodes_maint_s = NULL;
if (nodes_standby > 0) {
- nodes_standby_s = crm_strdup_printf(", %d standby node%s", nodes_standby,
+ nodes_standby_s = crm_strdup_printf(", %d standby node%s",
+ nodes_standby,
pcmk__plural_s(nodes_standby));
}
- if (nodes_maintenance > 0) {
+ if (nodes_maint > 0) {
nodes_maint_s = crm_strdup_printf(", %d maintenance node%s",
- nodes_maintenance,
- pcmk__plural_s(nodes_maintenance));
+ nodes_maint,
+ pcmk__plural_s(nodes_maint));
}
out->info(out, "CLUSTER OK: %d node%s online%s%s, "
@@ -353,7 +366,7 @@ pcmk__output_simple_status(pcmk__output_t *out,
nodes_online, pcmk__plural_s(nodes_online),
nodes_standby_s != NULL ? nodes_standby_s : "",
nodes_maint_s != NULL ? nodes_maint_s : "",
- data_set->ninstances, pcmk__plural_s(data_set->ninstances));
+ scheduler->ninstances, pcmk__plural_s(scheduler->ninstances));
free(nodes_standby_s);
free(nodes_maint_s);
diff --git a/lib/pengine/Makefile.am b/lib/pengine/Makefile.am
index c2a8c90..9ffc745 100644
--- a/lib/pengine/Makefile.am
+++ b/lib/pengine/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2004-2022 the Pacemaker project contributors
+# Copyright 2004-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -15,27 +15,33 @@ include $(top_srcdir)/mk/common.mk
SUBDIRS = . tests
## libraries
-lib_LTLIBRARIES = libpe_rules.la libpe_status.la
-check_LTLIBRARIES = libpe_rules_test.la libpe_status_test.la
+lib_LTLIBRARIES = libpe_rules.la \
+ libpe_status.la
+check_LTLIBRARIES = libpe_rules_test.la \
+ libpe_status_test.la
-## SOURCES
-noinst_HEADERS = variant.h pe_status_private.h
+noinst_HEADERS = pe_status_private.h
-libpe_rules_la_LDFLAGS = -version-info 30:0:4
+libpe_rules_la_LDFLAGS = -version-info 30:1:4
libpe_rules_la_CFLAGS = $(CFLAGS_HARDENED_LIB)
libpe_rules_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB)
libpe_rules_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la
-libpe_rules_la_SOURCES = rules.c rules_alerts.c common.c
-libpe_status_la_LDFLAGS = -version-info 34:0:6
+## Library sources (*must* use += format for bumplibs)
+libpe_rules_la_SOURCES = common.c
+libpe_rules_la_SOURCES += rules.c
+libpe_rules_la_SOURCES += rules_alerts.c
+
+libpe_status_la_LDFLAGS = -version-info 35:0:7
libpe_status_la_CFLAGS = $(CFLAGS_HARDENED_LIB)
libpe_status_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB)
libpe_status_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la
-# Use += rather than backslashed continuation lines for parsing by bumplibs
+
+## Library sources (*must* use += format for bumplibs)
libpe_status_la_SOURCES =
libpe_status_la_SOURCES += bundle.c
libpe_status_la_SOURCES += clone.c
@@ -64,18 +70,26 @@ libpe_status_la_SOURCES += utils.c
include $(top_srcdir)/mk/tap.mk
libpe_rules_test_la_SOURCES = $(libpe_rules_la_SOURCES)
-libpe_rules_test_la_LDFLAGS = $(libpe_rules_la_LDFLAGS) -rpath $(libdir) $(LDFLAGS_WRAP)
+libpe_rules_test_la_LDFLAGS = $(libpe_rules_la_LDFLAGS) \
+ -rpath $(libdir) \
+ $(LDFLAGS_WRAP)
# See comments on libcrmcommon_test_la in lib/common/Makefile.am regarding these flags.
-libpe_rules_test_la_CFLAGS = $(libpe_rules_la_CFLAGS) -DPCMK__UNIT_TESTING \
+libpe_rules_test_la_CFLAGS = $(libpe_rules_la_CFLAGS) \
+ -DPCMK__UNIT_TESTING \
-fno-builtin -fno-inline
-libpe_rules_test_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon_test.la -lcmocka -lm
+libpe_rules_test_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon_test.la \
+ -lcmocka \
+ -lm
libpe_status_test_la_SOURCES = $(libpe_status_la_SOURCES)
-libpe_status_test_la_LDFLAGS = $(libpe_status_la_LDFLAGS) -rpath $(libdir) $(LDFLAGS_WRAP)
+libpe_status_test_la_LDFLAGS = $(libpe_status_la_LDFLAGS) \
+ -rpath $(libdir) \
+ $(LDFLAGS_WRAP)
# See comments on libcrmcommon_test_la in lib/common/Makefile.am regarding these flags.
-libpe_status_test_la_CFLAGS = $(libpe_status_la_CFLAGS) -DPCMK__UNIT_TESTING \
- -fno-builtin -fno-inline
-libpe_status_test_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon_test.la -lcmocka -lm
-
-clean-generic:
- rm -f *.log *.debug *~
+libpe_status_test_la_CFLAGS = $(libpe_status_la_CFLAGS) \
+ -DPCMK__UNIT_TESTING \
+ -fno-builtin \
+ -fno-inline
+libpe_status_test_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon_test.la \
+ -lcmocka \
+ -lm
diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c
index ff1b365..fd859d5 100644
--- a/lib/pengine/bundle.c
+++ b/lib/pengine/bundle.c
@@ -20,8 +20,69 @@
#include <crm/common/xml_internal.h>
#include <pe_status_private.h>
-#define PE__VARIANT_BUNDLE 1
-#include "./variant.h"
+enum pe__bundle_mount_flags {
+ pe__bundle_mount_none = 0x00,
+
+ // mount instance-specific subdirectory rather than source directly
+ pe__bundle_mount_subdir = 0x01
+};
+
+typedef struct {
+ char *source;
+ char *target;
+ char *options;
+ uint32_t flags; // bitmask of pe__bundle_mount_flags
+} pe__bundle_mount_t;
+
+typedef struct {
+ char *source;
+ char *target;
+} pe__bundle_port_t;
+
+enum pe__container_agent {
+ PE__CONTAINER_AGENT_UNKNOWN,
+ PE__CONTAINER_AGENT_DOCKER,
+ PE__CONTAINER_AGENT_RKT,
+ PE__CONTAINER_AGENT_PODMAN,
+};
+
+#define PE__CONTAINER_AGENT_UNKNOWN_S "unknown"
+#define PE__CONTAINER_AGENT_DOCKER_S "docker"
+#define PE__CONTAINER_AGENT_RKT_S "rkt"
+#define PE__CONTAINER_AGENT_PODMAN_S "podman"
+
+typedef struct pe__bundle_variant_data_s {
+ int promoted_max;
+ int nreplicas;
+ int nreplicas_per_host;
+ char *prefix;
+ char *image;
+ const char *ip_last;
+ char *host_network;
+ char *host_netmask;
+ char *control_port;
+ char *container_network;
+ char *ip_range_start;
+ gboolean add_host;
+ gchar *container_host_options;
+ char *container_command;
+ char *launcher_options;
+ const char *attribute_target;
+
+ pcmk_resource_t *child;
+
+ GList *replicas; // pe__bundle_replica_t *
+ GList *ports; // pe__bundle_port_t *
+ GList *mounts; // pe__bundle_mount_t *
+
+ enum pe__container_agent agent_type;
+} pe__bundle_variant_data_t;
+
+#define get_bundle_variant_data(data, rsc) \
+ CRM_ASSERT(rsc != NULL); \
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_bundle); \
+ CRM_ASSERT(rsc->variant_opaque != NULL); \
+ data = (pe__bundle_variant_data_t *) rsc->variant_opaque;
/*!
* \internal
@@ -32,7 +93,7 @@
* \return Maximum replicas for bundle corresponding to \p rsc
*/
int
-pe__bundle_max(const pe_resource_t *rsc)
+pe__bundle_max(const pcmk_resource_t *rsc)
{
const pe__bundle_variant_data_t *bundle_data = NULL;
@@ -42,19 +103,149 @@ pe__bundle_max(const pe_resource_t *rsc)
/*!
* \internal
- * \brief Get maximum number of bundle replicas allowed to run on one node
+ * \brief Get the resource inside a bundle
*
- * \param[in] rsc Bundle or bundled resource to check
+ * \param[in] bundle Bundle to check
*
- * \return Maximum replicas per node for bundle corresponding to \p rsc
+ * \return Resource inside \p bundle if any, otherwise NULL
*/
-int
-pe__bundle_max_per_node(const pe_resource_t *rsc)
+pcmk_resource_t *
+pe__bundled_resource(const pcmk_resource_t *rsc)
{
const pe__bundle_variant_data_t *bundle_data = NULL;
get_bundle_variant_data(bundle_data, pe__const_top_resource(rsc, true));
- return bundle_data->nreplicas_per_host;
+ return bundle_data->child;
+}
+
+/*!
+ * \internal
+ * \brief Get containerized resource corresponding to a given bundle container
+ *
+ * \param[in] instance Collective instance that might be a bundle container
+ *
+ * \return Bundled resource instance inside \p instance if it is a bundle
+ * container instance, otherwise NULL
+ */
+const pcmk_resource_t *
+pe__get_rsc_in_container(const pcmk_resource_t *instance)
+{
+ const pe__bundle_variant_data_t *data = NULL;
+ const pcmk_resource_t *top = pe__const_top_resource(instance, true);
+
+ if ((top == NULL) || (top->variant != pcmk_rsc_variant_bundle)) {
+ return NULL;
+ }
+ get_bundle_variant_data(data, top);
+
+ for (const GList *iter = data->replicas; iter != NULL; iter = iter->next) {
+ const pe__bundle_replica_t *replica = iter->data;
+
+ if (instance == replica->container) {
+ return replica->child;
+ }
+ }
+ return NULL;
+}
+
+/*!
+ * \internal
+ * \brief Check whether a given node is created by a bundle
+ *
+ * \param[in] bundle Bundle resource to check
+ * \param[in] node Node to check
+ *
+ * \return true if \p node is an instance of \p bundle, otherwise false
+ */
+bool
+pe__node_is_bundle_instance(const pcmk_resource_t *bundle,
+ const pcmk_node_t *node)
+{
+ pe__bundle_variant_data_t *bundle_data = NULL;
+
+ get_bundle_variant_data(bundle_data, bundle);
+ for (GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
+ pe__bundle_replica_t *replica = iter->data;
+
+ if (pe__same_node(node, replica->node)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/*!
+ * \internal
+ * \brief Get the container of a bundle's first replica
+ *
+ * \param[in] bundle Bundle resource to get container for
+ *
+ * \return Container resource from first replica of \p bundle if any,
+ * otherwise NULL
+ */
+pcmk_resource_t *
+pe__first_container(const pcmk_resource_t *bundle)
+{
+ const pe__bundle_variant_data_t *bundle_data = NULL;
+ const pe__bundle_replica_t *replica = NULL;
+
+ get_bundle_variant_data(bundle_data, bundle);
+ if (bundle_data->replicas == NULL) {
+ return NULL;
+ }
+ replica = bundle_data->replicas->data;
+ return replica->container;
+}
+
+/*!
+ * \internal
+ * \brief Iterate over bundle replicas
+ *
+ * \param[in,out] bundle Bundle to iterate over
+ * \param[in] fn Function to call for each replica (its return value
+ * indicates whether to continue iterating)
+ * \param[in,out] user_data Pointer to pass to \p fn
+ */
+void
+pe__foreach_bundle_replica(pcmk_resource_t *bundle,
+ bool (*fn)(pe__bundle_replica_t *, void *),
+ void *user_data)
+{
+ const pe__bundle_variant_data_t *bundle_data = NULL;
+
+ get_bundle_variant_data(bundle_data, bundle);
+ for (GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
+ if (!fn((pe__bundle_replica_t *) iter->data, user_data)) {
+ break;
+ }
+ }
+}
+
+/*!
+ * \internal
+ * \brief Iterate over const bundle replicas
+ *
+ * \param[in] bundle Bundle to iterate over
+ * \param[in] fn Function to call for each replica (its return value
+ * indicates whether to continue iterating)
+ * \param[in,out] user_data Pointer to pass to \p fn
+ */
+void
+pe__foreach_const_bundle_replica(const pcmk_resource_t *bundle,
+ bool (*fn)(const pe__bundle_replica_t *,
+ void *),
+ void *user_data)
+{
+ const pe__bundle_variant_data_t *bundle_data = NULL;
+
+ get_bundle_variant_data(bundle_data, bundle);
+ for (const GList *iter = bundle_data->replicas; iter != NULL;
+ iter = iter->next) {
+
+ if (!fn((const pe__bundle_replica_t *) iter->data, user_data)) {
+ break;
+ }
+ }
}
static char *
@@ -159,7 +350,8 @@ valid_network(pe__bundle_variant_data_t *data)
if(data->nreplicas_per_host > 1) {
pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix);
data->nreplicas_per_host = 1;
- // @TODO to be sure: pe__clear_resource_flags(rsc, pe_rsc_unique);
+ // @TODO to be sure:
+ // pe__clear_resource_flags(rsc, pcmk_rsc_unique);
}
return TRUE;
}
@@ -167,7 +359,7 @@ valid_network(pe__bundle_variant_data_t *data)
}
static int
-create_ip_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
+create_ip_resource(pcmk_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica)
{
if(data->ip_range_start) {
@@ -198,7 +390,8 @@ create_ip_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
}
xml_obj = create_xml_node(xml_ip, "operations");
- crm_create_op_xml(xml_obj, ID(xml_ip), "monitor", "60s", NULL);
+ crm_create_op_xml(xml_obj, ID(xml_ip), PCMK_ACTION_MONITOR, "60s",
+ NULL);
// TODO: Other ops? Timeouts and intervals from underlying resource?
@@ -226,7 +419,7 @@ container_agent_str(enum pe__container_agent t)
}
static int
-create_container_resource(pe_resource_t *parent,
+create_container_resource(pcmk_resource_t *parent,
const pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica)
{
@@ -295,11 +488,11 @@ create_container_resource(pe_resource_t *parent,
}
if (data->control_port != NULL) {
- pcmk__g_strcat(buffer, " ", env_opt, "PCMK_remote_port=",
- data->control_port, NULL);
+ pcmk__g_strcat(buffer, " ", env_opt, "PCMK_" PCMK__ENV_REMOTE_PORT "=",
+ data->control_port, NULL);
} else {
- g_string_append_printf(buffer, " %sPCMK_remote_port=%d", env_opt,
- DEFAULT_REMOTE_PORT);
+ g_string_append_printf(buffer, " %sPCMK_" PCMK__ENV_REMOTE_PORT "=%d",
+ env_opt, DEFAULT_REMOTE_PORT);
}
for (GList *iter = data->mounts; iter != NULL; iter = iter->next) {
@@ -449,14 +642,15 @@ create_container_resource(pe_resource_t *parent,
}
xml_obj = create_xml_node(xml_container, "operations");
- crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
+ crm_create_op_xml(xml_obj, ID(xml_container), PCMK_ACTION_MONITOR, "60s",
+ NULL);
// TODO: Other ops? Timeouts and intervals from underlying resource?
if (pe__unpack_resource(xml_container, &replica->container, parent,
parent->cluster) != pcmk_rc_ok) {
return pcmk_rc_unpack_error;
}
- pe__set_resource_flags(replica->container, pe_rsc_replica_container);
+ pe__set_resource_flags(replica->container, pcmk_rsc_replica_container);
parent->children = g_list_append(parent->children, replica->container);
return pcmk_rc_ok;
@@ -469,13 +663,13 @@ create_container_resource(pe_resource_t *parent,
* \param[in] uname Name of node to ban
*/
static void
-disallow_node(pe_resource_t *rsc, const char *uname)
+disallow_node(pcmk_resource_t *rsc, const char *uname)
{
gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname);
if (match) {
- ((pe_node_t *) match)->weight = -INFINITY;
- ((pe_node_t *) match)->rsc_discover_mode = pe_discover_never;
+ ((pcmk_node_t *) match)->weight = -INFINITY;
+ ((pcmk_node_t *) match)->rsc_discover_mode = pcmk_probe_never;
}
if (rsc->children) {
g_list_foreach(rsc->children, (GFunc) disallow_node, (gpointer) uname);
@@ -483,12 +677,12 @@ disallow_node(pe_resource_t *rsc, const char *uname)
}
static int
-create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
+create_remote_resource(pcmk_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica)
{
if (replica->child && valid_network(data)) {
GHashTableIter gIter;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
xmlNode *xml_remote = NULL;
char *id = crm_strdup_printf("%s-%d", data->prefix, replica->offset);
char *port_s = NULL;
@@ -527,8 +721,8 @@ create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
free(port_s);
/* Abandon our created ID, and pull the copy from the XML, because we
- * need something that will get freed during data set cleanup to use as
- * the node ID and uname.
+ * need something that will get freed during scheduler data cleanup to
+ * use as the node ID and uname.
*/
free(id);
id = NULL;
@@ -545,12 +739,12 @@ create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
} else {
node->weight = -INFINITY;
}
- node->rsc_discover_mode = pe_discover_never;
+ node->rsc_discover_mode = pcmk_probe_never;
/* unpack_remote_nodes() ensures that each remote node and guest node
- * has a pe_node_t entry. Ideally, it would do the same for bundle nodes.
- * Unfortunately, a bundle has to be mostly unpacked before it's obvious
- * what nodes will be needed, so we do it just above.
+ * has a pcmk_node_t entry. Ideally, it would do the same for bundle
+ * nodes. Unfortunately, a bundle has to be mostly unpacked before it's
+ * obvious what nodes will be needed, so we do it just above.
*
* Worse, that means that the node may have been utilized while
* unpacking other resources, without our weight correction. The most
@@ -569,7 +763,7 @@ create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
replica->node = pe__copy_node(node);
replica->node->weight = 500;
- replica->node->rsc_discover_mode = pe_discover_exclusive;
+ replica->node->rsc_discover_mode = pcmk_probe_exclusive;
/* Ensure the node shows up as allowed and with the correct discovery set */
if (replica->child->allowed_nodes != NULL) {
@@ -581,7 +775,7 @@ create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
pe__copy_node(replica->node));
{
- pe_node_t *copy = pe__copy_node(replica->node);
+ pcmk_node_t *copy = pe__copy_node(replica->node);
copy->weight = -INFINITY;
g_hash_table_insert(replica->child->parent->allowed_nodes,
(gpointer) replica->node->details->id, copy);
@@ -625,7 +819,7 @@ create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
}
static int
-create_replica_resources(pe_resource_t *parent, pe__bundle_variant_data_t *data,
+create_replica_resources(pcmk_resource_t *parent, pe__bundle_variant_data_t *data,
pe__bundle_replica_t *replica)
{
int rc = pcmk_rc_ok;
@@ -658,7 +852,8 @@ create_replica_resources(pe_resource_t *parent, pe__bundle_variant_data_t *data,
* containers with pacemaker-remoted inside in order to start
* services inside those containers.
*/
- pe__set_resource_flags(replica->remote, pe_rsc_allow_remote_remotes);
+ pe__set_resource_flags(replica->remote,
+ pcmk_rsc_remote_nesting_allowed);
}
return rc;
}
@@ -695,9 +890,9 @@ port_free(pe__bundle_port_t *port)
}
static pe__bundle_replica_t *
-replica_for_remote(pe_resource_t *remote)
+replica_for_remote(pcmk_resource_t *remote)
{
- pe_resource_t *top = remote;
+ pcmk_resource_t *top = remote;
pe__bundle_variant_data_t *bundle_data = NULL;
if (top == NULL) {
@@ -722,7 +917,7 @@ replica_for_remote(pe_resource_t *remote)
}
bool
-pe__bundle_needs_remote_name(pe_resource_t *rsc)
+pe__bundle_needs_remote_name(pcmk_resource_t *rsc)
{
const char *value;
GHashTable *params = NULL;
@@ -740,12 +935,12 @@ pe__bundle_needs_remote_name(pe_resource_t *rsc)
}
const char *
-pe__add_bundle_remote_name(pe_resource_t *rsc, pe_working_set_t *data_set,
+pe__add_bundle_remote_name(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler,
xmlNode *xml, const char *field)
{
// REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with pacemaker remote inside
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
pe__bundle_replica_t *replica = NULL;
if (!pe__bundle_needs_remote_name(rsc)) {
@@ -786,7 +981,7 @@ pe__add_bundle_remote_name(pe_resource_t *rsc, pe_working_set_t *data_set,
} while (0)
gboolean
-pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
+pe__unpack_bundle(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
const char *value = NULL;
xmlNode *xml_obj = NULL;
@@ -819,7 +1014,7 @@ pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
}
// Use 0 for default, minimum, and invalid promoted-max
- value = crm_element_value(xml_obj, XML_RSC_ATTR_PROMOTED_MAX);
+ value = crm_element_value(xml_obj, PCMK_META_PROMOTED_MAX);
if (value == NULL) {
// @COMPAT deprecated since 2.0.0
value = crm_element_value(xml_obj, "masters");
@@ -842,7 +1037,7 @@ pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
value = crm_element_value(xml_obj, "replicas-per-host");
pcmk__scan_min_int(value, &bundle_data->nreplicas_per_host, 1);
if (bundle_data->nreplicas_per_host == 1) {
- pe__clear_resource_flags(rsc, pe_rsc_unique);
+ pe__clear_resource_flags(rsc, pcmk_rsc_unique);
}
bundle_data->container_command = crm_element_value_copy(xml_obj, "run-command");
@@ -934,13 +1129,11 @@ pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
XML_RSC_ATTR_ORDERED, XML_BOOLEAN_TRUE);
value = pcmk__itoa(bundle_data->nreplicas);
- crm_create_nvpair_xml(xml_set, NULL,
- XML_RSC_ATTR_INCARNATION_MAX, value);
+ crm_create_nvpair_xml(xml_set, NULL, PCMK_META_CLONE_MAX, value);
free(value);
value = pcmk__itoa(bundle_data->nreplicas_per_host);
- crm_create_nvpair_xml(xml_set, NULL,
- XML_RSC_ATTR_INCARNATION_NODEMAX, value);
+ crm_create_nvpair_xml(xml_set, NULL, PCMK_META_CLONE_NODE_MAX, value);
free(value);
crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_UNIQUE,
@@ -951,8 +1144,7 @@ pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
XML_RSC_ATTR_PROMOTABLE, XML_BOOLEAN_TRUE);
value = pcmk__itoa(bundle_data->promoted_max);
- crm_create_nvpair_xml(xml_set, NULL,
- XML_RSC_ATTR_PROMOTED_MAX, value);
+ crm_create_nvpair_xml(xml_set, NULL, PCMK_META_PROMOTED_MAX, value);
free(value);
}
@@ -972,7 +1164,7 @@ pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
GString *buffer = NULL;
if (pe__unpack_resource(xml_resource, &(bundle_data->child), rsc,
- data_set) != pcmk_rc_ok) {
+ scheduler) != pcmk_rc_ok) {
return FALSE;
}
@@ -1033,8 +1225,8 @@ pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
replica->offset = lpc++;
// Ensure the child's notify gets set based on the underlying primitive's value
- if (pcmk_is_set(replica->child->flags, pe_rsc_notify)) {
- pe__set_resource_flags(bundle_data->child, pe_rsc_notify);
+ if (pcmk_is_set(replica->child->flags, pcmk_rsc_notify)) {
+ pe__set_resource_flags(bundle_data->child, pcmk_rsc_notify);
}
allocate_ip(bundle_data, replica, buffer);
@@ -1109,7 +1301,7 @@ pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
}
static int
-replica_resource_active(pe_resource_t *rsc, gboolean all)
+replica_resource_active(pcmk_resource_t *rsc, gboolean all)
{
if (rsc) {
gboolean child_active = rsc->fns->active(rsc, all);
@@ -1124,7 +1316,7 @@ replica_resource_active(pe_resource_t *rsc, gboolean all)
}
gboolean
-pe__bundle_active(pe_resource_t *rsc, gboolean all)
+pe__bundle_active(pcmk_resource_t *rsc, gboolean all)
{
pe__bundle_variant_data_t *bundle_data = NULL;
GList *iter = NULL;
@@ -1171,8 +1363,8 @@ pe__bundle_active(pe_resource_t *rsc, gboolean all)
*
* \return Bundle replica if found, NULL otherwise
*/
-pe_resource_t *
-pe__find_bundle_replica(const pe_resource_t *bundle, const pe_node_t *node)
+pcmk_resource_t *
+pe__find_bundle_replica(const pcmk_resource_t *bundle, const pcmk_node_t *node)
{
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_ASSERT(bundle && node);
@@ -1195,7 +1387,7 @@ pe__find_bundle_replica(const pe_resource_t *bundle, const pe_node_t *node)
* \deprecated This function will be removed in a future release
*/
static void
-print_rsc_in_list(pe_resource_t *rsc, const char *pre_text, long options,
+print_rsc_in_list(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
if (rsc != NULL) {
@@ -1214,7 +1406,7 @@ print_rsc_in_list(pe_resource_t *rsc, const char *pre_text, long options,
* \deprecated This function will be removed in a future release
*/
static void
-bundle_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
+bundle_print_xml(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
pe__bundle_variant_data_t *bundle_data = NULL;
@@ -1232,9 +1424,10 @@ bundle_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
status_print(XML_ATTR_ID "=\"%s\" ", rsc->id);
status_print("type=\"%s\" ", container_agent_str(bundle_data->agent_type));
status_print("image=\"%s\" ", bundle_data->image);
- status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_unique));
- status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
- status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
+ status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pcmk_rsc_unique));
+ status_print("managed=\"%s\" ",
+ pe__rsc_bool_str(rsc, pcmk_rsc_managed));
+ status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pcmk_rsc_failed));
status_print(">\n");
for (GList *gIter = bundle_data->replicas; gIter != NULL;
@@ -1254,12 +1447,13 @@ bundle_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
free(child_text);
}
-PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__bundle_xml(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -1313,10 +1507,11 @@ pe__bundle_xml(pcmk__output_t *out, va_list args)
"id", rsc->id,
"type", container_agent_str(bundle_data->agent_type),
"image", bundle_data->image,
- "unique", pe__rsc_bool_str(rsc, pe_rsc_unique),
- "maintenance", pe__rsc_bool_str(rsc, pe_rsc_maintenance),
- "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
- "failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
+ "unique", pe__rsc_bool_str(rsc, pcmk_rsc_unique),
+ "maintenance",
+ pe__rsc_bool_str(rsc, pcmk_rsc_maintenance),
+ "managed", pe__rsc_bool_str(rsc, pcmk_rsc_managed),
+ "failed", pe__rsc_bool_str(rsc, pcmk_rsc_failed),
"description", desc);
CRM_ASSERT(rc == pcmk_rc_ok);
}
@@ -1358,9 +1553,9 @@ pe__bundle_xml(pcmk__output_t *out, va_list args)
static void
pe__bundle_replica_output_html(pcmk__output_t *out, pe__bundle_replica_t *replica,
- pe_node_t *node, uint32_t show_opts)
+ pcmk_node_t *node, uint32_t show_opts)
{
- pe_resource_t *rsc = replica->child;
+ pcmk_resource_t *rsc = replica->child;
int offset = 0;
char buffer[LINE_MAX];
@@ -1394,23 +1589,24 @@ pe__bundle_replica_output_html(pcmk__output_t *out, pe__bundle_replica_t *replic
* otherwise unmanaged, or an empty string otherwise
*/
static const char *
-get_unmanaged_str(const pe_resource_t *rsc)
+get_unmanaged_str(const pcmk_resource_t *rsc)
{
- if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)) {
return " (maintenance)";
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
return " (unmanaged)";
}
return "";
}
-PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__bundle_html(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -1460,7 +1656,7 @@ pe__bundle_html(pcmk__output_t *out, va_list args)
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
(bundle_data->nreplicas > 1)? " set" : "",
rsc->id, bundle_data->image,
- pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
get_unmanaged_str(rsc));
@@ -1497,7 +1693,7 @@ pe__bundle_html(pcmk__output_t *out, va_list args)
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
(bundle_data->nreplicas > 1)? " set" : "",
rsc->id, bundle_data->image,
- pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
get_unmanaged_str(rsc));
@@ -1512,9 +1708,9 @@ pe__bundle_html(pcmk__output_t *out, va_list args)
static void
pe__bundle_replica_output_text(pcmk__output_t *out, pe__bundle_replica_t *replica,
- pe_node_t *node, uint32_t show_opts)
+ pcmk_node_t *node, uint32_t show_opts)
{
- const pe_resource_t *rsc = replica->child;
+ const pcmk_resource_t *rsc = replica->child;
int offset = 0;
char buffer[LINE_MAX];
@@ -1538,12 +1734,13 @@ pe__bundle_replica_output_text(pcmk__output_t *out, pe__bundle_replica_t *replic
pe__common_output_text(out, rsc, buffer, node, show_opts);
}
-PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__bundle_text(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -1593,7 +1790,7 @@ pe__bundle_text(pcmk__output_t *out, va_list args)
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
(bundle_data->nreplicas > 1)? " set" : "",
rsc->id, bundle_data->image,
- pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
get_unmanaged_str(rsc));
@@ -1630,7 +1827,7 @@ pe__bundle_text(pcmk__output_t *out, va_list args)
PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
(bundle_data->nreplicas > 1)? " set" : "",
rsc->id, bundle_data->image,
- pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
get_unmanaged_str(rsc));
@@ -1651,8 +1848,8 @@ static void
print_bundle_replica(pe__bundle_replica_t *replica, const char *pre_text,
long options, void *print_data)
{
- pe_node_t *node = NULL;
- pe_resource_t *rsc = replica->child;
+ pcmk_node_t *node = NULL;
+ pcmk_resource_t *rsc = replica->child;
int offset = 0;
char buffer[LINE_MAX];
@@ -1682,7 +1879,7 @@ print_bundle_replica(pe__bundle_replica_t *replica, const char *pre_text,
* \deprecated This function will be removed in a future release
*/
void
-pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
+pe__print_bundle(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
pe__bundle_variant_data_t *bundle_data = NULL;
@@ -1703,8 +1900,8 @@ pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
status_print("%sContainer bundle%s: %s [%s]%s%s\n",
pre_text, ((bundle_data->nreplicas > 1)? " set" : ""),
rsc->id, bundle_data->image,
- pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
- pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
+ pcmk_is_set(rsc->flags, pcmk_rsc_managed)? "" : " (unmanaged)");
if (options & pe_print_html) {
status_print("<br />\n<ul>\n");
}
@@ -1784,7 +1981,7 @@ free_bundle_replica(pe__bundle_replica_t *replica)
}
void
-pe__free_bundle(pe_resource_t *rsc)
+pe__free_bundle(pcmk_resource_t *rsc)
{
pe__bundle_variant_data_t *bundle_data = NULL;
CRM_CHECK(rsc != NULL, return);
@@ -1818,9 +2015,9 @@ pe__free_bundle(pe_resource_t *rsc)
}
enum rsc_role_e
-pe__bundle_resource_state(const pe_resource_t *rsc, gboolean current)
+pe__bundle_resource_state(const pcmk_resource_t *rsc, gboolean current)
{
- enum rsc_role_e container_role = RSC_ROLE_UNKNOWN;
+ enum rsc_role_e container_role = pcmk_role_unknown;
return container_role;
}
@@ -1832,9 +2029,9 @@ pe__bundle_resource_state(const pe_resource_t *rsc, gboolean current)
* \return Number of configured replicas, or 0 on error
*/
int
-pe_bundle_replicas(const pe_resource_t *rsc)
+pe_bundle_replicas(const pcmk_resource_t *rsc)
{
- if ((rsc == NULL) || (rsc->variant != pe_container)) {
+ if ((rsc == NULL) || (rsc->variant != pcmk_rsc_variant_bundle)) {
return 0;
} else {
pe__bundle_variant_data_t *bundle_data = NULL;
@@ -1845,7 +2042,7 @@ pe_bundle_replicas(const pe_resource_t *rsc)
}
void
-pe__count_bundle(pe_resource_t *rsc)
+pe__count_bundle(pcmk_resource_t *rsc)
{
pe__bundle_variant_data_t *bundle_data = NULL;
@@ -1869,7 +2066,7 @@ pe__count_bundle(pe_resource_t *rsc)
}
gboolean
-pe__bundle_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
+pe__bundle_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent)
{
gboolean passes = FALSE;
@@ -1913,7 +2110,7 @@ pe__bundle_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
* g_list_free().
*/
GList *
-pe__bundle_containers(const pe_resource_t *bundle)
+pe__bundle_containers(const pcmk_resource_t *bundle)
{
GList *containers = NULL;
const pe__bundle_variant_data_t *data = NULL;
@@ -1927,14 +2124,14 @@ pe__bundle_containers(const pe_resource_t *bundle)
return containers;
}
-// Bundle implementation of resource_object_functions_t:active_node()
-pe_node_t *
-pe__bundle_active_node(const pe_resource_t *rsc, unsigned int *count_all,
+// Bundle implementation of pcmk_rsc_methods_t:active_node()
+pcmk_node_t *
+pe__bundle_active_node(const pcmk_resource_t *rsc, unsigned int *count_all,
unsigned int *count_clean)
{
- pe_node_t *active = NULL;
- pe_node_t *node = NULL;
- pe_resource_t *container = NULL;
+ pcmk_node_t *active = NULL;
+ pcmk_node_t *node = NULL;
+ pcmk_resource_t *container = NULL;
GList *containers = NULL;
GList *iter = NULL;
GHashTable *nodes = NULL;
@@ -2002,3 +2199,21 @@ done:
g_hash_table_destroy(nodes);
return active;
}
+
+/*!
+ * \internal
+ * \brief Get maximum bundle resource instances per node
+ *
+ * \param[in] rsc Bundle resource to check
+ *
+ * \return Maximum number of \p rsc instances that can be active on one node
+ */
+unsigned int
+pe__bundle_max_per_node(const pcmk_resource_t *rsc)
+{
+ pe__bundle_variant_data_t *bundle_data = NULL;
+
+ get_bundle_variant_data(bundle_data, rsc);
+ CRM_ASSERT(bundle_data->nreplicas_per_host >= 0);
+ return (unsigned int) bundle_data->nreplicas_per_host;
+}
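The bundle.c changes above move the variant data definition out of the shared variant.h and expose it only through new accessors and the two replica iterators. Purely as an illustrative sketch -- not part of this patch, and assuming the internal headers declaring pe__foreach_bundle_replica() and pe__bundle_replica_t are in scope -- a caller counting replicas with an active container could look like:

    /* Illustrative only: uses just the iterator signature and replica
     * fields shown in the hunks above.
     */
    static bool
    count_active_container(pe__bundle_replica_t *replica, void *user_data)
    {
        unsigned int *count = user_data;

        if ((replica->container != NULL)
            && replica->container->fns->active(replica->container, TRUE)) {
            (*count)++;
        }
        return true; /* keep iterating over the remaining replicas */
    }

    static unsigned int
    active_containers(pcmk_resource_t *bundle)
    {
        unsigned int count = 0;

        pe__foreach_bundle_replica(bundle, count_active_container, &count);
        return count;
    }

The callback's boolean return value is what lets a caller stop early; returning true here keeps the loop going over every replica, per the iterator's doc comment.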
diff --git a/lib/pengine/clone.c b/lib/pengine/clone.c
index e411f98..a92a4b7 100644
--- a/lib/pengine/clone.c
+++ b/lib/pengine/clone.c
@@ -18,13 +18,14 @@
#include <crm/msg_xml.h>
#include <crm/common/output.h>
#include <crm/common/xml_internal.h>
+#include <crm/common/scheduler_internal.h>
#ifdef PCMK__COMPAT_2_0
-#define PROMOTED_INSTANCES RSC_ROLE_PROMOTED_LEGACY_S "s"
-#define UNPROMOTED_INSTANCES RSC_ROLE_UNPROMOTED_LEGACY_S "s"
+#define PROMOTED_INSTANCES PCMK__ROLE_PROMOTED_LEGACY "s"
+#define UNPROMOTED_INSTANCES PCMK__ROLE_UNPROMOTED_LEGACY "s"
#else
-#define PROMOTED_INSTANCES RSC_ROLE_PROMOTED_S
-#define UNPROMOTED_INSTANCES RSC_ROLE_UNPROMOTED_S
+#define PROMOTED_INSTANCES PCMK__ROLE_PROMOTED
+#define UNPROMOTED_INSTANCES PCMK__ROLE_UNPROMOTED
#endif
typedef struct clone_variant_data_s {
@@ -36,7 +37,7 @@ typedef struct clone_variant_data_s {
int total_clones;
- uint32_t flags; // Group of enum pe__clone_flags
+ uint32_t flags; // Group of enum pcmk__clone_flags
notify_data_t *stop_notify;
notify_data_t *start_notify;
@@ -46,8 +47,8 @@ typedef struct clone_variant_data_s {
xmlNode *xml_obj_child;
} clone_variant_data_t;
-#define get_clone_variant_data(data, rsc) \
- CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_clone)); \
+#define get_clone_variant_data(data, rsc) \
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_clone)); \
data = (clone_variant_data_t *) rsc->variant_opaque;
/*!
@@ -59,7 +60,7 @@ typedef struct clone_variant_data_s {
* \return Maximum instances for \p clone
*/
int
-pe__clone_max(const pe_resource_t *clone)
+pe__clone_max(const pcmk_resource_t *clone)
{
const clone_variant_data_t *clone_data = NULL;
@@ -76,7 +77,7 @@ pe__clone_max(const pe_resource_t *clone)
* \return Maximum allowed instances per node for \p clone
*/
int
-pe__clone_node_max(const pe_resource_t *clone)
+pe__clone_node_max(const pcmk_resource_t *clone)
{
const clone_variant_data_t *clone_data = NULL;
@@ -93,7 +94,7 @@ pe__clone_node_max(const pe_resource_t *clone)
* \return Maximum promoted instances for \p clone
*/
int
-pe__clone_promoted_max(const pe_resource_t *clone)
+pe__clone_promoted_max(const pcmk_resource_t *clone)
{
clone_variant_data_t *clone_data = NULL;
@@ -110,7 +111,7 @@ pe__clone_promoted_max(const pe_resource_t *clone)
* \return Maximum promoted instances per node for \p clone
*/
int
-pe__clone_promoted_node_max(const pe_resource_t *clone)
+pe__clone_promoted_node_max(const pcmk_resource_t *clone)
{
clone_variant_data_t *clone_data = NULL;
@@ -167,16 +168,16 @@ node_list_to_str(const GList *list)
}
static void
-clone_header(pcmk__output_t *out, int *rc, const pe_resource_t *rsc,
+clone_header(pcmk__output_t *out, int *rc, const pcmk_resource_t *rsc,
clone_variant_data_t *clone_data, const char *desc)
{
GString *attrs = NULL;
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
pcmk__add_separated_word(&attrs, 64, "promotable", ", ");
}
- if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
pcmk__add_separated_word(&attrs, 64, "unique", ", ");
}
@@ -184,10 +185,10 @@ clone_header(pcmk__output_t *out, int *rc, const pe_resource_t *rsc,
pcmk__add_separated_word(&attrs, 64, "disabled", ", ");
}
- if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)) {
pcmk__add_separated_word(&attrs, 64, "maintenance", ", ");
- } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ } else if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pcmk__add_separated_word(&attrs, 64, "unmanaged", ", ");
}
@@ -206,8 +207,8 @@ clone_header(pcmk__output_t *out, int *rc, const pe_resource_t *rsc,
}
void
-pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid,
- pe_working_set_t *data_set)
+pe__force_anon(const char *standard, pcmk_resource_t *rsc, const char *rid,
+ pcmk_scheduler_t *scheduler)
{
if (pe_rsc_is_clone(rsc)) {
clone_variant_data_t *clone_data = rsc->variant_opaque;
@@ -218,15 +219,15 @@ pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid,
clone_data->clone_node_max = 1;
clone_data->clone_max = QB_MIN(clone_data->clone_max,
- g_list_length(data_set->nodes));
+ g_list_length(scheduler->nodes));
}
}
-pe_resource_t *
-find_clone_instance(const pe_resource_t *rsc, const char *sub_id)
+pcmk_resource_t *
+find_clone_instance(const pcmk_resource_t *rsc, const char *sub_id)
{
char *child_id = NULL;
- pe_resource_t *child = NULL;
+ pcmk_resource_t *child = NULL;
const char *child_base = NULL;
clone_variant_data_t *clone_data = NULL;
@@ -240,13 +241,13 @@ find_clone_instance(const pe_resource_t *rsc, const char *sub_id)
return child;
}
-pe_resource_t *
-pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set)
+pcmk_resource_t *
+pe__create_clone_child(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
gboolean as_orphan = FALSE;
char *inc_num = NULL;
char *inc_max = NULL;
- pe_resource_t *child_rsc = NULL;
+ pcmk_resource_t *child_rsc = NULL;
xmlNode *child_copy = NULL;
clone_variant_data_t *clone_data = NULL;
@@ -268,7 +269,7 @@ pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set)
crm_xml_add(child_copy, XML_RSC_ATTR_INCARNATION, inc_num);
if (pe__unpack_resource(child_copy, &child_rsc, rsc,
- data_set) != pcmk_rc_ok) {
+ scheduler) != pcmk_rc_ok) {
goto bail;
}
/* child_rsc->globally_unique = rsc->globally_unique; */
@@ -278,10 +279,10 @@ pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set)
pe_rsc_trace(child_rsc, "Setting clone attributes for: %s", child_rsc->id);
rsc->children = g_list_append(rsc->children, child_rsc);
if (as_orphan) {
- pe__set_resource_flags_recursive(child_rsc, pe_rsc_orphan);
+ pe__set_resource_flags_recursive(child_rsc, pcmk_rsc_removed);
}
- add_hash_param(child_rsc->meta, XML_RSC_ATTR_INCARNATION_MAX, inc_max);
+ add_hash_param(child_rsc->meta, PCMK_META_CLONE_MAX, inc_max);
pe_rsc_trace(rsc, "Added %s instance %s", rsc->id, child_rsc->id);
bail:
@@ -291,90 +292,89 @@ pe__create_clone_child(pe_resource_t *rsc, pe_working_set_t *data_set)
return child_rsc;
}
+/*!
+ * \internal
+ * \brief Unpack a nonnegative integer value from a resource meta-attribute
+ *
+ * \param[in] rsc Resource with meta-attribute
+ * \param[in] meta_name Name of meta-attribute to unpack
+ * \param[in] deprecated_name If not NULL, try unpacking this
+ * if \p meta_name is unset
+ * \param[in] default_value Value to use if unset
+ *
+ * \return Integer parsed from resource's specified meta-attribute if a valid
+ * nonnegative integer, \p default_value if unset, or 0 if invalid
+ */
+static int
+unpack_meta_int(const pcmk_resource_t *rsc, const char *meta_name,
+ const char *deprecated_name, int default_value)
+{
+ int integer = default_value;
+ const char *value = g_hash_table_lookup(rsc->meta, meta_name);
+
+ if ((value == NULL) && (deprecated_name != NULL)) {
+ value = g_hash_table_lookup(rsc->meta, deprecated_name);
+ }
+ if (value != NULL) {
+ pcmk__scan_min_int(value, &integer, 0);
+ }
+ return integer;
+}
+
gboolean
-clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
+clone_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
int lpc = 0;
xmlNode *a_child = NULL;
xmlNode *xml_obj = rsc->xml;
clone_variant_data_t *clone_data = NULL;
- const char *max_clones = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_MAX);
- const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX);
-
pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
clone_data = calloc(1, sizeof(clone_variant_data_t));
rsc->variant_opaque = clone_data;
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
- const char *promoted_max = NULL;
- const char *promoted_node_max = NULL;
-
- promoted_max = g_hash_table_lookup(rsc->meta,
- XML_RSC_ATTR_PROMOTED_MAX);
- if (promoted_max == NULL) {
- // @COMPAT deprecated since 2.0.0
- promoted_max = g_hash_table_lookup(rsc->meta,
- PCMK_XA_PROMOTED_MAX_LEGACY);
- }
-
- promoted_node_max = g_hash_table_lookup(rsc->meta,
- XML_RSC_ATTR_PROMOTED_NODEMAX);
- if (promoted_node_max == NULL) {
- // @COMPAT deprecated since 2.0.0
- promoted_node_max =
- g_hash_table_lookup(rsc->meta,
- PCMK_XA_PROMOTED_NODE_MAX_LEGACY);
- }
-
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
// Use 1 as default but 0 for minimum and invalid
- if (promoted_max == NULL) {
- clone_data->promoted_max = 1;
- } else {
- pcmk__scan_min_int(promoted_max, &(clone_data->promoted_max), 0);
- }
+ // @COMPAT PCMK_XA_PROMOTED_MAX_LEGACY deprecated since 2.0.0
+ clone_data->promoted_max = unpack_meta_int(rsc, PCMK_META_PROMOTED_MAX,
+ PCMK_XA_PROMOTED_MAX_LEGACY,
+ 1);
// Use 1 as default but 0 for minimum and invalid
- if (promoted_node_max == NULL) {
- clone_data->promoted_node_max = 1;
- } else {
- pcmk__scan_min_int(promoted_node_max,
- &(clone_data->promoted_node_max), 0);
- }
+ // @COMPAT PCMK_XA_PROMOTED_NODE_MAX_LEGACY deprecated since 2.0.0
+ clone_data->promoted_node_max =
+ unpack_meta_int(rsc, PCMK_META_PROMOTED_NODE_MAX,
+ PCMK_XA_PROMOTED_NODE_MAX_LEGACY, 1);
}
// Implied by calloc()
/* clone_data->xml_obj_child = NULL; */
// Use 1 as default but 0 for minimum and invalid
- if (max_clones_node == NULL) {
- clone_data->clone_node_max = 1;
- } else {
- pcmk__scan_min_int(max_clones_node, &(clone_data->clone_node_max), 0);
- }
+ clone_data->clone_node_max = unpack_meta_int(rsc, PCMK_META_CLONE_NODE_MAX,
+ NULL, 1);
/* Use number of nodes (but always at least 1, which is handy for crm_verify
* for a CIB without nodes) as default, but 0 for minimum and invalid
*/
- if (max_clones == NULL) {
- clone_data->clone_max = QB_MAX(1, g_list_length(data_set->nodes));
- } else {
- pcmk__scan_min_int(max_clones, &(clone_data->clone_max), 0);
- }
+ clone_data->clone_max = unpack_meta_int(rsc, PCMK_META_CLONE_MAX, NULL,
+ QB_MAX(1, g_list_length(scheduler->nodes)));
if (crm_is_true(g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_ORDERED))) {
clone_data->flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,
"Clone", rsc->id,
clone_data->flags,
- pe__clone_ordered,
- "pe__clone_ordered");
+ pcmk__clone_ordered,
+ "pcmk__clone_ordered");
}
- if ((rsc->flags & pe_rsc_unique) == 0 && clone_data->clone_node_max > 1) {
- pcmk__config_err("Ignoring " XML_RSC_ATTR_PROMOTED_MAX " for %s "
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)
+ && (clone_data->clone_node_max > 1)) {
+
+ pcmk__config_err("Ignoring " PCMK_META_CLONE_NODE_MAX " of %d for %s "
"because anonymous clones support only one instance "
- "per node", rsc->id);
+ "per node", clone_data->clone_node_max, rsc->id);
clone_data->clone_node_max = 1;
}
@@ -382,9 +382,9 @@ clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
pe_rsc_trace(rsc, "\tClone max: %d", clone_data->clone_max);
pe_rsc_trace(rsc, "\tClone node max: %d", clone_data->clone_node_max);
pe_rsc_trace(rsc, "\tClone is unique: %s",
- pe__rsc_bool_str(rsc, pe_rsc_unique));
+ pe__rsc_bool_str(rsc, pcmk_rsc_unique));
pe_rsc_trace(rsc, "\tClone is promotable: %s",
- pe__rsc_bool_str(rsc, pe_rsc_promotable));
+ pe__rsc_bool_str(rsc, pcmk_rsc_promotable));
// Clones may contain a single group or primitive
for (a_child = pcmk__xe_first_child(xml_obj); a_child != NULL;
@@ -415,20 +415,20 @@ clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
* inherit when being unpacked, as well as in resource agents' environment.
*/
add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE,
- pe__rsc_bool_str(rsc, pe_rsc_unique));
+ pe__rsc_bool_str(rsc, pcmk_rsc_unique));
if (clone_data->clone_max <= 0) {
/* Create one child instance so that unpack_find_resource() will hook any
* orphans up to the parent correctly.
*/
- if (pe__create_clone_child(rsc, data_set) == NULL) {
+ if (pe__create_clone_child(rsc, scheduler) == NULL) {
return FALSE;
}
} else {
// Create a child instance for each available instance number
for (lpc = 0; lpc < clone_data->clone_max; lpc++) {
- if (pe__create_clone_child(rsc, data_set) == NULL) {
+ if (pe__create_clone_child(rsc, scheduler) == NULL) {
return FALSE;
}
}
@@ -439,12 +439,12 @@ clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
}
gboolean
-clone_active(pe_resource_t * rsc, gboolean all)
+clone_active(pcmk_resource_t * rsc, gboolean all)
{
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
gboolean child_active = child_rsc->fns->active(child_rsc, all);
if (all == FALSE && child_active) {
@@ -492,27 +492,29 @@ short_print(const char *list, const char *prefix, const char *type,
}
static const char *
-configured_role_str(pe_resource_t * rsc)
+configured_role_str(pcmk_resource_t * rsc)
{
const char *target_role = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_TARGET_ROLE);
if ((target_role == NULL) && rsc->children && rsc->children->data) {
- target_role = g_hash_table_lookup(((pe_resource_t*)rsc->children->data)->meta,
+ pcmk_resource_t *instance = rsc->children->data; // Any instance will do
+
+ target_role = g_hash_table_lookup(instance->meta,
XML_RSC_ATTR_TARGET_ROLE);
}
return target_role;
}
static enum rsc_role_e
-configured_role(pe_resource_t * rsc)
+configured_role(pcmk_resource_t *rsc)
{
const char *target_role = configured_role_str(rsc);
if (target_role) {
return text2role(target_role);
}
- return RSC_ROLE_UNKNOWN;
+ return pcmk_role_unknown;
}
/*!
@@ -520,7 +522,7 @@ configured_role(pe_resource_t * rsc)
* \deprecated This function will be removed in a future release
*/
static void
-clone_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
+clone_print_xml(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
char *child_text = crm_strdup_printf("%s ", pre_text);
@@ -530,19 +532,20 @@ clone_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
status_print("%s<clone ", pre_text);
status_print(XML_ATTR_ID "=\"%s\" ", rsc->id);
status_print("multi_state=\"%s\" ",
- pe__rsc_bool_str(rsc, pe_rsc_promotable));
- status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_unique));
- status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
- status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
+ pe__rsc_bool_str(rsc, pcmk_rsc_promotable));
+ status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pcmk_rsc_unique));
+ status_print("managed=\"%s\" ",
+ pe__rsc_bool_str(rsc, pcmk_rsc_managed));
+ status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pcmk_rsc_failed));
status_print("failure_ignored=\"%s\" ",
- pe__rsc_bool_str(rsc, pe_rsc_failure_ignored));
+ pe__rsc_bool_str(rsc, pcmk_rsc_ignore_failure));
if (target_role) {
status_print("target_role=\"%s\" ", target_role);
}
status_print(">\n");
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
child_rsc->fns->print(child_rsc, child_text, options, print_data);
}
@@ -552,7 +555,7 @@ clone_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
}
bool
-is_set_recursive(const pe_resource_t *rsc, long long flag, bool any)
+is_set_recursive(const pcmk_resource_t *rsc, long long flag, bool any)
{
GList *gIter;
bool all = !any;
@@ -587,7 +590,7 @@ is_set_recursive(const pe_resource_t *rsc, long long flag, bool any)
* \deprecated This function will be removed in a future release
*/
void
-clone_print(pe_resource_t *rsc, const char *pre_text, long options,
+clone_print(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
GString *list_text = NULL;
@@ -616,9 +619,9 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
status_print("%sClone Set: %s [%s]%s%s%s",
pre_text ? pre_text : "", rsc->id, ID(clone_data->xml_obj_child),
- pcmk_is_set(rsc->flags, pe_rsc_promotable)? " (promotable)" : "",
- pcmk_is_set(rsc->flags, pe_rsc_unique)? " (unique)" : "",
- pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : " (unmanaged)");
+ pcmk_is_set(rsc->flags, pcmk_rsc_promotable)? " (promotable)" : "",
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
+ pcmk_is_set(rsc->flags, pcmk_rsc_managed)? "" : " (unmanaged)");
if (options & pe_print_html) {
status_print("\n<ul>\n");
@@ -629,16 +632,17 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
for (; gIter != NULL; gIter = gIter->next) {
gboolean print_full = FALSE;
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
if (options & pe_print_clone_details) {
print_full = TRUE;
}
- if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
// Print individual instance when unique (except stopped orphans)
- if (partially_active || !pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ if (partially_active
+ || !pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
print_full = TRUE;
}
@@ -652,15 +656,15 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
} else if (partially_active == FALSE) {
// List stopped instances when requested (except orphans)
- if (!pcmk_is_set(child_rsc->flags, pe_rsc_orphan)
+ if (!pcmk_is_set(child_rsc->flags, pcmk_rsc_removed)
&& !pcmk_is_set(options, pe_print_clone_active)) {
pcmk__add_word(&stopped_list, 1024, child_rsc->id);
}
- } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
- || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
- || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
+ } else if (is_set_recursive(child_rsc, pcmk_rsc_removed, TRUE)
+ || !is_set_recursive(child_rsc, pcmk_rsc_managed, FALSE)
+ || is_set_recursive(child_rsc, pcmk_rsc_failed, TRUE)) {
// Print individual instance when active orphaned/unmanaged/failed
print_full = TRUE;
@@ -668,8 +672,9 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
} else if (child_rsc->fns->active(child_rsc, TRUE)) {
// Instance of fully active anonymous clone
- pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
+ pcmk_node_t *location = NULL;
+ location = child_rsc->fns->location(child_rsc, NULL, TRUE);
if (location) {
// Instance is active on a single node
@@ -678,7 +683,7 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
if (location->details->online == FALSE && location->details->unclean) {
print_full = TRUE;
- } else if (a_role > RSC_ROLE_UNPROMOTED) {
+ } else if (a_role > pcmk_role_unpromoted) {
promoted_list = g_list_append(promoted_list, location);
} else {
@@ -709,7 +714,7 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
/* Promoted */
promoted_list = g_list_sort(promoted_list, pe__cmp_node_name);
for (gIter = promoted_list; gIter; gIter = gIter->next) {
- pe_node_t *host = gIter->data;
+ pcmk_node_t *host = gIter->data;
pcmk__add_word(&list_text, 1024, host->details->uname);
active_instances++;
@@ -725,17 +730,17 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
/* Started/Unpromoted */
started_list = g_list_sort(started_list, pe__cmp_node_name);
for (gIter = started_list; gIter; gIter = gIter->next) {
- pe_node_t *host = gIter->data;
+ pcmk_node_t *host = gIter->data;
pcmk__add_word(&list_text, 1024, host->details->uname);
active_instances++;
}
if (list_text != NULL) {
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
enum rsc_role_e role = configured_role(rsc);
- if (role == RSC_ROLE_UNPROMOTED) {
+ if (role == pcmk_role_unpromoted) {
short_print((const char *) list_text->str, child_text,
UNPROMOTED_INSTANCES " (target-role)", NULL,
options, print_data);
@@ -756,11 +761,11 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
const char *state = "Stopped";
enum rsc_role_e role = configured_role(rsc);
- if (role == RSC_ROLE_STOPPED) {
+ if (role == pcmk_role_stopped) {
state = "Stopped (disabled)";
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)
&& (clone_data->clone_max > active_instances)) {
GList *nIter;
@@ -780,7 +785,7 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
list = g_list_sort(list, pe__cmp_node_name);
for (nIter = list; nIter != NULL; nIter = nIter->next) {
- pe_node_t *node = (pe_node_t *)nIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) nIter->data;
if (pe_find_node(rsc->running_on, node->details->uname) == NULL) {
pcmk__add_word(&stopped_list, 1024, node->details->uname);
@@ -809,12 +814,13 @@ clone_print(pe_resource_t *rsc, const char *pre_text, long options,
free(child_text);
}
-PCMK__OUTPUT_ARGS("clone", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("clone", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__clone_xml(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -838,7 +844,7 @@ pe__clone_xml(pcmk__output_t *out, va_list args)
all = g_list_prepend(all, (gpointer) "*");
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) {
continue;
@@ -852,16 +858,18 @@ pe__clone_xml(pcmk__output_t *out, va_list args)
printed_header = TRUE;
desc = pe__resource_description(rsc, show_opts);
-
rc = pe__name_and_nvpairs_xml(out, true, "clone", 10,
"id", rsc->id,
- "multi_state", pe__rsc_bool_str(rsc, pe_rsc_promotable),
- "unique", pe__rsc_bool_str(rsc, pe_rsc_unique),
- "maintenance", pe__rsc_bool_str(rsc, pe_rsc_maintenance),
- "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
+ "multi_state",
+ pe__rsc_bool_str(rsc, pcmk_rsc_promotable),
+ "unique", pe__rsc_bool_str(rsc, pcmk_rsc_unique),
+ "maintenance",
+ pe__rsc_bool_str(rsc, pcmk_rsc_maintenance),
+ "managed", pe__rsc_bool_str(rsc, pcmk_rsc_managed),
"disabled", pcmk__btoa(pe__resource_is_disabled(rsc)),
- "failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
- "failure_ignored", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored),
+ "failed", pe__rsc_bool_str(rsc, pcmk_rsc_failed),
+ "failure_ignored",
+ pe__rsc_bool_str(rsc, pcmk_rsc_ignore_failure),
"target_role", configured_role_str(rsc),
"description", desc);
CRM_ASSERT(rc == pcmk_rc_ok);
@@ -879,12 +887,13 @@ pe__clone_xml(pcmk__output_t *out, va_list args)
return rc;
}
-PCMK__OUTPUT_ARGS("clone", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("clone", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__clone_default(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -916,7 +925,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
for (; gIter != NULL; gIter = gIter->next) {
gboolean print_full = FALSE;
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
gboolean partially_active = child_rsc->fns->active(child_rsc, FALSE);
if (pcmk__rsc_filtered_by_node(child_rsc, only_node)) {
@@ -931,9 +940,10 @@ pe__clone_default(pcmk__output_t *out, va_list args)
print_full = TRUE;
}
- if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
// Print individual instance when unique (except stopped orphans)
- if (partially_active || !pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ if (partially_active
+ || !pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
print_full = TRUE;
}
@@ -947,7 +957,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
} else if (partially_active == FALSE) {
// List stopped instances when requested (except orphans)
- if (!pcmk_is_set(child_rsc->flags, pe_rsc_orphan)
+ if (!pcmk_is_set(child_rsc->flags, pcmk_rsc_removed)
&& !pcmk_is_set(show_opts, pcmk_show_clone_detail)
&& pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
if (stopped == NULL) {
@@ -956,9 +966,9 @@ pe__clone_default(pcmk__output_t *out, va_list args)
g_hash_table_insert(stopped, strdup(child_rsc->id), strdup("Stopped"));
}
- } else if (is_set_recursive(child_rsc, pe_rsc_orphan, TRUE)
- || is_set_recursive(child_rsc, pe_rsc_managed, FALSE) == FALSE
- || is_set_recursive(child_rsc, pe_rsc_failed, TRUE)) {
+ } else if (is_set_recursive(child_rsc, pcmk_rsc_removed, TRUE)
+ || !is_set_recursive(child_rsc, pcmk_rsc_managed, FALSE)
+ || is_set_recursive(child_rsc, pcmk_rsc_failed, TRUE)) {
// Print individual instance when active orphaned/unmanaged/failed
print_full = TRUE;
@@ -966,8 +976,9 @@ pe__clone_default(pcmk__output_t *out, va_list args)
} else if (child_rsc->fns->active(child_rsc, TRUE)) {
// Instance of fully active anonymous clone
- pe_node_t *location = child_rsc->fns->location(child_rsc, NULL, TRUE);
+ pcmk_node_t *location = NULL;
+ location = child_rsc->fns->location(child_rsc, NULL, TRUE);
if (location) {
// Instance is active on a single node
@@ -976,7 +987,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
if (location->details->online == FALSE && location->details->unclean) {
print_full = TRUE;
- } else if (a_role > RSC_ROLE_UNPROMOTED) {
+ } else if (a_role > pcmk_role_unpromoted) {
promoted_list = g_list_append(promoted_list, location);
} else {
@@ -1014,7 +1025,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
/* Promoted */
promoted_list = g_list_sort(promoted_list, pe__cmp_node_name);
for (gIter = promoted_list; gIter; gIter = gIter->next) {
- pe_node_t *host = gIter->data;
+ pcmk_node_t *host = gIter->data;
if (!pcmk__str_in_list(host->details->uname, only_node,
pcmk__str_star_matches|pcmk__str_casei)) {
@@ -1037,7 +1048,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
/* Started/Unpromoted */
started_list = g_list_sort(started_list, pe__cmp_node_name);
for (gIter = started_list; gIter; gIter = gIter->next) {
- pe_node_t *host = gIter->data;
+ pcmk_node_t *host = gIter->data;
if (!pcmk__str_in_list(host->details->uname, only_node,
pcmk__str_star_matches|pcmk__str_casei)) {
@@ -1052,10 +1063,10 @@ pe__clone_default(pcmk__output_t *out, va_list args)
if ((list_text != NULL) && (list_text->len > 0)) {
clone_header(out, &rc, rsc, clone_data, desc);
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
enum rsc_role_e role = configured_role(rsc);
- if (role == RSC_ROLE_UNPROMOTED) {
+ if (role == pcmk_role_unpromoted) {
out->list_item(out, NULL,
UNPROMOTED_INSTANCES " (target-role): [ %s ]",
(const char *) list_text->str);
@@ -1075,7 +1086,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
}
if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
- if (!pcmk_is_set(rsc->flags, pe_rsc_unique)
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)
&& (clone_data->clone_max > active_instances)) {
GList *nIter;
@@ -1096,7 +1107,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
list = g_list_sort(list, pe__cmp_node_name);
for (nIter = list; nIter != NULL; nIter = nIter->next) {
- pe_node_t *node = (pe_node_t *)nIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) nIter->data;
if (pe_find_node(rsc->running_on, node->details->uname) == NULL &&
pcmk__str_in_list(node->details->uname, only_node,
@@ -1104,7 +1115,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
xmlNode *probe_op = pe__failed_probe_for_rsc(rsc, node->details->uname);
const char *state = "Stopped";
- if (configured_role(rsc) == RSC_ROLE_STOPPED) {
+ if (configured_role(rsc) == pcmk_role_stopped) {
state = "Stopped (disabled)";
}
@@ -1166,7 +1177,7 @@ pe__clone_default(pcmk__output_t *out, va_list args)
}
void
-clone_free(pe_resource_t * rsc)
+clone_free(pcmk_resource_t * rsc)
{
clone_variant_data_t *clone_data = NULL;
@@ -1175,7 +1186,7 @@ clone_free(pe_resource_t * rsc)
pe_rsc_trace(rsc, "Freeing %s", rsc->id);
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
CRM_ASSERT(child_rsc);
pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
@@ -1200,13 +1211,13 @@ clone_free(pe_resource_t * rsc)
}
enum rsc_role_e
-clone_resource_state(const pe_resource_t * rsc, gboolean current)
+clone_resource_state(const pcmk_resource_t * rsc, gboolean current)
{
- enum rsc_role_e clone_role = RSC_ROLE_UNKNOWN;
+ enum rsc_role_e clone_role = pcmk_role_unknown;
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
enum rsc_role_e a_role = child_rsc->fns->state(child_rsc, current);
if (a_role > clone_role) {
@@ -1222,17 +1233,17 @@ clone_resource_state(const pe_resource_t * rsc, gboolean current)
* \internal
* \brief Check whether a clone has an instance for every node
*
- * \param[in] rsc Clone to check
- * \param[in] data_set Cluster state
+ * \param[in] rsc Clone to check
+ * \param[in] scheduler Scheduler data
*/
bool
-pe__is_universal_clone(const pe_resource_t *rsc,
- const pe_working_set_t *data_set)
+pe__is_universal_clone(const pcmk_resource_t *rsc,
+ const pcmk_scheduler_t *scheduler)
{
if (pe_rsc_is_clone(rsc)) {
clone_variant_data_t *clone_data = rsc->variant_opaque;
- if (clone_data->clone_max == g_list_length(data_set->nodes)) {
+ if (clone_data->clone_max == g_list_length(scheduler->nodes)) {
return TRUE;
}
}
@@ -1240,7 +1251,7 @@ pe__is_universal_clone(const pe_resource_t *rsc,
}
gboolean
-pe__clone_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
+pe__clone_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent)
{
gboolean passes = FALSE;
@@ -1256,9 +1267,9 @@ pe__clone_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
for (const GList *iter = rsc->children;
iter != NULL; iter = iter->next) {
- const pe_resource_t *child_rsc = NULL;
+ const pcmk_resource_t *child_rsc = NULL;
- child_rsc = (const pe_resource_t *) iter->data;
+ child_rsc = (const pcmk_resource_t *) iter->data;
if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) {
passes = TRUE;
break;
@@ -1270,7 +1281,7 @@ pe__clone_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
}
const char *
-pe__clone_child_id(const pe_resource_t *rsc)
+pe__clone_child_id(const pcmk_resource_t *rsc)
{
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, rsc);
@@ -1286,12 +1297,12 @@ pe__clone_child_id(const pe_resource_t *rsc)
* \return true if clone is ordered, otherwise false
*/
bool
-pe__clone_is_ordered(const pe_resource_t *clone)
+pe__clone_is_ordered(const pcmk_resource_t *clone)
{
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, clone);
- return pcmk_is_set(clone_data->flags, pe__clone_ordered);
+ return pcmk_is_set(clone_data->flags, pcmk__clone_ordered);
}
/*!
@@ -1305,7 +1316,7 @@ pe__clone_is_ordered(const pe_resource_t *clone)
* already set or pcmk_rc_already if it was)
*/
int
-pe__set_clone_flag(pe_resource_t *clone, enum pe__clone_flags flag)
+pe__set_clone_flag(pcmk_resource_t *clone, enum pcmk__clone_flags flag)
{
clone_variant_data_t *clone_data = NULL;
@@ -1321,6 +1332,26 @@ pe__set_clone_flag(pe_resource_t *clone, enum pe__clone_flags flag)
/*!
* \internal
+ * \brief Check whether a clone flag is set
+ *
+ * \param[in] clone Clone resource to check
+ * \param[in] flags Flag or flags to check
+ *
+ * \return \c true if all \p flags are set for \p clone, otherwise \c false
+ */
+bool
+pe__clone_flag_is_set(const pcmk_resource_t *clone, uint32_t flags)
+{
+ clone_variant_data_t *clone_data = NULL;
+
+ get_clone_variant_data(clone_data, clone);
+ CRM_ASSERT(clone_data != NULL);
+
+ return pcmk_all_flags_set(clone_data->flags, flags);
+}
+
+/*!
+ * \internal
* \brief Create pseudo-actions needed for promotable clones
*
* \param[in,out] clone Promotable clone to create actions for
@@ -1328,63 +1359,59 @@ pe__set_clone_flag(pe_resource_t *clone, enum pe__clone_flags flag)
* \param[in] any_demoting Whether any instance will be demoted
*/
void
-pe__create_promotable_pseudo_ops(pe_resource_t *clone, bool any_promoting,
+pe__create_promotable_pseudo_ops(pcmk_resource_t *clone, bool any_promoting,
bool any_demoting)
{
- pe_action_t *action = NULL;
- pe_action_t *action_complete = NULL;
+ pcmk_action_t *action = NULL;
+ pcmk_action_t *action_complete = NULL;
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, clone);
// Create a "promote" action for the clone itself
- action = pe__new_rsc_pseudo_action(clone, RSC_PROMOTE, !any_promoting,
- true);
+ action = pe__new_rsc_pseudo_action(clone, PCMK_ACTION_PROMOTE,
+ !any_promoting, true);
// Create a "promoted" action for when all promotions are done
- action_complete = pe__new_rsc_pseudo_action(clone, RSC_PROMOTED,
+ action_complete = pe__new_rsc_pseudo_action(clone, PCMK_ACTION_PROMOTED,
!any_promoting, true);
action_complete->priority = INFINITY;
// Create notification pseudo-actions for promotion
if (clone_data->promote_notify == NULL) {
clone_data->promote_notify = pe__action_notif_pseudo_ops(clone,
- RSC_PROMOTE,
+ PCMK_ACTION_PROMOTE,
action,
action_complete);
}
// Create a "demote" action for the clone itself
- action = pe__new_rsc_pseudo_action(clone, RSC_DEMOTE, !any_demoting, true);
+ action = pe__new_rsc_pseudo_action(clone, PCMK_ACTION_DEMOTE,
+ !any_demoting, true);
// Create a "demoted" action for when all demotions are done
- action_complete = pe__new_rsc_pseudo_action(clone, RSC_DEMOTED,
+ action_complete = pe__new_rsc_pseudo_action(clone, PCMK_ACTION_DEMOTED,
!any_demoting, true);
action_complete->priority = INFINITY;
// Create notification pseudo-actions for demotion
if (clone_data->demote_notify == NULL) {
clone_data->demote_notify = pe__action_notif_pseudo_ops(clone,
- RSC_DEMOTE,
+ PCMK_ACTION_DEMOTE,
action,
action_complete);
if (clone_data->promote_notify != NULL) {
order_actions(clone_data->stop_notify->post_done,
- clone_data->promote_notify->pre,
- pe_order_optional);
+ clone_data->promote_notify->pre, pcmk__ar_ordered);
order_actions(clone_data->start_notify->post_done,
- clone_data->promote_notify->pre,
- pe_order_optional);
+ clone_data->promote_notify->pre, pcmk__ar_ordered);
order_actions(clone_data->demote_notify->post_done,
- clone_data->promote_notify->pre,
- pe_order_optional);
+ clone_data->promote_notify->pre, pcmk__ar_ordered);
order_actions(clone_data->demote_notify->post_done,
- clone_data->start_notify->pre,
- pe_order_optional);
+ clone_data->start_notify->pre, pcmk__ar_ordered);
order_actions(clone_data->demote_notify->post_done,
- clone_data->stop_notify->pre,
- pe_order_optional);
+ clone_data->stop_notify->pre, pcmk__ar_ordered);
}
}
}
@@ -1396,7 +1423,7 @@ pe__create_promotable_pseudo_ops(pe_resource_t *clone, bool any_promoting,
* \param[in,out] clone Clone to create notifications for
*/
void
-pe__create_clone_notifications(pe_resource_t *clone)
+pe__create_clone_notifications(pcmk_resource_t *clone)
{
clone_variant_data_t *clone_data = NULL;
@@ -1415,7 +1442,7 @@ pe__create_clone_notifications(pe_resource_t *clone)
* \param[in,out] clone Clone to free notification data for
*/
void
-pe__free_clone_notification_data(pe_resource_t *clone)
+pe__free_clone_notification_data(pcmk_resource_t *clone)
{
clone_variant_data_t *clone_data = NULL;
@@ -1445,26 +1472,45 @@ pe__free_clone_notification_data(pe_resource_t *clone)
* \param[in,out] stopped Stopped action for \p clone
*/
void
-pe__create_clone_notif_pseudo_ops(pe_resource_t *clone,
- pe_action_t *start, pe_action_t *started,
- pe_action_t *stop, pe_action_t *stopped)
+pe__create_clone_notif_pseudo_ops(pcmk_resource_t *clone,
+ pcmk_action_t *start, pcmk_action_t *started,
+ pcmk_action_t *stop, pcmk_action_t *stopped)
{
clone_variant_data_t *clone_data = NULL;
get_clone_variant_data(clone_data, clone);
if (clone_data->start_notify == NULL) {
- clone_data->start_notify = pe__action_notif_pseudo_ops(clone, RSC_START,
+ clone_data->start_notify = pe__action_notif_pseudo_ops(clone,
+ PCMK_ACTION_START,
start, started);
}
if (clone_data->stop_notify == NULL) {
- clone_data->stop_notify = pe__action_notif_pseudo_ops(clone, RSC_STOP,
+ clone_data->stop_notify = pe__action_notif_pseudo_ops(clone,
+ PCMK_ACTION_STOP,
stop, stopped);
if ((clone_data->start_notify != NULL)
&& (clone_data->stop_notify != NULL)) {
order_actions(clone_data->stop_notify->post_done,
- clone_data->start_notify->pre, pe_order_optional);
+ clone_data->start_notify->pre, pcmk__ar_ordered);
}
}
}
+
+/*!
+ * \internal
+ * \brief Get maximum clone resource instances per node
+ *
+ * \param[in] rsc Clone resource to check
+ *
+ * \return Maximum number of \p rsc instances that can be active on one node
+ */
+unsigned int
+pe__clone_max_per_node(const pcmk_resource_t *rsc)
+{
+ const clone_variant_data_t *clone_data = NULL;
+
+ get_clone_variant_data(clone_data, rsc);
+ return clone_data->clone_node_max;
+}
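
The two accessors added above, pe__clone_flag_is_set() and pe__clone_max_per_node(), let callers query clone properties without reaching into clone_variant_data_t. A minimal caller sketch follows (not part of this patch); it assumes an already-unpacked clone resource and uses only the signatures and the pcmk__clone_ordered flag visible in this diff.

/* Hypothetical caller sketch, not part of the patch. "rsc" is assumed to be
 * a resource already unpacked by the scheduler. */
#include <crm/pengine/internal.h>

static void
log_clone_limits(const pcmk_resource_t *rsc)
{
    if (pe_rsc_is_clone(rsc)) {
        unsigned int per_node = pe__clone_max_per_node(rsc);
        bool ordered = pe__clone_flag_is_set(rsc, pcmk__clone_ordered);

        crm_info("%s allows %u instance(s) per node (%s)",
                 rsc->id, per_node, (ordered? "ordered" : "unordered"));
    }
}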
diff --git a/lib/pengine/common.c b/lib/pengine/common.c
index 6c69bfc..0fdd5a1 100644
--- a/lib/pengine/common.c
+++ b/lib/pengine/common.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -15,6 +15,7 @@
#include <glib.h>
+#include <crm/common/scheduler_internal.h>
#include <crm/pengine/internal.h>
gboolean was_processing_error = FALSE;
@@ -104,7 +105,7 @@ static pcmk__cluster_option_t pe_opts[] = {
},
{
"stonith-action", NULL, "select", "reboot, off, poweroff",
- "reboot", pcmk__is_fencing_action,
+ PCMK_ACTION_REBOOT, pcmk__is_fencing_action,
N_("Action to send to fence device when a node needs to be fenced "
"(\"poweroff\" is a deprecated alias for \"off\")"),
NULL
@@ -157,7 +158,17 @@ static pcmk__cluster_option_t pe_opts[] = {
"twice, the maximum `pcmk_delay_base/max`. By default, priority "
"fencing delay is disabled.")
},
-
+ {
+ XML_CONFIG_ATTR_NODE_PENDING_TIMEOUT, NULL, "time", NULL,
+ "0", pcmk__valid_interval_spec,
+ N_("How long to wait for a node that has joined the cluster to join "
+ "the controller process group"),
+ N_("Fence nodes that do not join the controller process group within "
+ "this much time after joining the cluster, to allow the cluster "
+ "to continue managing resources. A value of 0 means never fence "
+ "pending nodes. Setting the value to 2h means fence nodes after "
+ "2 hours.")
+ },
{
"cluster-delay", NULL, "time", NULL,
"60s", pcmk__valid_interval_spec,
@@ -311,34 +322,34 @@ fail2text(enum action_fail_response fail)
const char *result = "<unknown>";
switch (fail) {
- case action_fail_ignore:
+ case pcmk_on_fail_ignore:
result = "ignore";
break;
- case action_fail_demote:
+ case pcmk_on_fail_demote:
result = "demote";
break;
- case action_fail_block:
+ case pcmk_on_fail_block:
result = "block";
break;
- case action_fail_recover:
+ case pcmk_on_fail_restart:
result = "recover";
break;
- case action_fail_migrate:
+ case pcmk_on_fail_ban:
result = "migrate";
break;
- case action_fail_stop:
+ case pcmk_on_fail_stop:
result = "stop";
break;
- case action_fail_fence:
+ case pcmk_on_fail_fence_node:
result = "fence";
break;
- case action_fail_standby:
+ case pcmk_on_fail_standby_node:
result = "standby";
break;
- case action_fail_restart_container:
+ case pcmk_on_fail_restart_container:
result = "restart-container";
break;
- case action_fail_reset_remote:
+ case pcmk_on_fail_reset_remote:
result = "reset-remote";
break;
}
@@ -348,49 +359,46 @@ fail2text(enum action_fail_response fail)
enum action_tasks
text2task(const char *task)
{
- if (pcmk__str_eq(task, CRMD_ACTION_STOP, pcmk__str_casei)) {
- return stop_rsc;
- } else if (pcmk__str_eq(task, CRMD_ACTION_STOPPED, pcmk__str_casei)) {
- return stopped_rsc;
- } else if (pcmk__str_eq(task, CRMD_ACTION_START, pcmk__str_casei)) {
- return start_rsc;
- } else if (pcmk__str_eq(task, CRMD_ACTION_STARTED, pcmk__str_casei)) {
- return started_rsc;
- } else if (pcmk__str_eq(task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
- return shutdown_crm;
- } else if (pcmk__str_eq(task, CRM_OP_FENCE, pcmk__str_casei)) {
- return stonith_node;
- } else if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
- return monitor_rsc;
- } else if (pcmk__str_eq(task, CRMD_ACTION_NOTIFY, pcmk__str_casei)) {
- return action_notify;
- } else if (pcmk__str_eq(task, CRMD_ACTION_NOTIFIED, pcmk__str_casei)) {
- return action_notified;
- } else if (pcmk__str_eq(task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
- return action_promote;
- } else if (pcmk__str_eq(task, CRMD_ACTION_DEMOTE, pcmk__str_casei)) {
- return action_demote;
- } else if (pcmk__str_eq(task, CRMD_ACTION_PROMOTED, pcmk__str_casei)) {
- return action_promoted;
- } else if (pcmk__str_eq(task, CRMD_ACTION_DEMOTED, pcmk__str_casei)) {
- return action_demoted;
- }
-#if SUPPORT_TRACING
- if (pcmk__str_eq(task, CRMD_ACTION_CANCEL, pcmk__str_casei)) {
- return no_action;
- } else if (pcmk__str_eq(task, CRMD_ACTION_DELETE, pcmk__str_casei)) {
- return no_action;
- } else if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
- return no_action;
- } else if (pcmk__str_eq(task, CRMD_ACTION_MIGRATE, pcmk__str_casei)) {
- return no_action;
- } else if (pcmk__str_eq(task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
- return no_action;
- }
- crm_trace("Unsupported action: %s", task);
-#endif
+ if (pcmk__str_eq(task, PCMK_ACTION_STOP, pcmk__str_casei)) {
+ return pcmk_action_stop;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_STOPPED, pcmk__str_casei)) {
+ return pcmk_action_stopped;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_START, pcmk__str_casei)) {
+ return pcmk_action_start;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_RUNNING, pcmk__str_casei)) {
+ return pcmk_action_started;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_DO_SHUTDOWN, pcmk__str_casei)) {
+ return pcmk_action_shutdown;
- return no_action;
+ } else if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_casei)) {
+ return pcmk_action_fence;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
+ return pcmk_action_monitor;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_casei)) {
+ return pcmk_action_notify;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_NOTIFIED, pcmk__str_casei)) {
+ return pcmk_action_notified;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_PROMOTE, pcmk__str_casei)) {
+ return pcmk_action_promote;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_DEMOTE, pcmk__str_casei)) {
+ return pcmk_action_demote;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_PROMOTED, pcmk__str_casei)) {
+ return pcmk_action_promoted;
+
+ } else if (pcmk__str_eq(task, PCMK_ACTION_DEMOTED, pcmk__str_casei)) {
+ return pcmk_action_demoted;
+ }
+ return pcmk_action_unspecified;
}
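
The rewritten text2task() above maps the new PCMK_ACTION_* string constants to the pcmk_action_* enum values and now returns pcmk_action_unspecified for any unrecognized task. A tiny round-trip sketch (not part of this patch), using only symbols visible in this diff:

/* Hypothetical round-trip sketch, not part of the patch */
enum action_tasks task = text2task(PCMK_ACTION_START);  // pcmk_action_start
const char *name = task2text(task);                     // PCMK_ACTION_START
enum action_tasks other = text2task("not-a-task");      // pcmk_action_unspecified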
const char *
@@ -399,47 +407,47 @@ task2text(enum action_tasks task)
const char *result = "<unknown>";
switch (task) {
- case no_action:
+ case pcmk_action_unspecified:
result = "no_action";
break;
- case stop_rsc:
- result = CRMD_ACTION_STOP;
+ case pcmk_action_stop:
+ result = PCMK_ACTION_STOP;
break;
- case stopped_rsc:
- result = CRMD_ACTION_STOPPED;
+ case pcmk_action_stopped:
+ result = PCMK_ACTION_STOPPED;
break;
- case start_rsc:
- result = CRMD_ACTION_START;
+ case pcmk_action_start:
+ result = PCMK_ACTION_START;
break;
- case started_rsc:
- result = CRMD_ACTION_STARTED;
+ case pcmk_action_started:
+ result = PCMK_ACTION_RUNNING;
break;
- case shutdown_crm:
- result = CRM_OP_SHUTDOWN;
+ case pcmk_action_shutdown:
+ result = PCMK_ACTION_DO_SHUTDOWN;
break;
- case stonith_node:
- result = CRM_OP_FENCE;
+ case pcmk_action_fence:
+ result = PCMK_ACTION_STONITH;
break;
- case monitor_rsc:
- result = CRMD_ACTION_STATUS;
+ case pcmk_action_monitor:
+ result = PCMK_ACTION_MONITOR;
break;
- case action_notify:
- result = CRMD_ACTION_NOTIFY;
+ case pcmk_action_notify:
+ result = PCMK_ACTION_NOTIFY;
break;
- case action_notified:
- result = CRMD_ACTION_NOTIFIED;
+ case pcmk_action_notified:
+ result = PCMK_ACTION_NOTIFIED;
break;
- case action_promote:
- result = CRMD_ACTION_PROMOTE;
+ case pcmk_action_promote:
+ result = PCMK_ACTION_PROMOTE;
break;
- case action_promoted:
- result = CRMD_ACTION_PROMOTED;
+ case pcmk_action_promoted:
+ result = PCMK_ACTION_PROMOTED;
break;
- case action_demote:
- result = CRMD_ACTION_DEMOTE;
+ case pcmk_action_demote:
+ result = PCMK_ACTION_DEMOTE;
break;
- case action_demoted:
- result = CRMD_ACTION_DEMOTED;
+ case pcmk_action_demoted:
+ result = PCMK_ACTION_DEMOTED;
break;
}
@@ -450,50 +458,50 @@ const char *
role2text(enum rsc_role_e role)
{
switch (role) {
- case RSC_ROLE_UNKNOWN:
- return RSC_ROLE_UNKNOWN_S;
- case RSC_ROLE_STOPPED:
- return RSC_ROLE_STOPPED_S;
- case RSC_ROLE_STARTED:
- return RSC_ROLE_STARTED_S;
- case RSC_ROLE_UNPROMOTED:
+ case pcmk_role_stopped:
+ return PCMK__ROLE_STOPPED;
+
+ case pcmk_role_started:
+ return PCMK__ROLE_STARTED;
+
+ case pcmk_role_unpromoted:
#ifdef PCMK__COMPAT_2_0
- return RSC_ROLE_UNPROMOTED_LEGACY_S;
+ return PCMK__ROLE_UNPROMOTED_LEGACY;
#else
- return RSC_ROLE_UNPROMOTED_S;
+ return PCMK__ROLE_UNPROMOTED;
#endif
- case RSC_ROLE_PROMOTED:
+
+ case pcmk_role_promoted:
#ifdef PCMK__COMPAT_2_0
- return RSC_ROLE_PROMOTED_LEGACY_S;
+ return PCMK__ROLE_PROMOTED_LEGACY;
#else
- return RSC_ROLE_PROMOTED_S;
+ return PCMK__ROLE_PROMOTED;
#endif
+
+ default: // pcmk_role_unknown
+ return PCMK__ROLE_UNKNOWN;
}
- CRM_CHECK(role >= RSC_ROLE_UNKNOWN, return RSC_ROLE_UNKNOWN_S);
- CRM_CHECK(role < RSC_ROLE_MAX, return RSC_ROLE_UNKNOWN_S);
- // coverity[dead_error_line]
- return RSC_ROLE_UNKNOWN_S;
}
enum rsc_role_e
text2role(const char *role)
{
CRM_ASSERT(role != NULL);
- if (pcmk__str_eq(role, RSC_ROLE_STOPPED_S, pcmk__str_casei)) {
- return RSC_ROLE_STOPPED;
- } else if (pcmk__str_eq(role, RSC_ROLE_STARTED_S, pcmk__str_casei)) {
- return RSC_ROLE_STARTED;
- } else if (pcmk__strcase_any_of(role, RSC_ROLE_UNPROMOTED_S,
- RSC_ROLE_UNPROMOTED_LEGACY_S, NULL)) {
- return RSC_ROLE_UNPROMOTED;
- } else if (pcmk__strcase_any_of(role, RSC_ROLE_PROMOTED_S,
- RSC_ROLE_PROMOTED_LEGACY_S, NULL)) {
- return RSC_ROLE_PROMOTED;
- } else if (pcmk__str_eq(role, RSC_ROLE_UNKNOWN_S, pcmk__str_casei)) {
- return RSC_ROLE_UNKNOWN;
+ if (pcmk__str_eq(role, PCMK__ROLE_STOPPED, pcmk__str_casei)) {
+ return pcmk_role_stopped;
+ } else if (pcmk__str_eq(role, PCMK__ROLE_STARTED, pcmk__str_casei)) {
+ return pcmk_role_started;
+ } else if (pcmk__strcase_any_of(role, PCMK__ROLE_UNPROMOTED,
+ PCMK__ROLE_UNPROMOTED_LEGACY, NULL)) {
+ return pcmk_role_unpromoted;
+ } else if (pcmk__strcase_any_of(role, PCMK__ROLE_PROMOTED,
+ PCMK__ROLE_PROMOTED_LEGACY, NULL)) {
+ return pcmk_role_promoted;
+ } else if (pcmk__str_eq(role, PCMK__ROLE_UNKNOWN, pcmk__str_casei)) {
+ return pcmk_role_unknown;
}
crm_err("Unknown role: %s", role);
- return RSC_ROLE_UNKNOWN;
+ return pcmk_role_unknown;
}
void
@@ -514,48 +522,103 @@ add_hash_param(GHashTable * hash, const char *name, const char *value)
}
}
+/*!
+ * \internal
+ * \brief Look up an attribute value on the appropriate node
+ *
+ * If \p node is a guest node and either the \c XML_RSC_ATTR_TARGET meta
+ * attribute is set to "host" for \p rsc or \p force_host is \c true, query the
+ * attribute on the node's host. Otherwise, query the attribute on \p node
+ * itself.
+ *
+ * \param[in] node Node to query attribute value on by default
+ * \param[in] name Name of attribute to query
+ * \param[in] rsc Resource on whose behalf we're querying
+ * \param[in] node_type Type of resource location lookup
+ * \param[in] force_host Force a lookup on the guest node's host, regardless of
+ * the \c XML_RSC_ATTR_TARGET value
+ *
+ * \return Value of the attribute on \p node or on the host of \p node
+ *
+ * \note If \p force_host is \c true, \p node \e must be a guest node.
+ */
const char *
-pe_node_attribute_calculated(const pe_node_t *node, const char *name,
- const pe_resource_t *rsc)
+pe__node_attribute_calculated(const pcmk_node_t *node, const char *name,
+ const pcmk_resource_t *rsc,
+ enum pcmk__rsc_node node_type,
+ bool force_host)
{
- const char *source;
-
- if(node == NULL) {
- return NULL;
+ // @TODO: Use pe__is_guest_node() after merging libpe_{rules,status}
+ bool is_guest = (node != NULL)
+ && (node->details->type == pcmk_node_variant_remote)
+ && (node->details->remote_rsc != NULL)
+ && (node->details->remote_rsc->container != NULL);
+ const char *source = NULL;
+ const char *node_type_s = NULL;
+ const char *reason = NULL;
+
+ const pcmk_resource_t *container = NULL;
+ const pcmk_node_t *host = NULL;
+
+ CRM_ASSERT((node != NULL) && (name != NULL) && (rsc != NULL)
+ && (!force_host || is_guest));
+
+ /* Ignore XML_RSC_ATTR_TARGET if node is not a guest node. This represents a
+ * user configuration error.
+ */
+ source = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET);
+ if (!force_host
+ && (!is_guest || !pcmk__str_eq(source, "host", pcmk__str_casei))) {
- } else if(rsc == NULL) {
return g_hash_table_lookup(node->details->attrs, name);
}
- source = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET);
- if(source == NULL || !pcmk__str_eq("host", source, pcmk__str_casei)) {
- return g_hash_table_lookup(node->details->attrs, name);
- }
+ container = node->details->remote_rsc->container;
- /* Use attributes set for the containers location
- * instead of for the container itself
- *
- * Useful when the container is using the host's local
- * storage
- */
+ switch (node_type) {
+ case pcmk__rsc_node_assigned:
+ node_type_s = "assigned";
+ host = container->allocated_to;
+ if (host == NULL) {
+ reason = "not assigned";
+ }
+ break;
- CRM_ASSERT(node->details->remote_rsc);
- CRM_ASSERT(node->details->remote_rsc->container);
+ case pcmk__rsc_node_current:
+ node_type_s = "current";
- if(node->details->remote_rsc->container->running_on) {
- pe_node_t *host = node->details->remote_rsc->container->running_on->data;
- pe_rsc_trace(rsc, "%s: Looking for %s on the container host %s",
- rsc->id, name, pe__node_name(host));
- return g_hash_table_lookup(host->details->attrs, name);
+ if (container->running_on != NULL) {
+ host = container->running_on->data;
+ }
+ if (host == NULL) {
+ reason = "inactive";
+ }
+ break;
+
+ default:
+ // Add support for other enum pcmk__rsc_node values if needed
+ CRM_ASSERT(false);
+ break;
}
- pe_rsc_trace(rsc, "%s: Not looking for %s on the container host: %s is inactive",
- rsc->id, name, node->details->remote_rsc->container->id);
+ if (host != NULL) {
+ const char *value = g_hash_table_lookup(host->details->attrs, name);
+
+ pe_rsc_trace(rsc,
+ "%s: Value lookup for %s on %s container host %s %s%s",
+ rsc->id, name, node_type_s, pe__node_name(host),
+ ((value != NULL)? "succeeded: " : "failed"),
+ pcmk__s(value, ""));
+ return value;
+ }
+ pe_rsc_trace(rsc,
+ "%s: Not looking for %s on %s container host: %s is %s",
+ rsc->id, name, node_type_s, container->id, reason);
return NULL;
}
const char *
-pe_node_attribute_raw(const pe_node_t *node, const char *name)
+pe_node_attribute_raw(const pcmk_node_t *node, const char *name)
{
if(node == NULL) {
return NULL;
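
The expanded documentation above describes how pe__node_attribute_calculated() can redirect a lookup from a guest node to its container's assigned or current host. A hedged caller sketch (not part of this patch), using only the signature and enum values shown in the diff; the attribute name is illustrative:

/* Hypothetical caller sketch, not part of the patch: query an attribute on
 * behalf of rsc, following the guest node's current host when
 * XML_RSC_ATTR_TARGET is set to "host". */
const char *value = pe__node_attribute_calculated(node, "ping", rsc,
                                                  pcmk__rsc_node_current,
                                                  false);
if (value == NULL) {
    /* attribute unset, or the guest's container is not active anywhere */
}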
diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c
index f168124..0ab2e04 100644
--- a/lib/pengine/complex.c
+++ b/lib/pengine/complex.c
@@ -13,15 +13,17 @@
#include <crm/pengine/internal.h>
#include <crm/msg_xml.h>
#include <crm/common/xml_internal.h>
+#include <crm/common/scheduler_internal.h>
#include "pe_status_private.h"
void populate_hash(xmlNode * nvpair_list, GHashTable * hash, const char **attrs, int attrs_length);
-static pe_node_t *active_node(const pe_resource_t *rsc, unsigned int *count_all,
- unsigned int *count_clean);
+static pcmk_node_t *active_node(const pcmk_resource_t *rsc,
+ unsigned int *count_all,
+ unsigned int *count_clean);
-resource_object_functions_t resource_class_functions[] = {
+pcmk_rsc_methods_t resource_class_functions[] = {
{
native_unpack,
native_find_rsc,
@@ -34,6 +36,7 @@ resource_object_functions_t resource_class_functions[] = {
pe__count_common,
pe__native_is_filtered,
active_node,
+ pe__primitive_max_per_node,
},
{
group_unpack,
@@ -47,6 +50,7 @@ resource_object_functions_t resource_class_functions[] = {
pe__count_common,
pe__group_is_filtered,
active_node,
+ pe__group_max_per_node,
},
{
clone_unpack,
@@ -60,6 +64,7 @@ resource_object_functions_t resource_class_functions[] = {
pe__count_common,
pe__clone_is_filtered,
active_node,
+ pe__clone_max_per_node,
},
{
pe__unpack_bundle,
@@ -73,6 +78,7 @@ resource_object_functions_t resource_class_functions[] = {
pe__count_bundle,
pe__bundle_is_filtered,
pe__bundle_active_node,
+ pe__bundle_max_per_node,
}
};
@@ -80,23 +86,23 @@ static enum pe_obj_types
get_resource_type(const char *name)
{
if (pcmk__str_eq(name, XML_CIB_TAG_RESOURCE, pcmk__str_casei)) {
- return pe_native;
+ return pcmk_rsc_variant_primitive;
} else if (pcmk__str_eq(name, XML_CIB_TAG_GROUP, pcmk__str_casei)) {
- return pe_group;
+ return pcmk_rsc_variant_group;
} else if (pcmk__str_eq(name, XML_CIB_TAG_INCARNATION, pcmk__str_casei)) {
- return pe_clone;
+ return pcmk_rsc_variant_clone;
} else if (pcmk__str_eq(name, PCMK_XE_PROMOTABLE_LEGACY, pcmk__str_casei)) {
// @COMPAT deprecated since 2.0.0
- return pe_clone;
+ return pcmk_rsc_variant_clone;
} else if (pcmk__str_eq(name, XML_CIB_TAG_CONTAINER, pcmk__str_casei)) {
- return pe_container;
+ return pcmk_rsc_variant_bundle;
}
- return pe_unknown;
+ return pcmk_rsc_variant_unknown;
}
static void
@@ -106,10 +112,12 @@ dup_attr(gpointer key, gpointer value, gpointer user_data)
}
static void
-expand_parents_fixed_nvpairs(pe_resource_t * rsc, pe_rule_eval_data_t * rule_data, GHashTable * meta_hash, pe_working_set_t * data_set)
+expand_parents_fixed_nvpairs(pcmk_resource_t *rsc,
+ pe_rule_eval_data_t *rule_data,
+ GHashTable *meta_hash, pcmk_scheduler_t *scheduler)
{
GHashTable *parent_orig_meta = pcmk__strkey_table(free, free);
- pe_resource_t *p = rsc->parent;
+ pcmk_resource_t *p = rsc->parent;
if (p == NULL) {
return ;
@@ -119,8 +127,8 @@ expand_parents_fixed_nvpairs(pe_resource_t * rsc, pe_rule_eval_data_t * rule_dat
/* The fixed value of the lower parent resource takes precedence and is not overwritten. */
while(p != NULL) {
/* A hash table for comparison is generated, including the id-ref. */
- pe__unpack_dataset_nvpairs(p->xml, XML_TAG_META_SETS,
- rule_data, parent_orig_meta, NULL, FALSE, data_set);
+ pe__unpack_dataset_nvpairs(p->xml, XML_TAG_META_SETS, rule_data,
+ parent_orig_meta, NULL, FALSE, scheduler);
p = p->parent;
}
@@ -146,8 +154,8 @@ expand_parents_fixed_nvpairs(pe_resource_t * rsc, pe_rule_eval_data_t * rule_dat
}
void
-get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
- pe_node_t * node, pe_working_set_t * data_set)
+get_meta_attributes(GHashTable * meta_hash, pcmk_resource_t * rsc,
+ pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
pe_rsc_eval_data_t rsc_rule_data = {
.standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS),
@@ -157,8 +165,8 @@ get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
+ .role = pcmk_role_unknown,
+ .now = scheduler->now,
.match_data = NULL,
.rsc_data = &rsc_rule_data,
.op_data = NULL
@@ -170,23 +178,23 @@ get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
for (xmlAttrPtr a = pcmk__xe_first_attr(rsc->xml); a != NULL; a = a->next) {
const char *prop_name = (const char *) a->name;
- const char *prop_value = crm_element_value(rsc->xml, prop_name);
+ const char *prop_value = pcmk__xml_attr_value(a);
add_hash_param(meta_hash, prop_name, prop_value);
}
pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_META_SETS, &rule_data,
- meta_hash, NULL, FALSE, data_set);
+ meta_hash, NULL, FALSE, scheduler);
/* Set the "meta_attributes" explicitly set in the parent resource to the hash table of the child resource. */
/* If it is already explicitly set as a child, it will not be overwritten. */
if (rsc->parent != NULL) {
- expand_parents_fixed_nvpairs(rsc, &rule_data, meta_hash, data_set);
+ expand_parents_fixed_nvpairs(rsc, &rule_data, meta_hash, scheduler);
}
/* check the defaults */
- pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_META_SETS,
- &rule_data, meta_hash, NULL, FALSE, data_set);
+ pe__unpack_dataset_nvpairs(scheduler->rsc_defaults, XML_TAG_META_SETS,
+ &rule_data, meta_hash, NULL, FALSE, scheduler);
/* If there is "meta_attributes" that the parent resource has not explicitly set, set a value that is not set from rsc_default either. */
/* The values already set up to this point will not be overwritten. */
@@ -196,13 +204,13 @@ get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
}
void
-get_rsc_attributes(GHashTable *meta_hash, const pe_resource_t *rsc,
- const pe_node_t *node, pe_working_set_t *data_set)
+get_rsc_attributes(GHashTable *meta_hash, const pcmk_resource_t *rsc,
+ const pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
+ .role = pcmk_role_unknown,
+ .now = scheduler->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
@@ -213,16 +221,17 @@ get_rsc_attributes(GHashTable *meta_hash, const pe_resource_t *rsc,
}
pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS, &rule_data,
- meta_hash, NULL, FALSE, data_set);
+ meta_hash, NULL, FALSE, scheduler);
/* set anything else based on the parent */
if (rsc->parent != NULL) {
- get_rsc_attributes(meta_hash, rsc->parent, node, data_set);
+ get_rsc_attributes(meta_hash, rsc->parent, node, scheduler);
} else {
/* and finally check the defaults */
- pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_ATTR_SETS,
- &rule_data, meta_hash, NULL, FALSE, data_set);
+ pe__unpack_dataset_nvpairs(scheduler->rsc_defaults, XML_TAG_ATTR_SETS,
+ &rule_data, meta_hash, NULL, FALSE,
+ scheduler);
}
}
@@ -234,9 +243,9 @@ template_op_key(xmlNode * op)
char *key = NULL;
if ((role == NULL)
- || pcmk__strcase_any_of(role, RSC_ROLE_STARTED_S, RSC_ROLE_UNPROMOTED_S,
- RSC_ROLE_UNPROMOTED_LEGACY_S, NULL)) {
- role = RSC_ROLE_UNKNOWN_S;
+ || pcmk__strcase_any_of(role, PCMK__ROLE_STARTED, PCMK__ROLE_UNPROMOTED,
+ PCMK__ROLE_UNPROMOTED_LEGACY, NULL)) {
+ role = PCMK__ROLE_UNKNOWN;
}
key = crm_strdup_printf("%s-%s", name, role);
@@ -244,7 +253,8 @@ template_op_key(xmlNode * op)
}
static gboolean
-unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set)
+unpack_template(xmlNode *xml_obj, xmlNode **expanded_xml,
+ pcmk_scheduler_t *scheduler)
{
xmlNode *cib_resources = NULL;
xmlNode *template = NULL;
@@ -268,7 +278,7 @@ unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * d
id = ID(xml_obj);
if (id == NULL) {
- pe_err("'%s' object must have a id", crm_element_name(xml_obj));
+ pe_err("'%s' object must have a id", xml_obj->name);
return FALSE;
}
@@ -277,7 +287,8 @@ unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * d
return FALSE;
}
- cib_resources = get_xpath_object("//"XML_CIB_TAG_RESOURCES, data_set->input, LOG_TRACE);
+ cib_resources = get_xpath_object("//" XML_CIB_TAG_RESOURCES,
+ scheduler->input, LOG_TRACE);
if (cib_resources == NULL) {
pe_err("No resources configured");
return FALSE;
@@ -292,7 +303,7 @@ unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * d
new_xml = copy_xml(template);
xmlNodeSetName(new_xml, xml_obj->name);
- crm_xml_replace(new_xml, XML_ATTR_ID, id);
+ crm_xml_add(new_xml, XML_ATTR_ID, id);
clone = crm_element_value(xml_obj, XML_RSC_ATTR_INCARNATION);
if(clone) {
@@ -346,19 +357,19 @@ unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * d
/*free_xml(*expanded_xml); */
*expanded_xml = new_xml;
- /* Disable multi-level templates for now */
- /*if(unpack_template(new_xml, expanded_xml, data_set) == FALSE) {
+#if 0 /* Disable multi-level templates for now */
+ if (!unpack_template(new_xml, expanded_xml, scheduler)) {
free_xml(*expanded_xml);
*expanded_xml = NULL;
-
return FALSE;
- } */
+ }
+#endif
return TRUE;
}
static gboolean
-add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set)
+add_template_rsc(xmlNode *xml_obj, pcmk_scheduler_t *scheduler)
{
const char *template_ref = NULL;
const char *id = NULL;
@@ -375,7 +386,7 @@ add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set)
id = ID(xml_obj);
if (id == NULL) {
- pe_err("'%s' object must have a id", crm_element_name(xml_obj));
+ pe_err("'%s' object must have a id", xml_obj->name);
return FALSE;
}
@@ -384,7 +395,7 @@ add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set)
return FALSE;
}
- if (add_tag_ref(data_set->template_rsc_sets, template_ref, id) == FALSE) {
+ if (add_tag_ref(scheduler->template_rsc_sets, template_ref, id) == FALSE) {
return FALSE;
}
@@ -392,7 +403,7 @@ add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set)
}
static bool
-detect_promotable(pe_resource_t *rsc)
+detect_promotable(pcmk_resource_t *rsc)
{
const char *promotable = g_hash_table_lookup(rsc->meta,
XML_RSC_ATTR_PROMOTABLE);
@@ -402,8 +413,7 @@ detect_promotable(pe_resource_t *rsc)
}
// @COMPAT deprecated since 2.0.0
- if (pcmk__str_eq(crm_element_name(rsc->xml), PCMK_XE_PROMOTABLE_LEGACY,
- pcmk__str_casei)) {
+ if (pcmk__xe_is(rsc->xml, PCMK_XE_PROMOTABLE_LEGACY)) {
/* @TODO in some future version, pe_warn_once() here,
* then drop support in even later version
*/
@@ -423,18 +433,18 @@ free_params_table(gpointer data)
/*!
* \brief Get a table of resource parameters
*
- * \param[in,out] rsc Resource to query
- * \param[in] node Node for evaluating rules (NULL for defaults)
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] rsc Resource to query
+ * \param[in] node Node for evaluating rules (NULL for defaults)
+ * \param[in,out] scheduler Scheduler data
*
* \return Hash table containing resource parameter names and values
- * (or NULL if \p rsc or \p data_set is NULL)
+ * (or NULL if \p rsc or \p scheduler is NULL)
* \note The returned table will be destroyed when the resource is freed, so
* callers should not destroy it.
*/
GHashTable *
-pe_rsc_params(pe_resource_t *rsc, const pe_node_t *node,
- pe_working_set_t *data_set)
+pe_rsc_params(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ pcmk_scheduler_t *scheduler)
{
GHashTable *params_on_node = NULL;
@@ -445,7 +455,7 @@ pe_rsc_params(pe_resource_t *rsc, const pe_node_t *node,
const char *node_name = "";
// Sanity check
- if ((rsc == NULL) || (data_set == NULL)) {
+ if ((rsc == NULL) || (scheduler == NULL)) {
return NULL;
}
if ((node != NULL) && (node->details->uname != NULL)) {
@@ -462,7 +472,7 @@ pe_rsc_params(pe_resource_t *rsc, const pe_node_t *node,
// If none exists yet, create one with parameters evaluated for node
if (params_on_node == NULL) {
params_on_node = pcmk__strkey_table(free, free);
- get_rsc_attributes(params_on_node, rsc, node, data_set);
+ get_rsc_attributes(params_on_node, rsc, node, scheduler);
g_hash_table_insert(rsc->parameter_cache, strdup(node_name),
params_on_node);
}
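
As documented above, pe_rsc_params() returns a per-node parameter table that is cached on the resource and freed with it, so callers must not destroy it. A minimal lookup sketch (not part of this patch); the parameter name is illustrative:

/* Hypothetical lookup sketch, not part of the patch. The returned table is
 * owned by rsc->parameter_cache and must not be destroyed by the caller. */
GHashTable *params = pe_rsc_params(rsc, node, scheduler);

if (params != NULL) {
    const char *ip = g_hash_table_lookup(params, "ip");

    crm_trace("%s ip parameter on %s: %s",
              rsc->id, pe__node_name(node), pcmk__s(ip, "<unset>"));
}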
@@ -478,29 +488,30 @@ pe_rsc_params(pe_resource_t *rsc, const pe_node_t *node,
* \param[in] is_default Whether \p value was selected by default
*/
static void
-unpack_requires(pe_resource_t *rsc, const char *value, bool is_default)
+unpack_requires(pcmk_resource_t *rsc, const char *value, bool is_default)
{
if (pcmk__str_eq(value, PCMK__VALUE_NOTHING, pcmk__str_casei)) {
} else if (pcmk__str_eq(value, PCMK__VALUE_QUORUM, pcmk__str_casei)) {
- pe__set_resource_flags(rsc, pe_rsc_needs_quorum);
+ pe__set_resource_flags(rsc, pcmk_rsc_needs_quorum);
} else if (pcmk__str_eq(value, PCMK__VALUE_FENCING, pcmk__str_casei)) {
- pe__set_resource_flags(rsc, pe_rsc_needs_fencing);
- if (!pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
+ pe__set_resource_flags(rsc, pcmk_rsc_needs_fencing);
+ if (!pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) {
pcmk__config_warn("%s requires fencing but fencing is disabled",
rsc->id);
}
} else if (pcmk__str_eq(value, PCMK__VALUE_UNFENCING, pcmk__str_casei)) {
- if (pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)) {
pcmk__config_warn("Resetting \"" XML_RSC_ATTR_REQUIRES "\" for %s "
"to \"" PCMK__VALUE_QUORUM "\" because fencing "
"devices cannot require unfencing", rsc->id);
unpack_requires(rsc, PCMK__VALUE_QUORUM, true);
return;
- } else if (!pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
+ } else if (!pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_fencing_enabled)) {
pcmk__config_warn("Resetting \"" XML_RSC_ATTR_REQUIRES "\" for %s "
"to \"" PCMK__VALUE_QUORUM "\" because fencing "
"is disabled", rsc->id);
@@ -508,27 +519,29 @@ unpack_requires(pe_resource_t *rsc, const char *value, bool is_default)
return;
} else {
- pe__set_resource_flags(rsc,
- pe_rsc_needs_fencing|pe_rsc_needs_unfencing);
+ pe__set_resource_flags(rsc, pcmk_rsc_needs_fencing
+ |pcmk_rsc_needs_unfencing);
}
} else {
const char *orig_value = value;
- if (pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)) {
value = PCMK__VALUE_QUORUM;
- } else if ((rsc->variant == pe_native)
+ } else if ((rsc->variant == pcmk_rsc_variant_primitive)
&& xml_contains_remote_node(rsc->xml)) {
value = PCMK__VALUE_QUORUM;
- } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_enable_unfencing)) {
+ } else if (pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_enable_unfencing)) {
value = PCMK__VALUE_UNFENCING;
- } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
+ } else if (pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_fencing_enabled)) {
value = PCMK__VALUE_FENCING;
- } else if (rsc->cluster->no_quorum_policy == no_quorum_ignore) {
+ } else if (rsc->cluster->no_quorum_policy == pcmk_no_quorum_ignore) {
value = PCMK__VALUE_NOTHING;
} else {
@@ -550,18 +563,18 @@ unpack_requires(pe_resource_t *rsc, const char *value, bool is_default)
#ifndef PCMK__COMPAT_2_0
static void
-warn_about_deprecated_classes(pe_resource_t *rsc)
+warn_about_deprecated_classes(pcmk_resource_t *rsc)
{
const char *std = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
if (pcmk__str_eq(std, PCMK_RESOURCE_CLASS_UPSTART, pcmk__str_none)) {
- pe_warn_once(pe_wo_upstart,
+ pe_warn_once(pcmk__wo_upstart,
"Support for Upstart resources (such as %s) is deprecated "
"and will be removed in a future release of Pacemaker",
rsc->id);
} else if (pcmk__str_eq(std, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_none)) {
- pe_warn_once(pe_wo_nagios,
+ pe_warn_once(pcmk__wo_nagios,
"Support for Nagios resources (such as %s) is deprecated "
"and will be removed in a future release of Pacemaker",
rsc->id);
@@ -574,12 +587,12 @@ warn_about_deprecated_classes(pe_resource_t *rsc)
* \brief Unpack configuration XML for a given resource
*
* Unpack the XML object containing a resource's configuration into a new
- * \c pe_resource_t object.
+ * \c pcmk_resource_t object.
*
- * \param[in] xml_obj XML node containing the resource's configuration
- * \param[out] rsc Where to store the unpacked resource information
- * \param[in] parent Resource's parent, if any
- * \param[in,out] data_set Cluster working set
+ * \param[in] xml_obj XML node containing the resource's configuration
+ * \param[out] rsc Where to store the unpacked resource information
+ * \param[in] parent Resource's parent, if any
+ * \param[in,out] scheduler Scheduler data
*
* \return Standard Pacemaker return code
* \note If pcmk_rc_ok is returned, \p *rsc is guaranteed to be non-NULL, and
@@ -587,8 +600,8 @@ warn_about_deprecated_classes(pe_resource_t *rsc)
* free() method. Otherwise, \p *rsc is guaranteed to be NULL.
*/
int
-pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
- pe_resource_t *parent, pe_working_set_t *data_set)
+pe__unpack_resource(xmlNode *xml_obj, pcmk_resource_t **rsc,
+ pcmk_resource_t *parent, pcmk_scheduler_t *scheduler)
{
xmlNode *expanded_xml = NULL;
xmlNode *ops = NULL;
@@ -599,7 +612,7 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
+ .role = pcmk_role_unknown,
.now = NULL,
.match_data = NULL,
.rsc_data = NULL,
@@ -607,31 +620,31 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
};
CRM_CHECK(rsc != NULL, return EINVAL);
- CRM_CHECK((xml_obj != NULL) && (data_set != NULL),
+ CRM_CHECK((xml_obj != NULL) && (scheduler != NULL),
*rsc = NULL;
return EINVAL);
- rule_data.now = data_set->now;
+ rule_data.now = scheduler->now;
crm_log_xml_trace(xml_obj, "[raw XML]");
id = crm_element_value(xml_obj, XML_ATTR_ID);
if (id == NULL) {
pe_err("Ignoring <%s> configuration without " XML_ATTR_ID,
- crm_element_name(xml_obj));
+ xml_obj->name);
return pcmk_rc_unpack_error;
}
- if (unpack_template(xml_obj, &expanded_xml, data_set) == FALSE) {
+ if (unpack_template(xml_obj, &expanded_xml, scheduler) == FALSE) {
return pcmk_rc_unpack_error;
}
- *rsc = calloc(1, sizeof(pe_resource_t));
+ *rsc = calloc(1, sizeof(pcmk_resource_t));
if (*rsc == NULL) {
crm_crit("Unable to allocate memory for resource '%s'", id);
return ENOMEM;
}
- (*rsc)->cluster = data_set;
+ (*rsc)->cluster = scheduler;
if (expanded_xml) {
crm_log_xml_trace(expanded_xml, "[expanded XML]");
@@ -648,12 +661,12 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
(*rsc)->parent = parent;
ops = find_xml_node((*rsc)->xml, "operations", FALSE);
- (*rsc)->ops_xml = expand_idref(ops, data_set->input);
+ (*rsc)->ops_xml = expand_idref(ops, scheduler->input);
- (*rsc)->variant = get_resource_type(crm_element_name((*rsc)->xml));
- if ((*rsc)->variant == pe_unknown) {
+ (*rsc)->variant = get_resource_type((const char *) (*rsc)->xml->name);
+ if ((*rsc)->variant == pcmk_rsc_variant_unknown) {
pe_err("Ignoring resource '%s' of unknown type '%s'",
- id, crm_element_name((*rsc)->xml));
+ id, (*rsc)->xml->name);
common_free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
@@ -678,23 +691,23 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
(*rsc)->fns = &resource_class_functions[(*rsc)->variant];
- get_meta_attributes((*rsc)->meta, *rsc, NULL, data_set);
- (*rsc)->parameters = pe_rsc_params(*rsc, NULL, data_set); // \deprecated
+ get_meta_attributes((*rsc)->meta, *rsc, NULL, scheduler);
+ (*rsc)->parameters = pe_rsc_params(*rsc, NULL, scheduler); // \deprecated
(*rsc)->flags = 0;
- pe__set_resource_flags(*rsc, pe_rsc_runnable|pe_rsc_provisional);
+ pe__set_resource_flags(*rsc, pcmk_rsc_runnable|pcmk_rsc_unassigned);
- if (!pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
- pe__set_resource_flags(*rsc, pe_rsc_managed);
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_in_maintenance)) {
+ pe__set_resource_flags(*rsc, pcmk_rsc_managed);
}
(*rsc)->rsc_cons = NULL;
(*rsc)->rsc_tickets = NULL;
(*rsc)->actions = NULL;
- (*rsc)->role = RSC_ROLE_STOPPED;
- (*rsc)->next_role = RSC_ROLE_UNKNOWN;
+ (*rsc)->role = pcmk_role_stopped;
+ (*rsc)->next_role = pcmk_role_unknown;
- (*rsc)->recovery_type = recovery_stop_start;
+ (*rsc)->recovery_type = pcmk_multiply_active_restart;
(*rsc)->stickiness = 0;
(*rsc)->migration_threshold = INFINITY;
(*rsc)->failure_timeout = 0;
@@ -704,12 +717,12 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CRITICAL);
if ((value == NULL) || crm_is_true(value)) {
- pe__set_resource_flags(*rsc, pe_rsc_critical);
+ pe__set_resource_flags(*rsc, pcmk_rsc_critical);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_NOTIFY);
if (crm_is_true(value)) {
- pe__set_resource_flags(*rsc, pe_rsc_notify);
+ pe__set_resource_flags(*rsc, pcmk_rsc_notify);
}
if (xml_contains_remote_node((*rsc)->xml)) {
@@ -723,7 +736,7 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
value = g_hash_table_lookup((*rsc)->meta, XML_OP_ATTR_ALLOW_MIGRATE);
if (crm_is_true(value)) {
- pe__set_resource_flags(*rsc, pe_rsc_allow_migrate);
+ pe__set_resource_flags(*rsc, pcmk_rsc_migratable);
} else if ((value == NULL) && remote_node) {
/* By default, we want remote nodes to be able
* to float around the cluster without having to stop all the
@@ -732,38 +745,38 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
* problems, migration support can be explicitly turned off with
* allow-migrate=false.
*/
- pe__set_resource_flags(*rsc, pe_rsc_allow_migrate);
+ pe__set_resource_flags(*rsc, pcmk_rsc_migratable);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MANAGED);
if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
if (crm_is_true(value)) {
- pe__set_resource_flags(*rsc, pe_rsc_managed);
+ pe__set_resource_flags(*rsc, pcmk_rsc_managed);
} else {
- pe__clear_resource_flags(*rsc, pe_rsc_managed);
+ pe__clear_resource_flags(*rsc, pcmk_rsc_managed);
}
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MAINTENANCE);
if (crm_is_true(value)) {
- pe__clear_resource_flags(*rsc, pe_rsc_managed);
- pe__set_resource_flags(*rsc, pe_rsc_maintenance);
+ pe__clear_resource_flags(*rsc, pcmk_rsc_managed);
+ pe__set_resource_flags(*rsc, pcmk_rsc_maintenance);
}
- if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
- pe__clear_resource_flags(*rsc, pe_rsc_managed);
- pe__set_resource_flags(*rsc, pe_rsc_maintenance);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_in_maintenance)) {
+ pe__clear_resource_flags(*rsc, pcmk_rsc_managed);
+ pe__set_resource_flags(*rsc, pcmk_rsc_maintenance);
}
if (pe_rsc_is_clone(pe__const_top_resource(*rsc, false))) {
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_UNIQUE);
if (crm_is_true(value)) {
- pe__set_resource_flags(*rsc, pe_rsc_unique);
+ pe__set_resource_flags(*rsc, pcmk_rsc_unique);
}
if (detect_promotable(*rsc)) {
- pe__set_resource_flags(*rsc, pe_rsc_promotable);
+ pe__set_resource_flags(*rsc, pcmk_rsc_promotable);
}
} else {
- pe__set_resource_flags(*rsc, pe_rsc_unique);
+ pe__set_resource_flags(*rsc, pcmk_rsc_unique);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_RESTART);
@@ -771,7 +784,7 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
(*rsc)->restart_type = pe_restart_restart;
pe_rsc_trace((*rsc), "%s dependency restart handling: restart",
(*rsc)->id);
- pe_warn_once(pe_wo_restart_type,
+ pe_warn_once(pcmk__wo_restart_type,
"Support for restart-type is deprecated and will be removed in a future release");
} else {
@@ -782,17 +795,17 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MULTIPLE);
if (pcmk__str_eq(value, "stop_only", pcmk__str_casei)) {
- (*rsc)->recovery_type = recovery_stop_only;
+ (*rsc)->recovery_type = pcmk_multiply_active_stop;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: stop only",
(*rsc)->id);
} else if (pcmk__str_eq(value, "block", pcmk__str_casei)) {
- (*rsc)->recovery_type = recovery_block;
+ (*rsc)->recovery_type = pcmk_multiply_active_block;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: block",
(*rsc)->id);
} else if (pcmk__str_eq(value, "stop_unexpected", pcmk__str_casei)) {
- (*rsc)->recovery_type = recovery_stop_unexpected;
+ (*rsc)->recovery_type = pcmk_multiply_active_unexpected;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: "
"stop unexpected instances",
(*rsc)->id);
@@ -803,7 +816,7 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
pe_warn("%s is not a valid value for " XML_RSC_ATTR_MULTIPLE
", using default of \"stop_start\"", value);
}
- (*rsc)->recovery_type = recovery_stop_start;
+ (*rsc)->recovery_type = pcmk_multiply_active_restart;
pe_rsc_trace((*rsc), "%s multiple running resource recovery: "
"stop/start", (*rsc)->id);
}
@@ -813,7 +826,7 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
(*rsc)->stickiness = char2score(value);
}
- value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_STICKINESS);
+ value = g_hash_table_lookup((*rsc)->meta, PCMK_META_MIGRATION_THRESHOLD);
if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
(*rsc)->migration_threshold = char2score(value);
if ((*rsc)->migration_threshold < 0) {
@@ -821,8 +834,8 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
* should probably use the default (INFINITY) or 0 (to disable)
* instead.
*/
- pe_warn_once(pe_wo_neg_threshold,
- XML_RSC_ATTR_FAIL_STICKINESS
+ pe_warn_once(pcmk__wo_neg_threshold,
+ PCMK_META_MIGRATION_THRESHOLD
" must be non-negative, using 1 instead");
(*rsc)->migration_threshold = 1;
}
@@ -830,21 +843,21 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
if (pcmk__str_eq(crm_element_value((*rsc)->xml, XML_AGENT_ATTR_CLASS),
PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
- pe__set_working_set_flags(data_set, pe_flag_have_stonith_resource);
- pe__set_resource_flags(*rsc, pe_rsc_fence_device);
+ pe__set_working_set_flags(scheduler, pcmk_sched_have_fencing);
+ pe__set_resource_flags(*rsc, pcmk_rsc_fence_device);
}
value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_REQUIRES);
unpack_requires(*rsc, value, false);
- value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_TIMEOUT);
+ value = g_hash_table_lookup((*rsc)->meta, PCMK_META_FAILURE_TIMEOUT);
if (value != NULL) {
// Stored as seconds
(*rsc)->failure_timeout = (int) (crm_parse_interval_spec(value) / 1000);
}
if (remote_node) {
- GHashTable *params = pe_rsc_params(*rsc, NULL, data_set);
+ GHashTable *params = pe_rsc_params(*rsc, NULL, scheduler);
/* Grabbing the value now means that any rules based on node attributes
* will evaluate to false, so such rules should not be used with
@@ -865,34 +878,35 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
get_target_role(*rsc, &((*rsc)->next_role));
pe_rsc_trace((*rsc), "%s desired next state: %s", (*rsc)->id,
- (*rsc)->next_role != RSC_ROLE_UNKNOWN ? role2text((*rsc)->next_role) : "default");
+ (*rsc)->next_role != pcmk_role_unknown? role2text((*rsc)->next_role) : "default");
- if ((*rsc)->fns->unpack(*rsc, data_set) == FALSE) {
+ if ((*rsc)->fns->unpack(*rsc, scheduler) == FALSE) {
(*rsc)->fns->free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
}
- if (pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)) {
// This tag must stay exactly the same because it is tested elsewhere
- resource_location(*rsc, NULL, 0, "symmetric_default", data_set);
+ resource_location(*rsc, NULL, 0, "symmetric_default", scheduler);
} else if (guest_node) {
/* remote resources tied to a container resource must always be allowed
* to opt-in to the cluster. Whether the connection resource is actually
* allowed to be placed on a node is dependent on the container resource */
- resource_location(*rsc, NULL, 0, "remote_connection_default", data_set);
+ resource_location(*rsc, NULL, 0, "remote_connection_default",
+ scheduler);
}
pe_rsc_trace((*rsc), "%s action notification: %s", (*rsc)->id,
- pcmk_is_set((*rsc)->flags, pe_rsc_notify)? "required" : "not required");
+ pcmk_is_set((*rsc)->flags, pcmk_rsc_notify)? "required" : "not required");
(*rsc)->utilization = pcmk__strkey_table(free, free);
pe__unpack_dataset_nvpairs((*rsc)->xml, XML_TAG_UTILIZATION, &rule_data,
- (*rsc)->utilization, NULL, FALSE, data_set);
+ (*rsc)->utilization, NULL, FALSE, scheduler);
if (expanded_xml) {
- if (add_template_rsc(xml_obj, data_set) == FALSE) {
+ if (add_template_rsc(xml_obj, scheduler) == FALSE) {
(*rsc)->fns->free(*rsc);
*rsc = NULL;
return pcmk_rc_unpack_error;
@@ -902,9 +916,9 @@ pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
}
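
Per the updated contract documented earlier, pe__unpack_resource() returns a standard Pacemaker return code, and *rsc is non-NULL only on pcmk_rc_ok, to be released through the resource's own free() method. A hedged caller sketch (not part of this patch):

/* Hypothetical caller sketch, not part of the patch, following the contract
 * documented above: *rsc is valid only when pcmk_rc_ok is returned. */
pcmk_resource_t *rsc = NULL;
int rc = pe__unpack_resource(xml_obj, &rsc, NULL, scheduler);

if (rc == pcmk_rc_ok) {
    pe_rsc_trace(rsc, "Unpacked resource %s", rsc->id);
    rsc->fns->free(rsc);    // release via the variant's free() method
} else {
    crm_err("Could not unpack resource: %s", pcmk_rc_str(rc));
}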
gboolean
-is_parent(pe_resource_t *child, pe_resource_t *rsc)
+is_parent(pcmk_resource_t *child, pcmk_resource_t *rsc)
{
- pe_resource_t *parent = child;
+ pcmk_resource_t *parent = child;
if (parent == NULL || rsc == NULL) {
return FALSE;
@@ -918,15 +932,16 @@ is_parent(pe_resource_t *child, pe_resource_t *rsc)
return FALSE;
}
-pe_resource_t *
-uber_parent(pe_resource_t * rsc)
+pcmk_resource_t *
+uber_parent(pcmk_resource_t *rsc)
{
- pe_resource_t *parent = rsc;
+ pcmk_resource_t *parent = rsc;
if (parent == NULL) {
return NULL;
}
- while (parent->parent != NULL && parent->parent->variant != pe_container) {
+ while ((parent->parent != NULL)
+ && (parent->parent->variant != pcmk_rsc_variant_bundle)) {
parent = parent->parent;
}
return parent;
@@ -943,16 +958,17 @@ uber_parent(pe_resource_t * rsc)
* the bundle if \p rsc is bundled and \p include_bundle is true,
* otherwise the topmost parent of \p rsc up to a clone
*/
-const pe_resource_t *
-pe__const_top_resource(const pe_resource_t *rsc, bool include_bundle)
+const pcmk_resource_t *
+pe__const_top_resource(const pcmk_resource_t *rsc, bool include_bundle)
{
- const pe_resource_t *parent = rsc;
+ const pcmk_resource_t *parent = rsc;
if (parent == NULL) {
return NULL;
}
while (parent->parent != NULL) {
- if (!include_bundle && (parent->parent->variant == pe_container)) {
+ if (!include_bundle
+ && (parent->parent->variant == pcmk_rsc_variant_bundle)) {
break;
}
parent = parent->parent;
@@ -961,7 +977,7 @@ pe__const_top_resource(const pe_resource_t *rsc, bool include_bundle)
}
void
-common_free(pe_resource_t * rsc)
+common_free(pcmk_resource_t * rsc)
{
if (rsc == NULL) {
return;
@@ -984,7 +1000,9 @@ common_free(pe_resource_t * rsc)
g_hash_table_destroy(rsc->utilization);
}
- if ((rsc->parent == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ if ((rsc->parent == NULL)
+ && pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
+
free_xml(rsc->xml);
rsc->xml = NULL;
free_xml(rsc->orig_xml);
@@ -1037,8 +1055,8 @@ common_free(pe_resource_t * rsc)
* \return true if the count should continue, or false if sufficiently known
*/
bool
-pe__count_active_node(const pe_resource_t *rsc, pe_node_t *node,
- pe_node_t **active, unsigned int *count_all,
+pe__count_active_node(const pcmk_resource_t *rsc, pcmk_node_t *node,
+ pcmk_node_t **active, unsigned int *count_all,
unsigned int *count_clean)
{
bool keep_looking = false;
@@ -1065,7 +1083,7 @@ pe__count_active_node(const pe_resource_t *rsc, pe_node_t *node,
} else {
keep_looking = true;
}
- } else if (!pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
+ } else if (!pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)) {
if (is_happy && ((*active == NULL) || !(*active)->details->online
|| (*active)->details->unclean)) {
*active = node; // This is the first clean node
@@ -1079,12 +1097,12 @@ pe__count_active_node(const pe_resource_t *rsc, pe_node_t *node,
return keep_looking;
}
-// Shared implementation of resource_object_functions_t:active_node()
-static pe_node_t *
-active_node(const pe_resource_t *rsc, unsigned int *count_all,
+// Shared implementation of pcmk_rsc_methods_t:active_node()
+static pcmk_node_t *
+active_node(const pcmk_resource_t *rsc, unsigned int *count_all,
unsigned int *count_clean)
{
- pe_node_t *active = NULL;
+ pcmk_node_t *active = NULL;
if (count_all != NULL) {
*count_all = 0;
@@ -1096,7 +1114,7 @@ active_node(const pe_resource_t *rsc, unsigned int *count_all,
return NULL;
}
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
- if (!pe__count_active_node(rsc, (pe_node_t *) iter->data, &active,
+ if (!pe__count_active_node(rsc, (pcmk_node_t *) iter->data, &active,
count_all, count_clean)) {
break; // Don't waste time iterating if we don't have to
}
@@ -1117,8 +1135,8 @@ active_node(const pe_resource_t *rsc, unsigned int *count_all,
* active nodes or only clean active nodes is desired according to the
* "requires" meta-attribute.
*/
-pe_node_t *
-pe__find_active_requires(const pe_resource_t *rsc, unsigned int *count)
+pcmk_node_t *
+pe__find_active_requires(const pcmk_resource_t *rsc, unsigned int *count)
{
if (rsc == NULL) {
if (count != NULL) {
@@ -1126,7 +1144,7 @@ pe__find_active_requires(const pe_resource_t *rsc, unsigned int *count)
}
return NULL;
- } else if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)) {
return rsc->fns->active_node(rsc, count, NULL);
} else {
@@ -1135,20 +1153,20 @@ pe__find_active_requires(const pe_resource_t *rsc, unsigned int *count)
}
void
-pe__count_common(pe_resource_t *rsc)
+pe__count_common(pcmk_resource_t *rsc)
{
if (rsc->children != NULL) {
for (GList *item = rsc->children; item != NULL; item = item->next) {
- ((pe_resource_t *) item->data)->fns->count(item->data);
+ ((pcmk_resource_t *) item->data)->fns->count(item->data);
}
- } else if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)
- || (rsc->role > RSC_ROLE_STOPPED)) {
+ } else if (!pcmk_is_set(rsc->flags, pcmk_rsc_removed)
+ || (rsc->role > pcmk_role_stopped)) {
rsc->cluster->ninstances++;
if (pe__resource_is_disabled(rsc)) {
rsc->cluster->disabled_resources++;
}
- if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) {
rsc->cluster->blocked_resources++;
}
}
@@ -1163,7 +1181,7 @@ pe__count_common(pe_resource_t *rsc)
* \param[in] why Human-friendly reason why role is changing (for logs)
*/
void
-pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role, const char *why)
+pe__set_next_role(pcmk_resource_t *rsc, enum rsc_role_e role, const char *why)
{
CRM_ASSERT((rsc != NULL) && (why != NULL));
if (rsc->next_role != role) {
diff --git a/lib/pengine/failcounts.c b/lib/pengine/failcounts.c
index a4a3e11..6990d3d 100644
--- a/lib/pengine/failcounts.c
+++ b/lib/pengine/failcounts.c
@@ -77,7 +77,8 @@ is_matched_failure(const char *rsc_id, const xmlNode *conf_op_xml,
}
static gboolean
-block_failure(const pe_node_t *node, pe_resource_t *rsc, const xmlNode *xml_op)
+block_failure(const pcmk_node_t *node, pcmk_resource_t *rsc,
+ const xmlNode *xml_op)
{
char *xml_name = clone_strip(rsc->id);
@@ -180,11 +181,11 @@ block_failure(const pe_node_t *node, pe_resource_t *rsc, const xmlNode *xml_op)
* \note The caller is responsible for freeing the result.
*/
static inline char *
-rsc_fail_name(const pe_resource_t *rsc)
+rsc_fail_name(const pcmk_resource_t *rsc)
{
const char *name = (rsc->clone_name? rsc->clone_name : rsc->id);
- return pcmk_is_set(rsc->flags, pe_rsc_unique)? strdup(name) : clone_strip(name);
+ return pcmk_is_set(rsc->flags, pcmk_rsc_unique)? strdup(name) : clone_strip(name);
}
/*!
@@ -236,7 +237,6 @@ generate_fail_regex(const char *prefix, const char *rsc_name,
* \brief Compile regular expressions to match failure-related node attributes
*
* \param[in] rsc Resource being checked for failures
- * \param[in] data_set Data set (for CRM feature set version)
* \param[out] failcount_re Storage for regular expression for fail count
* \param[out] lastfailure_re Storage for regular expression for last failure
*
@@ -245,23 +245,25 @@ generate_fail_regex(const char *prefix, const char *rsc_name,
* regfree().
*/
static int
-generate_fail_regexes(const pe_resource_t *rsc,
- const pe_working_set_t *data_set,
+generate_fail_regexes(const pcmk_resource_t *rsc,
regex_t *failcount_re, regex_t *lastfailure_re)
{
+ int rc = pcmk_rc_ok;
char *rsc_name = rsc_fail_name(rsc);
- const char *version = crm_element_value(data_set->input, XML_ATTR_CRM_VERSION);
+ const char *version = crm_element_value(rsc->cluster->input,
+ XML_ATTR_CRM_VERSION);
+
+ // @COMPAT Pacemaker <= 1.1.16 used a single fail count per resource
gboolean is_legacy = (compare_version(version, "3.0.13") < 0);
- int rc = pcmk_rc_ok;
if (generate_fail_regex(PCMK__FAIL_COUNT_PREFIX, rsc_name, is_legacy,
- pcmk_is_set(rsc->flags, pe_rsc_unique),
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique),
failcount_re) != pcmk_rc_ok) {
rc = EINVAL;
} else if (generate_fail_regex(PCMK__LAST_FAILURE_PREFIX, rsc_name,
is_legacy,
- pcmk_is_set(rsc->flags, pe_rsc_unique),
+ pcmk_is_set(rsc->flags, pcmk_rsc_unique),
lastfailure_re) != pcmk_rc_ok) {
rc = EINVAL;
regfree(failcount_re);
@@ -271,68 +273,137 @@ generate_fail_regexes(const pe_resource_t *rsc,
return rc;
}
-int
-pe_get_failcount(const pe_node_t *node, pe_resource_t *rsc,
- time_t *last_failure, uint32_t flags, const xmlNode *xml_op)
+// Data for fail-count-related iterators
+struct failcount_data {
+ const pcmk_node_t *node;// Node to check for fail count
+ pcmk_resource_t *rsc; // Resource to check for fail count
+ uint32_t flags; // Fail count flags
+ const xmlNode *xml_op; // History entry for expiration purposes (or NULL)
+ regex_t failcount_re; // Fail count regular expression to match
+ regex_t lastfailure_re; // Last failure regular expression to match
+ int failcount; // Fail count so far
+ time_t last_failure; // Time of most recent failure so far
+};
+
+/*!
+ * \internal
+ * \brief Update fail count and last failure appropriately for a node attribute
+ *
+ * \param[in] key Node attribute name
+ * \param[in] value Node attribute value
+ * \param[in] user_data Fail count data to update
+ */
+static void
+update_failcount_for_attr(gpointer key, gpointer value, gpointer user_data)
{
- char *key = NULL;
- const char *value = NULL;
- regex_t failcount_re, lastfailure_re;
- int failcount = 0;
- time_t last = 0;
- GHashTableIter iter;
-
- CRM_CHECK(generate_fail_regexes(rsc, rsc->cluster, &failcount_re,
- &lastfailure_re) == pcmk_rc_ok,
- return 0);
+ struct failcount_data *fc_data = user_data;
+
+ // If this is a matching fail count attribute, update fail count
+ if (regexec(&(fc_data->failcount_re), (const char *) key, 0, NULL, 0) == 0) {
+ fc_data->failcount = pcmk__add_scores(fc_data->failcount,
+ char2score(value));
+ pe_rsc_trace(fc_data->rsc, "Added %s (%s) to %s fail count (now %s)",
+ (const char *) key, (const char *) value, fc_data->rsc->id,
+ pcmk_readable_score(fc_data->failcount));
+ return;
+ }
- /* Resource fail count is sum of all matching operation fail counts */
- g_hash_table_iter_init(&iter, node->details->attrs);
- while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
- if (regexec(&failcount_re, key, 0, NULL, 0) == 0) {
- failcount = pcmk__add_scores(failcount, char2score(value));
- crm_trace("Added %s (%s) to %s fail count (now %s)",
- key, value, rsc->id, pcmk_readable_score(failcount));
- } else if (regexec(&lastfailure_re, key, 0, NULL, 0) == 0) {
- long long last_ll;
-
- if (pcmk__scan_ll(value, &last_ll, 0LL) == pcmk_rc_ok) {
- last = (time_t) QB_MAX(last, last_ll);
- }
+ // If this is a matching last failure attribute, update last failure
+ if (regexec(&(fc_data->lastfailure_re), (const char *) key, 0, NULL,
+ 0) == 0) {
+ long long last_ll;
+
+ if (pcmk__scan_ll(value, &last_ll, 0LL) == pcmk_rc_ok) {
+ fc_data->last_failure = (time_t) QB_MAX(fc_data->last_failure,
+ last_ll);
}
}
+}
- regfree(&failcount_re);
- regfree(&lastfailure_re);
+/*!
+ * \internal
+ * \brief Update fail count and last failure appropriately for a filler resource
+ *
+ * \param[in] data Filler resource
+ * \param[in] user_data Fail count data to update
+ */
+static void
+update_failcount_for_filler(gpointer data, gpointer user_data)
+{
+ pcmk_resource_t *filler = data;
+ struct failcount_data *fc_data = user_data;
+ time_t filler_last_failure = 0;
+
+ fc_data->failcount += pe_get_failcount(fc_data->node, filler,
+ &filler_last_failure, fc_data->flags,
+ fc_data->xml_op);
+ fc_data->last_failure = QB_MAX(fc_data->last_failure, filler_last_failure);
+}
- if ((failcount > 0) && (last > 0) && (last_failure != NULL)) {
- *last_failure = last;
- }
+/*!
+ * \internal
+ * \brief Get a resource's fail count on a node
+ *
+ * \param[in] node Node to check
+ * \param[in,out] rsc Resource to check
+ * \param[out] last_failure If not NULL, where to set time of most recent
+ * failure of \p rsc on \p node
+ * \param[in] flags Group of enum pcmk__fc_flags
+ * \param[in] xml_op If not NULL, consider only the action in this
+ * history entry when determining whether on-fail
+ * is configured as "blocked", otherwise consider
+ * all actions configured for \p rsc
+ *
+ * \return Fail count for \p rsc on \p node according to \p flags
+ */
+int
+pe_get_failcount(const pcmk_node_t *node, pcmk_resource_t *rsc,
+ time_t *last_failure, uint32_t flags, const xmlNode *xml_op)
+{
+ struct failcount_data fc_data = {
+ .node = node,
+ .rsc = rsc,
+ .flags = flags,
+ .xml_op = xml_op,
+ .failcount = 0,
+ .last_failure = (time_t) 0,
+ };
+
+ // Calculate resource failcount as sum of all matching operation failcounts
+ CRM_CHECK(generate_fail_regexes(rsc, &fc_data.failcount_re,
+ &fc_data.lastfailure_re) == pcmk_rc_ok,
+ return 0);
+ g_hash_table_foreach(node->details->attrs, update_failcount_for_attr,
+ &fc_data);
+ regfree(&(fc_data.failcount_re));
+ regfree(&(fc_data.lastfailure_re));
- /* If failure blocks the resource, disregard any failure timeout */
- if ((failcount > 0) && rsc->failure_timeout
+ // If failure blocks the resource, disregard any failure timeout
+ if ((fc_data.failcount > 0) && (rsc->failure_timeout > 0)
&& block_failure(node, rsc, xml_op)) {
- pe_warn("Ignoring failure timeout %d for %s because it conflicts with on-fail=block",
+ pe_warn("Ignoring failure timeout %d for %s "
+ "because it conflicts with on-fail=block",
rsc->failure_timeout, rsc->id);
rsc->failure_timeout = 0;
}
- /* If all failures have expired, ignore fail count */
- if (pcmk_is_set(flags, pe_fc_effective) && (failcount > 0) && (last > 0)
- && rsc->failure_timeout) {
+ // If all failures have expired, ignore fail count
+ if (pcmk_is_set(flags, pcmk__fc_effective) && (fc_data.failcount > 0)
+ && (fc_data.last_failure > 0) && (rsc->failure_timeout != 0)) {
time_t now = get_effective_time(rsc->cluster);
- if (now > (last + rsc->failure_timeout)) {
- crm_debug("Failcount for %s on %s expired after %ds",
- rsc->id, pe__node_name(node), rsc->failure_timeout);
- failcount = 0;
+ if (now > (fc_data.last_failure + rsc->failure_timeout)) {
+ pe_rsc_debug(rsc, "Failcount for %s on %s expired after %ds",
+ rsc->id, pe__node_name(node), rsc->failure_timeout);
+ fc_data.failcount = 0;
}
}
- /* We never want the fail counts of a bundle container's fillers to
- * count towards the container's fail count.
+ /* Add the fail count of any filler resources, except that we never want the
+ * fail counts of a bundle container's fillers to count towards the
+ * container's fail count.
*
* Most importantly, a Pacemaker Remote connection to a bundle container
* is a filler of the container, but can reside on a different node than the
@@ -340,62 +411,56 @@ pe_get_failcount(const pe_node_t *node, pe_resource_t *rsc,
* container's fail count on that node could lead to attempting to stop the
* container on the wrong node.
*/
-
- if (pcmk_is_set(flags, pe_fc_fillers) && rsc->fillers
+ if (pcmk_is_set(flags, pcmk__fc_fillers) && (rsc->fillers != NULL)
&& !pe_rsc_is_bundled(rsc)) {
- GList *gIter = NULL;
-
- for (gIter = rsc->fillers; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *filler = (pe_resource_t *) gIter->data;
- time_t filler_last_failure = 0;
-
- failcount += pe_get_failcount(node, filler, &filler_last_failure,
- flags, xml_op);
-
- if (last_failure && filler_last_failure > *last_failure) {
- *last_failure = filler_last_failure;
- }
- }
-
- if (failcount > 0) {
- crm_info("Container %s and the resources within it "
- "have failed %s time%s on %s",
- rsc->id, pcmk_readable_score(failcount),
- pcmk__plural_s(failcount), pe__node_name(node));
+ g_list_foreach(rsc->fillers, update_failcount_for_filler, &fc_data);
+ if (fc_data.failcount > 0) {
+ pe_rsc_info(rsc,
+ "Container %s and the resources within it "
+ "have failed %s time%s on %s",
+ rsc->id, pcmk_readable_score(fc_data.failcount),
+ pcmk__plural_s(fc_data.failcount), pe__node_name(node));
}
- } else if (failcount > 0) {
- crm_info("%s has failed %s time%s on %s",
- rsc->id, pcmk_readable_score(failcount),
- pcmk__plural_s(failcount), pe__node_name(node));
+ } else if (fc_data.failcount > 0) {
+ pe_rsc_info(rsc, "%s has failed %s time%s on %s",
+ rsc->id, pcmk_readable_score(fc_data.failcount),
+ pcmk__plural_s(fc_data.failcount), pe__node_name(node));
}
- return failcount;
+ if (last_failure != NULL) {
+ if ((fc_data.failcount > 0) && (fc_data.last_failure > 0)) {
+ *last_failure = fc_data.last_failure;
+ } else {
+ *last_failure = 0;
+ }
+ }
+ return fc_data.failcount;
}
/*!
* \brief Schedule a controller operation to clear a fail count
*
- * \param[in,out] rsc Resource with failure
- * \param[in] node Node failure occurred on
- * \param[in] reason Readable description why needed (for logging)
- * \param[in,out] data_set Working set for cluster
+ * \param[in,out] rsc Resource with failure
+ * \param[in] node Node failure occurred on
+ * \param[in] reason Readable description why needed (for logging)
+ * \param[in,out] scheduler Scheduler data cluster
*
* \return Scheduled action
*/
-pe_action_t *
-pe__clear_failcount(pe_resource_t *rsc, const pe_node_t *node,
- const char *reason, pe_working_set_t *data_set)
+pcmk_action_t *
+pe__clear_failcount(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ const char *reason, pcmk_scheduler_t *scheduler)
{
char *key = NULL;
- pe_action_t *clear = NULL;
+ pcmk_action_t *clear = NULL;
- CRM_CHECK(rsc && node && reason && data_set, return NULL);
+ CRM_CHECK(rsc && node && reason && scheduler, return NULL);
- key = pcmk__op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0);
- clear = custom_action(rsc, key, CRM_OP_CLEAR_FAILCOUNT, node, FALSE, TRUE,
- data_set);
+ key = pcmk__op_key(rsc->id, PCMK_ACTION_CLEAR_FAILCOUNT, 0);
+ clear = custom_action(rsc, key, PCMK_ACTION_CLEAR_FAILCOUNT, node, FALSE,
+ scheduler);
add_hash_param(clear->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
crm_notice("Clearing failure of %s on %s because %s " CRM_XS " %s",
rsc->id, pe__node_name(node), reason, clear->uuid);
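[Editor's note, not part of the patch] The rewritten pe_get_failcount() above replaces the open-coded GHashTableIter loop with callbacks driven by g_hash_table_foreach() and g_list_foreach(), threading a struct failcount_data accumulator through as user_data. The following standalone sketch illustrates that accumulator pattern with plain GLib types only; the names and attribute keys are hypothetical stand-ins, not Pacemaker API.

/* Minimal sketch of the foreach-with-accumulator pattern (assumes GLib). */
#include <glib.h>
#include <stdio.h>

struct sum_data {
    const char *prefix;  /* only keys with this prefix are counted */
    int total;           /* running sum, analogous to fc_data.failcount */
};

static void
add_matching(gpointer key, gpointer value, gpointer user_data)
{
    struct sum_data *data = user_data;

    if (g_str_has_prefix(key, data->prefix)) {
        data->total += GPOINTER_TO_INT(value);
    }
}

int
main(void)
{
    GHashTable *attrs = g_hash_table_new(g_str_hash, g_str_equal);
    struct sum_data data = { .prefix = "fail-count-", .total = 0 };

    g_hash_table_insert(attrs, (gpointer) "fail-count-dummy", GINT_TO_POINTER(2));
    g_hash_table_insert(attrs, (gpointer) "last-failure-dummy", GINT_TO_POINTER(99));

    g_hash_table_foreach(attrs, add_matching, &data);
    printf("total=%d\n", data.total);  /* prints total=2 */

    g_hash_table_destroy(attrs);
    return 0;
}

The upstream change is behavior-preserving; the callback form mainly avoids duplicating iterator boilerplate for the attribute and filler passes.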
diff --git a/lib/pengine/group.c b/lib/pengine/group.c
index d54b01a..dad610c 100644
--- a/lib/pengine/group.c
+++ b/lib/pengine/group.c
@@ -21,8 +21,8 @@
#include <pe_status_private.h>
typedef struct group_variant_data_s {
- pe_resource_t *last_child; // Last group member
- uint32_t flags; // Group of enum pe__group_flags
+ pcmk_resource_t *last_child; // Last group member
+ uint32_t flags; // Group of enum pcmk__group_flags
} group_variant_data_t;
/*!
@@ -33,11 +33,11 @@ typedef struct group_variant_data_s {
*
* \return Last member of \p group if any, otherwise NULL
*/
-pe_resource_t *
-pe__last_group_member(const pe_resource_t *group)
+pcmk_resource_t *
+pe__last_group_member(const pcmk_resource_t *group)
{
if (group != NULL) {
- CRM_CHECK((group->variant == pe_group)
+ CRM_CHECK((group->variant == pcmk_rsc_variant_group)
&& (group->variant_opaque != NULL), return NULL);
return ((group_variant_data_t *) group->variant_opaque)->last_child;
}
@@ -54,11 +54,11 @@ pe__last_group_member(const pe_resource_t *group)
* \return true if all \p flags are set for \p group, otherwise false
*/
bool
-pe__group_flag_is_set(const pe_resource_t *group, uint32_t flags)
+pe__group_flag_is_set(const pcmk_resource_t *group, uint32_t flags)
{
group_variant_data_t *group_data = NULL;
- CRM_CHECK((group != NULL) && (group->variant == pe_group)
+ CRM_CHECK((group != NULL) && (group->variant == pcmk_rsc_variant_group)
&& (group->variant_opaque != NULL), return false);
group_data = (group_variant_data_t *) group->variant_opaque;
return pcmk_all_flags_set(group_data->flags, flags);
@@ -74,7 +74,7 @@ pe__group_flag_is_set(const pe_resource_t *group, uint32_t flags)
* \param[in] wo_bit "Warn once" flag to use for deprecation warning
*/
static void
-set_group_flag(pe_resource_t *group, const char *option, uint32_t flag,
+set_group_flag(pcmk_resource_t *group, const char *option, uint32_t flag,
uint32_t wo_bit)
{
const char *value_s = NULL;
@@ -97,12 +97,12 @@ set_group_flag(pe_resource_t *group, const char *option, uint32_t flag,
}
static int
-inactive_resources(pe_resource_t *rsc)
+inactive_resources(pcmk_resource_t *rsc)
{
int retval = 0;
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
if (!child_rsc->fns->active(child_rsc, TRUE)) {
retval++;
@@ -113,7 +113,7 @@ inactive_resources(pe_resource_t *rsc)
}
static void
-group_header(pcmk__output_t *out, int *rc, const pe_resource_t *rsc,
+group_header(pcmk__output_t *out, int *rc, const pcmk_resource_t *rsc,
int n_inactive, bool show_inactive, const char *desc)
{
GString *attrs = NULL;
@@ -128,10 +128,10 @@ group_header(pcmk__output_t *out, int *rc, const pe_resource_t *rsc,
pcmk__add_separated_word(&attrs, 64, "disabled", ", ");
}
- if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)) {
pcmk__add_separated_word(&attrs, 64, "maintenance", ", ");
- } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ } else if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pcmk__add_separated_word(&attrs, 64, "unmanaged", ", ");
}
@@ -150,8 +150,8 @@ group_header(pcmk__output_t *out, int *rc, const pe_resource_t *rsc,
}
static bool
-skip_child_rsc(pe_resource_t *rsc, pe_resource_t *child, gboolean parent_passes,
- GList *only_rsc, uint32_t show_opts)
+skip_child_rsc(pcmk_resource_t *rsc, pcmk_resource_t *child,
+ gboolean parent_passes, GList *only_rsc, uint32_t show_opts)
{
bool star_list = pcmk__list_of_1(only_rsc) &&
pcmk__str_eq("*", g_list_first(only_rsc)->data, pcmk__str_none);
@@ -177,7 +177,7 @@ skip_child_rsc(pe_resource_t *rsc, pe_resource_t *child, gboolean parent_passes,
}
gboolean
-group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
+group_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = rsc->xml;
xmlNode *xml_native_rsc = NULL;
@@ -191,9 +191,10 @@ group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
rsc->variant_opaque = group_data;
// @COMPAT These are deprecated since 2.1.5
- set_group_flag(rsc, XML_RSC_ATTR_ORDERED, pe__group_ordered,
- pe_wo_group_order);
- set_group_flag(rsc, "collocated", pe__group_colocated, pe_wo_group_coloc);
+ set_group_flag(rsc, XML_RSC_ATTR_ORDERED, pcmk__group_ordered,
+ pcmk__wo_group_order);
+ set_group_flag(rsc, "collocated", pcmk__group_colocated,
+ pcmk__wo_group_coloc);
clone_id = crm_element_value(rsc->xml, XML_RSC_ATTR_INCARNATION);
@@ -202,11 +203,11 @@ group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
if (pcmk__str_eq((const char *)xml_native_rsc->name,
XML_CIB_TAG_RESOURCE, pcmk__str_none)) {
- pe_resource_t *new_rsc = NULL;
+ pcmk_resource_t *new_rsc = NULL;
crm_xml_add(xml_native_rsc, XML_RSC_ATTR_INCARNATION, clone_id);
if (pe__unpack_resource(xml_native_rsc, &new_rsc, rsc,
- data_set) != pcmk_rc_ok) {
+ scheduler) != pcmk_rc_ok) {
continue;
}
@@ -232,14 +233,14 @@ group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
}
gboolean
-group_active(pe_resource_t * rsc, gboolean all)
+group_active(pcmk_resource_t *rsc, gboolean all)
{
gboolean c_all = TRUE;
gboolean c_any = FALSE;
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
if (child_rsc->fns->active(child_rsc, all)) {
c_any = TRUE;
@@ -261,7 +262,7 @@ group_active(pe_resource_t * rsc, gboolean all)
* \deprecated This function will be removed in a future release
*/
static void
-group_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
+group_print_xml(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
GList *gIter = rsc->children;
@@ -272,7 +273,7 @@ group_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
status_print(">\n");
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
child_rsc->fns->print(child_rsc, child_text, options, print_data);
}
@@ -286,7 +287,7 @@ group_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
* \deprecated This function will be removed in a future release
*/
void
-group_print(pe_resource_t *rsc, const char *pre_text, long options,
+group_print(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
char *child_text = NULL;
@@ -317,7 +318,7 @@ group_print(pe_resource_t *rsc, const char *pre_text, long options,
} else {
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
if (options & pe_print_html) {
status_print("<li>\n");
@@ -335,12 +336,13 @@ group_print(pe_resource_t *rsc, const char *pre_text, long options,
free(child_text);
}
-PCMK__OUTPUT_ARGS("group", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("group", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__group_xml(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -359,7 +361,7 @@ pe__group_xml(pcmk__output_t *out, va_list args)
}
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
if (skip_child_rsc(rsc, child_rsc, parent_passes, only_rsc, show_opts)) {
continue;
@@ -367,8 +369,8 @@ pe__group_xml(pcmk__output_t *out, va_list args)
if (rc == pcmk_rc_no_output) {
char *count = pcmk__itoa(g_list_length(gIter));
- const char *maint_s = pe__rsc_bool_str(rsc, pe_rsc_maintenance);
- const char *managed_s = pe__rsc_bool_str(rsc, pe_rsc_managed);
+ const char *maint_s = pe__rsc_bool_str(rsc, pcmk_rsc_maintenance);
+ const char *managed_s = pe__rsc_bool_str(rsc, pcmk_rsc_managed);
const char *disabled_s = pcmk__btoa(pe__resource_is_disabled(rsc));
rc = pe__name_and_nvpairs_xml(out, true, "group", 5,
@@ -393,12 +395,13 @@ pe__group_xml(pcmk__output_t *out, va_list args)
return rc;
}
-PCMK__OUTPUT_ARGS("group", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("group", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__group_default(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -431,7 +434,7 @@ pe__group_default(pcmk__output_t *out, va_list args)
} else {
for (GList *gIter = rsc->children; gIter; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
if (skip_child_rsc(rsc, child_rsc, parent_passes, only_rsc, show_opts)) {
continue;
@@ -450,14 +453,14 @@ pe__group_default(pcmk__output_t *out, va_list args)
}
void
-group_free(pe_resource_t * rsc)
+group_free(pcmk_resource_t * rsc)
{
CRM_CHECK(rsc != NULL, return);
pe_rsc_trace(rsc, "Freeing %s", rsc->id);
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
CRM_ASSERT(child_rsc);
pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
@@ -471,13 +474,13 @@ group_free(pe_resource_t * rsc)
}
enum rsc_role_e
-group_resource_state(const pe_resource_t * rsc, gboolean current)
+group_resource_state(const pcmk_resource_t * rsc, gboolean current)
{
- enum rsc_role_e group_role = RSC_ROLE_UNKNOWN;
+ enum rsc_role_e group_role = pcmk_role_unknown;
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
enum rsc_role_e role = child_rsc->fns->state(child_rsc, current);
if (role > group_role) {
@@ -490,7 +493,7 @@ group_resource_state(const pe_resource_t * rsc, gboolean current)
}
gboolean
-pe__group_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
+pe__group_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent)
{
gboolean passes = FALSE;
@@ -508,7 +511,7 @@ pe__group_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
for (const GList *iter = rsc->children;
iter != NULL; iter = iter->next) {
- const pe_resource_t *child_rsc = (const pe_resource_t *) iter->data;
+ const pcmk_resource_t *child_rsc = iter->data;
if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) {
passes = TRUE;
@@ -519,3 +522,18 @@ pe__group_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
return !passes;
}
+
+/*!
+ * \internal
+ * \brief Get maximum group resource instances per node
+ *
+ * \param[in] rsc Group resource to check
+ *
+ * \return Maximum number of \p rsc instances that can be active on one node
+ */
+unsigned int
+pe__group_max_per_node(const pcmk_resource_t *rsc)
+{
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
+ return 1U;
+}
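[Editor's note, not part of the patch] The new pe__group_max_per_node() appended to group.c (with a primitive counterpart later in this patch) gives each resource variant a uniform way to report how many of its instances may run on one node; for groups and primitives that is always one. A hedged sketch of the dispatch idea, using hypothetical names rather than Pacemaker's structures:

/* Illustrative only: per-variant "max instances per node" dispatch. */
#include <assert.h>

enum variant { VARIANT_PRIMITIVE, VARIANT_GROUP, VARIANT_CLONE };

struct resource {
    enum variant variant;
    unsigned int clone_node_max;   /* meaningful for clones only */
};

static unsigned int
max_per_node(const struct resource *rsc)
{
    assert(rsc != NULL);
    switch (rsc->variant) {
        case VARIANT_CLONE:
            return rsc->clone_node_max;   /* configurable for clones */
        default:
            return 1U;                    /* primitives and groups: at most one */
    }
}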
diff --git a/lib/pengine/native.c b/lib/pengine/native.c
index 5e92ddc..48b1a6a 100644
--- a/lib/pengine/native.c
+++ b/lib/pengine/native.c
@@ -30,18 +30,19 @@
* \brief Check whether a resource is active on multiple nodes
*/
static bool
-is_multiply_active(const pe_resource_t *rsc)
+is_multiply_active(const pcmk_resource_t *rsc)
{
unsigned int count = 0;
- if (rsc->variant == pe_native) {
+ if (rsc->variant == pcmk_rsc_variant_primitive) {
pe__find_active_requires(rsc, &count);
}
return count > 1;
}
static void
-native_priority_to_node(pe_resource_t * rsc, pe_node_t * node, gboolean failed)
+native_priority_to_node(pcmk_resource_t *rsc, pcmk_node_t *node,
+ gboolean failed)
{
int priority = 0;
@@ -49,7 +50,7 @@ native_priority_to_node(pe_resource_t * rsc, pe_node_t * node, gboolean failed)
return;
}
- if (rsc->role == RSC_ROLE_PROMOTED) {
+ if (rsc->role == pcmk_role_promoted) {
// Promoted instance takes base priority + 1
priority = rsc->priority + 1;
@@ -60,9 +61,9 @@ native_priority_to_node(pe_resource_t * rsc, pe_node_t * node, gboolean failed)
node->details->priority += priority;
pe_rsc_trace(rsc, "%s now has priority %d with %s'%s' (priority: %d%s)",
pe__node_name(node), node->details->priority,
- (rsc->role == RSC_ROLE_PROMOTED)? "promoted " : "",
+ (rsc->role == pcmk_role_promoted)? "promoted " : "",
rsc->id, rsc->priority,
- (rsc->role == RSC_ROLE_PROMOTED)? " + 1" : "");
+ (rsc->role == pcmk_role_promoted)? " + 1" : "");
/* Priority of a resource running on a guest node is added to the cluster
* node as well. */
@@ -71,28 +72,29 @@ native_priority_to_node(pe_resource_t * rsc, pe_node_t * node, gboolean failed)
GList *gIter = node->details->remote_rsc->container->running_on;
for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *a_node = gIter->data;
+ pcmk_node_t *a_node = gIter->data;
a_node->details->priority += priority;
pe_rsc_trace(rsc, "%s now has priority %d with %s'%s' (priority: %d%s) "
"from guest node %s",
pe__node_name(a_node), a_node->details->priority,
- (rsc->role == RSC_ROLE_PROMOTED)? "promoted " : "",
+ (rsc->role == pcmk_role_promoted)? "promoted " : "",
rsc->id, rsc->priority,
- (rsc->role == RSC_ROLE_PROMOTED)? " + 1" : "",
+ (rsc->role == pcmk_role_promoted)? " + 1" : "",
pe__node_name(node));
}
}
}
void
-native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set, gboolean failed)
+native_add_running(pcmk_resource_t *rsc, pcmk_node_t *node,
+ pcmk_scheduler_t *scheduler, gboolean failed)
{
GList *gIter = rsc->running_on;
CRM_CHECK(node != NULL, return);
for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *a_node = (pe_node_t *) gIter->data;
+ pcmk_node_t *a_node = (pcmk_node_t *) gIter->data;
CRM_CHECK(a_node != NULL, return);
if (pcmk__str_eq(a_node->details->id, node->details->id, pcmk__str_casei)) {
@@ -101,25 +103,27 @@ native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * dat
}
pe_rsc_trace(rsc, "Adding %s to %s %s", rsc->id, pe__node_name(node),
- pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : "(unmanaged)");
+ pcmk_is_set(rsc->flags, pcmk_rsc_managed)? "" : "(unmanaged)");
rsc->running_on = g_list_append(rsc->running_on, node);
- if (rsc->variant == pe_native) {
+ if (rsc->variant == pcmk_rsc_variant_primitive) {
node->details->running_rsc = g_list_append(node->details->running_rsc, rsc);
native_priority_to_node(rsc, node, failed);
}
- if (rsc->variant == pe_native && node->details->maintenance) {
- pe__clear_resource_flags(rsc, pe_rsc_managed);
- pe__set_resource_flags(rsc, pe_rsc_maintenance);
+ if ((rsc->variant == pcmk_rsc_variant_primitive)
+ && node->details->maintenance) {
+ pe__clear_resource_flags(rsc, pcmk_rsc_managed);
+ pe__set_resource_flags(rsc, pcmk_rsc_maintenance);
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
- pe_resource_t *p = rsc->parent;
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
+ pcmk_resource_t *p = rsc->parent;
pe_rsc_info(rsc, "resource %s isn't managed", rsc->id);
- resource_location(rsc, node, INFINITY, "not_managed_default", data_set);
+ resource_location(rsc, node, INFINITY, "not_managed_default",
+ scheduler);
while(p && node->details->online) {
/* add without the additional location constraint */
@@ -131,43 +135,46 @@ native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * dat
if (is_multiply_active(rsc)) {
switch (rsc->recovery_type) {
- case recovery_stop_only:
+ case pcmk_multiply_active_stop:
{
GHashTableIter gIter;
- pe_node_t *local_node = NULL;
+ pcmk_node_t *local_node = NULL;
/* make sure it doesn't come up again */
if (rsc->allowed_nodes != NULL) {
g_hash_table_destroy(rsc->allowed_nodes);
}
- rsc->allowed_nodes = pe__node_list2table(data_set->nodes);
+ rsc->allowed_nodes = pe__node_list2table(scheduler->nodes);
g_hash_table_iter_init(&gIter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&gIter, NULL, (void **)&local_node)) {
local_node->weight = -INFINITY;
}
}
break;
- case recovery_block:
- pe__clear_resource_flags(rsc, pe_rsc_managed);
- pe__set_resource_flags(rsc, pe_rsc_block);
+ case pcmk_multiply_active_block:
+ pe__clear_resource_flags(rsc, pcmk_rsc_managed);
+ pe__set_resource_flags(rsc, pcmk_rsc_blocked);
/* If the resource belongs to a group or bundle configured with
* multiple-active=block, block the entire entity.
*/
if (rsc->parent
- && (rsc->parent->variant == pe_group || rsc->parent->variant == pe_container)
- && rsc->parent->recovery_type == recovery_block) {
+ && ((rsc->parent->variant == pcmk_rsc_variant_group)
+ || (rsc->parent->variant == pcmk_rsc_variant_bundle))
+ && (rsc->parent->recovery_type == pcmk_multiply_active_block)) {
GList *gIter = rsc->parent->children;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child = gIter->data;
- pe__clear_resource_flags(child, pe_rsc_managed);
- pe__set_resource_flags(child, pe_rsc_block);
+ pe__clear_resource_flags(child, pcmk_rsc_managed);
+ pe__set_resource_flags(child, pcmk_rsc_blocked);
}
}
break;
- default: // recovery_stop_start, recovery_stop_unexpected
+
+ // pcmk_multiply_active_restart, pcmk_multiply_active_unexpected
+ default:
/* The scheduler will do the right thing because the relevant
* variables and flags are set when unpacking the history.
*/
@@ -183,22 +190,22 @@ native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * dat
}
if (rsc->parent != NULL) {
- native_add_running(rsc->parent, node, data_set, FALSE);
+ native_add_running(rsc->parent, node, scheduler, FALSE);
}
}
static void
-recursive_clear_unique(pe_resource_t *rsc, gpointer user_data)
+recursive_clear_unique(pcmk_resource_t *rsc, gpointer user_data)
{
- pe__clear_resource_flags(rsc, pe_rsc_unique);
+ pe__clear_resource_flags(rsc, pcmk_rsc_unique);
add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE, XML_BOOLEAN_FALSE);
g_list_foreach(rsc->children, (GFunc) recursive_clear_unique, NULL);
}
gboolean
-native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
+native_unpack(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
- pe_resource_t *parent = uber_parent(rsc);
+ pcmk_resource_t *parent = uber_parent(rsc);
const char *standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
uint32_t ra_caps = pcmk_get_ra_caps(standard);
@@ -206,14 +213,15 @@ native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
// Only some agent standards support unique and promotable clones
if (!pcmk_is_set(ra_caps, pcmk_ra_cap_unique)
- && pcmk_is_set(rsc->flags, pe_rsc_unique) && pe_rsc_is_clone(parent)) {
+ && pcmk_is_set(rsc->flags, pcmk_rsc_unique)
+ && pe_rsc_is_clone(parent)) {
/* @COMPAT We should probably reject this situation as an error (as we
* do for promotable below) rather than warn and convert, but that would
* be a backward-incompatible change that we should probably do with a
* transform at a schema major version bump.
*/
- pe__force_anon(standard, parent, rsc->id, data_set);
+ pe__force_anon(standard, parent, rsc->id, scheduler);
/* Clear globally-unique on the parent and all its descendants unpacked
* so far (clearing the parent should make any future children unpacking
@@ -224,7 +232,7 @@ native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
recursive_clear_unique(rsc, NULL);
}
if (!pcmk_is_set(ra_caps, pcmk_ra_cap_promotable)
- && pcmk_is_set(parent->flags, pe_rsc_promotable)) {
+ && pcmk_is_set(parent->flags, pcmk_rsc_promotable)) {
pe_err("Resource %s is of type %s and therefore "
"cannot be used as a promotable clone resource",
@@ -235,42 +243,44 @@ native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
}
static bool
-rsc_is_on_node(pe_resource_t *rsc, const pe_node_t *node, int flags)
+rsc_is_on_node(pcmk_resource_t *rsc, const pcmk_node_t *node, int flags)
{
pe_rsc_trace(rsc, "Checking whether %s is on %s",
rsc->id, pe__node_name(node));
- if (pcmk_is_set(flags, pe_find_current) && rsc->running_on) {
+ if (pcmk_is_set(flags, pcmk_rsc_match_current_node)
+ && (rsc->running_on != NULL)) {
for (GList *iter = rsc->running_on; iter; iter = iter->next) {
- pe_node_t *loc = (pe_node_t *) iter->data;
+ pcmk_node_t *loc = (pcmk_node_t *) iter->data;
if (loc->details == node->details) {
return true;
}
}
- } else if (pcmk_is_set(flags, pe_find_inactive)
+ } else if (pcmk_is_set(flags, pe_find_inactive) // @COMPAT deprecated
&& (rsc->running_on == NULL)) {
return true;
- } else if (!pcmk_is_set(flags, pe_find_current) && rsc->allocated_to
+ } else if (!pcmk_is_set(flags, pcmk_rsc_match_current_node)
+ && (rsc->allocated_to != NULL)
&& (rsc->allocated_to->details == node->details)) {
return true;
}
return false;
}
-pe_resource_t *
-native_find_rsc(pe_resource_t * rsc, const char *id, const pe_node_t *on_node,
- int flags)
+pcmk_resource_t *
+native_find_rsc(pcmk_resource_t *rsc, const char *id,
+ const pcmk_node_t *on_node, int flags)
{
bool match = false;
- pe_resource_t *result = NULL;
+ pcmk_resource_t *result = NULL;
CRM_CHECK(id && rsc && rsc->id, return NULL);
- if (flags & pe_find_clone) {
+ if (pcmk_is_set(flags, pcmk_rsc_match_clone_only)) {
const char *rid = ID(rsc->xml);
if (!pe_rsc_is_clone(pe__const_top_resource(rsc, false))) {
@@ -283,13 +293,13 @@ native_find_rsc(pe_resource_t * rsc, const char *id, const pe_node_t *on_node,
} else if (!strcmp(id, rsc->id)) {
match = true;
- } else if (pcmk_is_set(flags, pe_find_renamed)
+ } else if (pcmk_is_set(flags, pcmk_rsc_match_history)
&& rsc->clone_name && strcmp(rsc->clone_name, id) == 0) {
match = true;
- } else if (pcmk_is_set(flags, pe_find_any)
- || (pcmk_is_set(flags, pe_find_anon)
- && !pcmk_is_set(rsc->flags, pe_rsc_unique))) {
+ } else if (pcmk_is_set(flags, pcmk_rsc_match_basename)
+ || (pcmk_is_set(flags, pcmk_rsc_match_anon_basename)
+ && !pcmk_is_set(rsc->flags, pcmk_rsc_unique))) {
match = pe_base_name_eq(rsc, id);
}
@@ -304,7 +314,7 @@ native_find_rsc(pe_resource_t * rsc, const char *id, const pe_node_t *on_node,
}
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) gIter->data;
result = rsc->fns->find_rsc(child, id, on_node, flags);
if (result) {
@@ -316,8 +326,8 @@ native_find_rsc(pe_resource_t * rsc, const char *id, const pe_node_t *on_node,
// create is ignored
char *
-native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name,
- pe_working_set_t * data_set)
+native_parameter(pcmk_resource_t *rsc, pcmk_node_t *node, gboolean create,
+ const char *name, pcmk_scheduler_t *scheduler)
{
char *value_copy = NULL;
const char *value = NULL;
@@ -327,7 +337,7 @@ native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const c
CRM_CHECK(name != NULL && strlen(name) != 0, return NULL);
pe_rsc_trace(rsc, "Looking up %s in %s", name, rsc->id);
- params = pe_rsc_params(rsc, node, data_set);
+ params = pe_rsc_params(rsc, node, scheduler);
value = g_hash_table_lookup(params, name);
if (value == NULL) {
/* try meta attributes instead */
@@ -338,16 +348,17 @@ native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const c
}
gboolean
-native_active(pe_resource_t * rsc, gboolean all)
+native_active(pcmk_resource_t * rsc, gboolean all)
{
for (GList *gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
- pe_node_t *a_node = (pe_node_t *) gIter->data;
+ pcmk_node_t *a_node = (pcmk_node_t *) gIter->data;
if (a_node->details->unclean) {
pe_rsc_trace(rsc, "Resource %s: %s is unclean",
rsc->id, pe__node_name(a_node));
return TRUE;
- } else if (a_node->details->online == FALSE && pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ } else if (!a_node->details->online
+ && pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pe_rsc_trace(rsc, "Resource %s: %s is offline",
rsc->id, pe__node_name(a_node));
} else {
@@ -365,27 +376,32 @@ struct print_data_s {
};
static const char *
-native_pending_state(const pe_resource_t *rsc)
+native_pending_state(const pcmk_resource_t *rsc)
{
const char *pending_state = NULL;
- if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_START, pcmk__str_casei)) {
+ if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_START, pcmk__str_casei)) {
pending_state = "Starting";
- } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_STOP, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_STOP,
+ pcmk__str_casei)) {
pending_state = "Stopping";
- } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_MIGRATE, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_MIGRATE_TO,
+ pcmk__str_casei)) {
pending_state = "Migrating";
- } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_MIGRATE_FROM,
+ pcmk__str_casei)) {
/* Work might be done in here. */
pending_state = "Migrating";
- } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_PROMOTE,
+ pcmk__str_casei)) {
pending_state = "Promoting";
- } else if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_DEMOTE, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_DEMOTE,
+ pcmk__str_casei)) {
pending_state = "Demoting";
}
@@ -393,11 +409,11 @@ native_pending_state(const pe_resource_t *rsc)
}
static const char *
-native_pending_task(const pe_resource_t *rsc)
+native_pending_task(const pcmk_resource_t *rsc)
{
const char *pending_task = NULL;
- if (pcmk__str_eq(rsc->pending_task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
+ if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
pending_task = "Monitoring";
/* Pending probes are not printed, even if pending
@@ -415,21 +431,21 @@ native_pending_task(const pe_resource_t *rsc)
}
static enum rsc_role_e
-native_displayable_role(const pe_resource_t *rsc)
+native_displayable_role(const pcmk_resource_t *rsc)
{
enum rsc_role_e role = rsc->role;
- if ((role == RSC_ROLE_STARTED)
+ if ((role == pcmk_role_started)
&& pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
- pe_rsc_promotable)) {
+ pcmk_rsc_promotable)) {
- role = RSC_ROLE_UNPROMOTED;
+ role = pcmk_role_unpromoted;
}
return role;
}
static const char *
-native_displayable_state(const pe_resource_t *rsc, bool print_pending)
+native_displayable_state(const pcmk_resource_t *rsc, bool print_pending)
{
const char *rsc_state = NULL;
@@ -447,7 +463,7 @@ native_displayable_state(const pe_resource_t *rsc, bool print_pending)
* \deprecated This function will be removed in a future release
*/
static void
-native_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
+native_print_xml(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
@@ -471,12 +487,14 @@ native_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
status_print("target_role=\"%s\" ", target_role);
}
status_print("active=\"%s\" ", pcmk__btoa(rsc->fns->active(rsc, TRUE)));
- status_print("orphaned=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_orphan));
- status_print("blocked=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_block));
- status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
- status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
+ status_print("orphaned=\"%s\" ", pe__rsc_bool_str(rsc, pcmk_rsc_removed));
+ status_print("blocked=\"%s\" ",
+ pe__rsc_bool_str(rsc, pcmk_rsc_blocked));
+ status_print("managed=\"%s\" ",
+ pe__rsc_bool_str(rsc, pcmk_rsc_managed));
+ status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pcmk_rsc_failed));
status_print("failure_ignored=\"%s\" ",
- pe__rsc_bool_str(rsc, pe_rsc_failure_ignored));
+ pe__rsc_bool_str(rsc, pcmk_rsc_ignore_failure));
status_print("nodes_running_on=\"%d\" ", g_list_length(rsc->running_on));
if (options & pe_print_pending) {
@@ -496,7 +514,7 @@ native_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
status_print(">\n");
for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter->data;
status_print("%s <node name=\"%s\" " XML_ATTR_ID "=\"%s\" "
"cached=\"%s\"/>\n",
@@ -542,8 +560,8 @@ add_output_node(GString *s, const char *node, bool have_nodes)
* \note Caller must free the result with g_free().
*/
gchar *
-pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
- const pe_node_t *node, uint32_t show_opts,
+pcmk__native_output_string(const pcmk_resource_t *rsc, const char *name,
+ const pcmk_node_t *node, uint32_t show_opts,
const char *target_role, bool show_nodes)
{
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
@@ -552,7 +570,7 @@ pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
GString *outstr = NULL;
bool have_flags = false;
- if (rsc->variant != pe_native) {
+ if (rsc->variant != pcmk_rsc_variant_primitive) {
return NULL;
}
@@ -580,14 +598,14 @@ pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
pcmk__s(provider, ""), ":", kind, "):\t", NULL);
// State on node
- if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
g_string_append(outstr, " ORPHANED");
}
- if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
enum rsc_role_e role = native_displayable_role(rsc);
g_string_append(outstr, " FAILED");
- if (role > RSC_ROLE_UNPROMOTED) {
+ if (role > pcmk_role_unpromoted) {
pcmk__add_word(&outstr, 0, role2text(role));
}
} else {
@@ -600,7 +618,7 @@ pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
}
// Failed probe operation
- if (native_displayable_role(rsc) == RSC_ROLE_STOPPED) {
+ if (native_displayable_role(rsc) == pcmk_role_stopped) {
xmlNode *probe_op = pe__failed_probe_for_rsc(rsc, node ? node->details->uname : NULL);
if (probe_op != NULL) {
int rc;
@@ -632,30 +650,31 @@ pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
* Started, as it is the default anyways, and doesn't prevent the
* resource from becoming promoted).
*/
- if (target_role_e == RSC_ROLE_STOPPED) {
+ if (target_role_e == pcmk_role_stopped) {
have_flags = add_output_flag(outstr, "disabled", have_flags);
} else if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
- pe_rsc_promotable)
- && target_role_e == RSC_ROLE_UNPROMOTED) {
+ pcmk_rsc_promotable)
+ && (target_role_e == pcmk_role_unpromoted)) {
have_flags = add_output_flag(outstr, "target-role:", have_flags);
g_string_append(outstr, target_role);
}
}
// Blocked or maintenance implies unmanaged
- if (pcmk_any_flags_set(rsc->flags, pe_rsc_block|pe_rsc_maintenance)) {
- if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
+ if (pcmk_any_flags_set(rsc->flags,
+ pcmk_rsc_blocked|pcmk_rsc_maintenance)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_blocked)) {
have_flags = add_output_flag(outstr, "blocked", have_flags);
- } else if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)) {
have_flags = add_output_flag(outstr, "maintenance", have_flags);
}
- } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ } else if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
have_flags = add_output_flag(outstr, "unmanaged", have_flags);
}
- if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_ignore_failure)) {
have_flags = add_output_flag(outstr, "failure ignored", have_flags);
}
@@ -682,7 +701,7 @@ pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
bool have_nodes = false;
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
- pe_node_t *n = (pe_node_t *) iter->data;
+ pcmk_node_t *n = (pcmk_node_t *) iter->data;
have_nodes = add_output_node(outstr, n->details->uname, have_nodes);
}
@@ -695,8 +714,8 @@ pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
}
int
-pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc,
- const char *name, const pe_node_t *node,
+pe__common_output_html(pcmk__output_t *out, const pcmk_resource_t *rsc,
+ const char *name, const pcmk_node_t *node,
uint32_t show_opts)
{
const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
@@ -705,7 +724,7 @@ pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc,
xmlNodePtr list_node = NULL;
const char *cl = NULL;
- CRM_ASSERT(rsc->variant == pe_native);
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
CRM_ASSERT(kind != NULL);
if (rsc->meta) {
@@ -720,19 +739,20 @@ pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc,
target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
cl = "rsc-managed";
- } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
cl = "rsc-failed";
- } else if (rsc->variant == pe_native && (rsc->running_on == NULL)) {
+ } else if ((rsc->variant == pcmk_rsc_variant_primitive)
+ && (rsc->running_on == NULL)) {
cl = "rsc-failed";
} else if (pcmk__list_of_multiple(rsc->running_on)) {
cl = "rsc-multiple";
- } else if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_ignore_failure)) {
cl = "rsc-failure-ignored";
} else {
@@ -752,13 +772,13 @@ pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc,
}
int
-pe__common_output_text(pcmk__output_t *out, const pe_resource_t *rsc,
- const char *name, const pe_node_t *node,
+pe__common_output_text(pcmk__output_t *out, const pcmk_resource_t *rsc,
+ const char *name, const pcmk_node_t *node,
uint32_t show_opts)
{
const char *target_role = NULL;
- CRM_ASSERT(rsc->variant == pe_native);
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
if (rsc->meta) {
const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC);
@@ -788,12 +808,12 @@ pe__common_output_text(pcmk__output_t *out, const pe_resource_t *rsc,
* \deprecated This function will be removed in a future release
*/
void
-common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
- const pe_node_t *node, long options, void *print_data)
+common_print(pcmk_resource_t *rsc, const char *pre_text, const char *name,
+ const pcmk_node_t *node, long options, void *print_data)
{
const char *target_role = NULL;
- CRM_ASSERT(rsc->variant == pe_native);
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
if (rsc->meta) {
const char *is_internal = g_hash_table_lookup(rsc->meta,
@@ -818,10 +838,10 @@ common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
}
if (options & pe_print_html) {
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
status_print("<font color=\"yellow\">");
- } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
status_print("<font color=\"red\">");
} else if (rsc->running_on == NULL) {
@@ -830,7 +850,7 @@ common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
} else if (pcmk__list_of_multiple(rsc->running_on)) {
status_print("<font color=\"orange\">");
- } else if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_ignore_failure)) {
status_print("<font color=\"yellow\">");
} else {
@@ -863,7 +883,7 @@ common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
}
for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *n = (pe_node_t *) gIter->data;
+ pcmk_node_t *n = (pcmk_node_t *) gIter->data;
counter++;
@@ -908,12 +928,12 @@ common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
* \deprecated This function will be removed in a future release
*/
void
-native_print(pe_resource_t *rsc, const char *pre_text, long options,
+native_print(pcmk_resource_t *rsc, const char *pre_text, long options,
void *print_data)
{
- const pe_node_t *node = NULL;
+ const pcmk_node_t *node = NULL;
- CRM_ASSERT(rsc->variant == pe_native);
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
if (options & pe_print_xml) {
native_print_xml(rsc, pre_text, options, print_data);
return;
@@ -929,12 +949,13 @@ native_print(pe_resource_t *rsc, const char *pre_text, long options,
common_print(rsc, pre_text, rsc_printable_id(rsc), node, options, print_data);
}
-PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__resource_xml(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -956,7 +977,7 @@ pe__resource_xml(pcmk__output_t *out, va_list args)
target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
}
- CRM_ASSERT(rsc->variant == pe_native);
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
return pcmk_rc_no_output;
@@ -979,12 +1000,12 @@ pe__resource_xml(pcmk__output_t *out, va_list args)
"role", rsc_state,
"target_role", target_role,
"active", pcmk__btoa(rsc->fns->active(rsc, TRUE)),
- "orphaned", pe__rsc_bool_str(rsc, pe_rsc_orphan),
- "blocked", pe__rsc_bool_str(rsc, pe_rsc_block),
- "maintenance", pe__rsc_bool_str(rsc, pe_rsc_maintenance),
- "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
- "failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
- "failure_ignored", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored),
+ "orphaned", pe__rsc_bool_str(rsc, pcmk_rsc_removed),
+ "blocked", pe__rsc_bool_str(rsc, pcmk_rsc_blocked),
+ "maintenance", pe__rsc_bool_str(rsc, pcmk_rsc_maintenance),
+ "managed", pe__rsc_bool_str(rsc, pcmk_rsc_managed),
+ "failed", pe__rsc_bool_str(rsc, pcmk_rsc_failed),
+ "failure_ignored", pe__rsc_bool_str(rsc, pcmk_rsc_ignore_failure),
"nodes_running_on", nodes_running_on,
"pending", (print_pending? native_pending_task(rsc) : NULL),
"locked_to", lock_node_name,
@@ -997,7 +1018,7 @@ pe__resource_xml(pcmk__output_t *out, va_list args)
GList *gIter = rsc->running_on;
for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter->data;
rc = pe__name_and_nvpairs_xml(out, false, "node", 3,
"name", node->details->uname,
@@ -1011,22 +1032,23 @@ pe__resource_xml(pcmk__output_t *out, va_list args)
return rc;
}
-PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__resource_html(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
- const pe_node_t *node = pe__current_node(rsc);
+ const pcmk_node_t *node = pe__current_node(rsc);
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
return pcmk_rc_no_output;
}
- CRM_ASSERT(rsc->variant == pe_native);
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
if (node == NULL) {
// This is set only if a non-probe action is pending on this node
@@ -1035,18 +1057,19 @@ pe__resource_html(pcmk__output_t *out, va_list args)
return pe__common_output_html(out, rsc, rsc_printable_id(rsc), node, show_opts);
}
-PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pcmk_resource_t *", "GList *",
+ "GList *")
int
pe__resource_text(pcmk__output_t *out, va_list args)
{
uint32_t show_opts = va_arg(args, uint32_t);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
- const pe_node_t *node = pe__current_node(rsc);
+ const pcmk_node_t *node = pe__current_node(rsc);
- CRM_ASSERT(rsc->variant == pe_native);
+ CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
return pcmk_rc_no_output;
@@ -1060,14 +1083,14 @@ pe__resource_text(pcmk__output_t *out, va_list args)
}
void
-native_free(pe_resource_t * rsc)
+native_free(pcmk_resource_t * rsc)
{
pe_rsc_trace(rsc, "Freeing resource action list (not the data)");
common_free(rsc);
}
enum rsc_role_e
-native_resource_state(const pe_resource_t * rsc, gboolean current)
+native_resource_state(const pcmk_resource_t * rsc, gboolean current)
{
enum rsc_role_e role = rsc->next_role;
@@ -1089,17 +1112,18 @@ native_resource_state(const pe_resource_t * rsc, gboolean current)
*
* \return If list contains only one node, that node, or NULL otherwise
*/
-pe_node_t *
-native_location(const pe_resource_t *rsc, GList **list, int current)
+pcmk_node_t *
+native_location(const pcmk_resource_t *rsc, GList **list, int current)
{
- pe_node_t *one = NULL;
+ // @COMPAT: Accept a pcmk__rsc_node argument instead of int current
+ pcmk_node_t *one = NULL;
GList *result = NULL;
if (rsc->children) {
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) gIter->data;
child->fns->location(child, &result, current);
}
@@ -1126,7 +1150,7 @@ native_location(const pe_resource_t *rsc, GList **list, int current)
GList *gIter = result;
for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter->data;
if (*list == NULL || pe_find_node_id(*list, node->details->id) == NULL) {
*list = g_list_append(*list, node);
@@ -1144,7 +1168,7 @@ get_rscs_brief(GList *rsc_list, GHashTable * rsc_table, GHashTable * active_tabl
GList *gIter = rsc_list;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data;
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
@@ -1155,7 +1179,7 @@ get_rscs_brief(GList *rsc_list, GHashTable * rsc_table, GHashTable * active_tabl
int *rsc_counter = NULL;
int *active_counter = NULL;
- if (rsc->variant != pe_native) {
+ if (rsc->variant != pcmk_rsc_variant_primitive) {
continue;
}
@@ -1185,11 +1209,11 @@ get_rscs_brief(GList *rsc_list, GHashTable * rsc_table, GHashTable * active_tabl
GList *gIter2 = rsc->running_on;
for (; gIter2 != NULL; gIter2 = gIter2->next) {
- pe_node_t *node = (pe_node_t *) gIter2->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter2->data;
GHashTable *node_table = NULL;
if (node->details->unclean == FALSE && node->details->online == FALSE &&
- pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
continue;
}
@@ -1398,17 +1422,32 @@ pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, uint32_t show_opts)
}
gboolean
-pe__native_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
+pe__native_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
gboolean check_parent)
{
if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) ||
pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)) {
return FALSE;
} else if (check_parent && rsc->parent) {
- const pe_resource_t *up = pe__const_top_resource(rsc, true);
+ const pcmk_resource_t *up = pe__const_top_resource(rsc, true);
return up->fns->is_filtered(up, only_rsc, FALSE);
}
return TRUE;
}
+
+/*!
+ * \internal
+ * \brief Get maximum primitive resource instances per node
+ *
+ * \param[in] rsc Primitive resource to check
+ *
+ * \return Maximum number of \p rsc instances that can be active on one node
+ */
+unsigned int
+pe__primitive_max_per_node(const pcmk_resource_t *rsc)
+{
+ CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive));
+ return 1U;
+}
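[Editor's note, not part of the patch] Among the native.c changes above, native_pending_state() is updated to match the renamed PCMK_ACTION_* task constants while keeping the same task-name-to-display-string mapping. A table-driven sketch of that mapping is shown below; the constants and labels are stand-ins chosen for illustration, not the Pacemaker API, and the upstream code keeps its if/else chain.

/* Illustrative only: pending task name -> display label lookup. */
#include <stddef.h>
#include <strings.h>

static const struct { const char *task; const char *label; } pending_map[] = {
    { "start",        "Starting"  },
    { "stop",         "Stopping"  },
    { "migrate_to",   "Migrating" },
    { "migrate_from", "Migrating" },
    { "promote",      "Promoting" },
    { "demote",       "Demoting"  },
};

static const char *
pending_label(const char *task)
{
    if (task == NULL) {
        return NULL;
    }
    for (size_t i = 0; i < sizeof(pending_map) / sizeof(pending_map[0]); i++) {
        if (strcasecmp(task, pending_map[i].task) == 0) {
            return pending_map[i].label;
        }
    }
    return NULL;   /* unknown task, or nothing pending */
}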
diff --git a/lib/pengine/pe_actions.c b/lib/pengine/pe_actions.c
index ed7f0da..aaa6598 100644
--- a/lib/pengine/pe_actions.c
+++ b/lib/pengine/pe_actions.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -14,29 +14,30 @@
#include <crm/crm.h>
#include <crm/msg_xml.h>
+#include <crm/common/scheduler_internal.h>
#include <crm/pengine/internal.h>
+#include <crm/common/xml_internal.h>
#include "pe_status_private.h"
-static void unpack_operation(pe_action_t *action, const xmlNode *xml_obj,
- const pe_resource_t *container,
- pe_working_set_t *data_set, guint interval_ms);
+static void unpack_operation(pcmk_action_t *action, const xmlNode *xml_obj,
+ guint interval_ms);
static void
-add_singleton(pe_working_set_t *data_set, pe_action_t *action)
+add_singleton(pcmk_scheduler_t *scheduler, pcmk_action_t *action)
{
- if (data_set->singletons == NULL) {
- data_set->singletons = pcmk__strkey_table(NULL, NULL);
+ if (scheduler->singletons == NULL) {
+ scheduler->singletons = pcmk__strkey_table(NULL, NULL);
}
- g_hash_table_insert(data_set->singletons, action->uuid, action);
+ g_hash_table_insert(scheduler->singletons, action->uuid, action);
}
-static pe_action_t *
-lookup_singleton(pe_working_set_t *data_set, const char *action_uuid)
+static pcmk_action_t *
+lookup_singleton(pcmk_scheduler_t *scheduler, const char *action_uuid)
{
- if (data_set->singletons == NULL) {
+ if (scheduler->singletons == NULL) {
return NULL;
}
- return g_hash_table_lookup(data_set->singletons, action_uuid);
+ return g_hash_table_lookup(scheduler->singletons, action_uuid);
}
/*!
@@ -46,21 +47,21 @@ lookup_singleton(pe_working_set_t *data_set, const char *action_uuid)
* \param[in] key Action key to match
* \param[in] rsc Resource to match (if any)
* \param[in] node Node to match (if any)
- * \param[in] data_set Cluster working set
+ * \param[in] scheduler Scheduler data
*
* \return Existing action that matches arguments (or NULL if none)
*/
-static pe_action_t *
-find_existing_action(const char *key, const pe_resource_t *rsc,
- const pe_node_t *node, const pe_working_set_t *data_set)
+static pcmk_action_t *
+find_existing_action(const char *key, const pcmk_resource_t *rsc,
+ const pcmk_node_t *node, const pcmk_scheduler_t *scheduler)
{
GList *matches = NULL;
- pe_action_t *action = NULL;
+ pcmk_action_t *action = NULL;
- /* When rsc is NULL, it would be quicker to check data_set->singletons,
- * but checking all data_set->actions takes the node into account.
+ /* When rsc is NULL, it would be quicker to check scheduler->singletons,
+ * but checking all scheduler->actions takes the node into account.
*/
- matches = find_actions(((rsc == NULL)? data_set->actions : rsc->actions),
+ matches = find_actions(((rsc == NULL)? scheduler->actions : rsc->actions),
key, node);
if (matches == NULL) {
return NULL;
@@ -72,79 +73,78 @@ find_existing_action(const char *key, const pe_resource_t *rsc,
return action;
}
+/*!
+ * \internal
+ * \brief Find the XML configuration corresponding to a specific action key
+ *
+ * \param[in] rsc Resource to find action configuration for
+ * \param[in] action_name Action name to search for
+ * \param[in] interval_ms Action interval (in milliseconds) to search for
+ * \param[in] include_disabled If false, do not return disabled actions
+ *
+ * \return XML configuration of desired action if any, otherwise NULL
+ */
static xmlNode *
-find_rsc_op_entry_helper(const pe_resource_t *rsc, const char *key,
- gboolean include_disabled)
+find_exact_action_config(const pcmk_resource_t *rsc, const char *action_name,
+ guint interval_ms, bool include_disabled)
{
- guint interval_ms = 0;
- gboolean do_retry = TRUE;
- char *local_key = NULL;
- const char *name = NULL;
- const char *interval_spec = NULL;
- char *match_key = NULL;
- xmlNode *op = NULL;
- xmlNode *operation = NULL;
-
- retry:
- for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL;
- operation = pcmk__xe_next(operation)) {
+ for (xmlNode *operation = first_named_child(rsc->ops_xml, XML_ATTR_OP);
+ operation != NULL; operation = crm_next_same_xml(operation)) {
- if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
- bool enabled = false;
+ bool enabled = false;
+ const char *config_name = NULL;
+ const char *interval_spec = NULL;
- name = crm_element_value(operation, "name");
- interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
- if (!include_disabled && pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok &&
- !enabled) {
- continue;
- }
-
- interval_ms = crm_parse_interval_spec(interval_spec);
- match_key = pcmk__op_key(rsc->id, name, interval_ms);
- if (pcmk__str_eq(key, match_key, pcmk__str_casei)) {
- op = operation;
- }
- free(match_key);
-
- if (rsc->clone_name) {
- match_key = pcmk__op_key(rsc->clone_name, name, interval_ms);
- if (pcmk__str_eq(key, match_key, pcmk__str_casei)) {
- op = operation;
- }
- free(match_key);
- }
-
- if (op != NULL) {
- free(local_key);
- return op;
- }
+ // @TODO This does not consider rules, defaults, etc.
+ if (!include_disabled
+ && (pcmk__xe_get_bool_attr(operation, "enabled",
+ &enabled) == pcmk_rc_ok) && !enabled) {
+ continue;
}
- }
-
- free(local_key);
- if (do_retry == FALSE) {
- return NULL;
- }
- do_retry = FALSE;
- if (strstr(key, CRMD_ACTION_MIGRATE) || strstr(key, CRMD_ACTION_MIGRATED)) {
- local_key = pcmk__op_key(rsc->id, "migrate", 0);
- key = local_key;
- goto retry;
+ interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
+ if (crm_parse_interval_spec(interval_spec) != interval_ms) {
+ continue;
+ }
- } else if (strstr(key, "_notify_")) {
- local_key = pcmk__op_key(rsc->id, "notify", 0);
- key = local_key;
- goto retry;
+ config_name = crm_element_value(operation, "name");
+ if (pcmk__str_eq(action_name, config_name, pcmk__str_none)) {
+ return operation;
+ }
}
-
return NULL;
}
+/*!
+ * \internal
+ * \brief Find the XML configuration of a resource action
+ *
+ * \param[in] rsc Resource to find action configuration for
+ * \param[in] action_name Action name to search for
+ * \param[in] interval_ms Action interval (in milliseconds) to search for
+ * \param[in] include_disabled If false, do not return disabled actions
+ *
+ * \return XML configuration of desired action if any, otherwise NULL
+ */
xmlNode *
-find_rsc_op_entry(const pe_resource_t *rsc, const char *key)
+pcmk__find_action_config(const pcmk_resource_t *rsc, const char *action_name,
+ guint interval_ms, bool include_disabled)
{
- return find_rsc_op_entry_helper(rsc, key, FALSE);
+ xmlNode *action_config = NULL;
+
+ // Try requested action first
+ action_config = find_exact_action_config(rsc, action_name, interval_ms,
+ include_disabled);
+
+ // For migrate_to and migrate_from actions, retry with "migrate"
+ // @TODO This should be either documented or deprecated
+ if ((action_config == NULL)
+ && pcmk__str_any_of(action_name, PCMK_ACTION_MIGRATE_TO,
+ PCMK_ACTION_MIGRATE_FROM, NULL)) {
+ action_config = find_exact_action_config(rsc, "migrate", 0,
+ include_disabled);
+ }
+
+ return action_config;
}
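As a rough usage sketch (a hypothetical caller, not taken from this commit, assuming a resource already unpacked from the CIB), the new lookup might be driven like this to find a 10-second recurring monitor:

static void
log_monitor_config(const pcmk_resource_t *rsc)
{
    // 10000ms interval; false = skip operations explicitly disabled in the CIB
    xmlNode *op_xml = pcmk__find_action_config(rsc, PCMK_ACTION_MONITOR,
                                               10000, false);

    if (op_xml != NULL) {
        crm_trace("%s has a 10s monitor configured (id=%s)",
                  rsc->id, crm_element_value(op_xml, XML_ATTR_ID));
    }
}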
/*!
@@ -156,98 +156,106 @@ find_rsc_op_entry(const pe_resource_t *rsc, const char *key)
* \param[in,out] rsc Resource that action is for (if any)
* \param[in] node Node that action is on (if any)
* \param[in] optional Whether action should be considered optional
- * \param[in] for_graph Whether action should be recorded in transition graph
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \return Newly allocated action
* \note This function takes ownership of \p key. It is the caller's
* responsibility to free the return value with pe_free_action().
*/
-static pe_action_t *
-new_action(char *key, const char *task, pe_resource_t *rsc,
- const pe_node_t *node, bool optional, bool for_graph,
- pe_working_set_t *data_set)
+static pcmk_action_t *
+new_action(char *key, const char *task, pcmk_resource_t *rsc,
+ const pcmk_node_t *node, bool optional, pcmk_scheduler_t *scheduler)
{
- pe_action_t *action = calloc(1, sizeof(pe_action_t));
+ pcmk_action_t *action = calloc(1, sizeof(pcmk_action_t));
CRM_ASSERT(action != NULL);
action->rsc = rsc;
action->task = strdup(task); CRM_ASSERT(action->task != NULL);
action->uuid = key;
- action->extra = pcmk__strkey_table(free, free);
- action->meta = pcmk__strkey_table(free, free);
if (node) {
action->node = pe__copy_node(node);
}
- if (pcmk__str_eq(task, CRM_OP_LRM_DELETE, pcmk__str_casei)) {
+ if (pcmk__str_eq(task, PCMK_ACTION_LRM_DELETE, pcmk__str_casei)) {
// Resource history deletion for a node can be done on the DC
- pe__set_action_flags(action, pe_action_dc);
+ pe__set_action_flags(action, pcmk_action_on_dc);
}
- pe__set_action_flags(action, pe_action_runnable);
+ pe__set_action_flags(action, pcmk_action_runnable);
if (optional) {
- pe__set_action_flags(action, pe_action_optional);
+ pe__set_action_flags(action, pcmk_action_optional);
} else {
- pe__clear_action_flags(action, pe_action_optional);
+ pe__clear_action_flags(action, pcmk_action_optional);
}
- if (rsc != NULL) {
+ if (rsc == NULL) {
+ action->meta = pcmk__strkey_table(free, free);
+ } else {
guint interval_ms = 0;
- action->op_entry = find_rsc_op_entry_helper(rsc, key, TRUE);
parse_op_key(key, NULL, NULL, &interval_ms);
- unpack_operation(action, action->op_entry, rsc->container, data_set,
- interval_ms);
+ action->op_entry = pcmk__find_action_config(rsc, task, interval_ms,
+ true);
+
+ /* If the given key is for one of the many notification pseudo-actions
+ * (pre_notify_promote, etc.), the actual action name is "notify"
+ */
+ if ((action->op_entry == NULL) && (strstr(key, "_notify_") != NULL)) {
+ action->op_entry = find_exact_action_config(rsc, PCMK_ACTION_NOTIFY,
+ 0, true);
+ }
+
+ unpack_operation(action, action->op_entry, interval_ms);
}
- if (for_graph) {
- pe_rsc_trace(rsc, "Created %s action %d (%s): %s for %s on %s",
- (optional? "optional" : "required"),
- data_set->action_id, key, task,
- ((rsc == NULL)? "no resource" : rsc->id),
- pe__node_name(node));
- action->id = data_set->action_id++;
+ pe_rsc_trace(rsc, "Created %s action %d (%s): %s for %s on %s",
+ (optional? "optional" : "required"),
+ scheduler->action_id, key, task,
+ ((rsc == NULL)? "no resource" : rsc->id),
+ pe__node_name(node));
+ action->id = scheduler->action_id++;
- data_set->actions = g_list_prepend(data_set->actions, action);
- if (rsc == NULL) {
- add_singleton(data_set, action);
- } else {
- rsc->actions = g_list_prepend(rsc->actions, action);
- }
+ scheduler->actions = g_list_prepend(scheduler->actions, action);
+ if (rsc == NULL) {
+ add_singleton(scheduler, action);
+ } else {
+ rsc->actions = g_list_prepend(rsc->actions, action);
}
return action;
}
/*!
* \internal
- * \brief Evaluate node attribute values for an action
+ * \brief Unpack a resource's action-specific instance parameters
*
- * \param[in,out] action Action to unpack attributes for
- * \param[in,out] data_set Cluster working set
+ * \param[in] action_xml XML of action's configuration in CIB (if any)
+ * \param[in,out] node_attrs Table of node attributes (for rule evaluation)
+ * \param[in,out] scheduler Cluster working set (for rule evaluation)
+ *
+ * \return Newly allocated hash table of action-specific instance parameters
*/
-static void
-unpack_action_node_attributes(pe_action_t *action, pe_working_set_t *data_set)
+GHashTable *
+pcmk__unpack_action_rsc_params(const xmlNode *action_xml,
+ GHashTable *node_attrs,
+ pcmk_scheduler_t *scheduler)
{
- if (!pcmk_is_set(action->flags, pe_action_have_node_attrs)
- && (action->op_entry != NULL)) {
-
- pe_rule_eval_data_t rule_data = {
- .node_hash = action->node->details->attrs,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
- .match_data = NULL,
- .rsc_data = NULL,
- .op_data = NULL
- };
-
- pe__set_action_flags(action, pe_action_have_node_attrs);
- pe__unpack_dataset_nvpairs(action->op_entry, XML_TAG_ATTR_SETS,
- &rule_data, action->extra, NULL,
- FALSE, data_set);
- }
+ GHashTable *params = pcmk__strkey_table(free, free);
+
+ pe_rule_eval_data_t rule_data = {
+ .node_hash = node_attrs,
+ .role = pcmk_role_unknown,
+ .now = scheduler->now,
+ .match_data = NULL,
+ .rsc_data = NULL,
+ .op_data = NULL
+ };
+
+ pe__unpack_dataset_nvpairs(action_xml, XML_TAG_ATTR_SETS,
+ &rule_data, params, NULL,
+ FALSE, scheduler);
+ return params;
}
/*!
@@ -258,46 +266,46 @@ unpack_action_node_attributes(pe_action_t *action, pe_working_set_t *data_set)
* \param[in] optional Requested optional status
*/
static void
-update_action_optional(pe_action_t *action, gboolean optional)
+update_action_optional(pcmk_action_t *action, gboolean optional)
{
// Force a non-recurring action to be optional if its resource is unmanaged
if ((action->rsc != NULL) && (action->node != NULL)
- && !pcmk_is_set(action->flags, pe_action_pseudo)
- && !pcmk_is_set(action->rsc->flags, pe_rsc_managed)
+ && !pcmk_is_set(action->flags, pcmk_action_pseudo)
+ && !pcmk_is_set(action->rsc->flags, pcmk_rsc_managed)
&& (g_hash_table_lookup(action->meta,
XML_LRM_ATTR_INTERVAL_MS) == NULL)) {
pe_rsc_debug(action->rsc, "%s on %s is optional (%s is unmanaged)",
action->uuid, pe__node_name(action->node),
action->rsc->id);
- pe__set_action_flags(action, pe_action_optional);
+ pe__set_action_flags(action, pcmk_action_optional);
// We shouldn't clear runnable here because ... something
// Otherwise require the action if requested
} else if (!optional) {
- pe__clear_action_flags(action, pe_action_optional);
+ pe__clear_action_flags(action, pcmk_action_optional);
}
}
static enum pe_quorum_policy
-effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set)
+effective_quorum_policy(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
- enum pe_quorum_policy policy = data_set->no_quorum_policy;
+ enum pe_quorum_policy policy = scheduler->no_quorum_policy;
- if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
- policy = no_quorum_ignore;
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
+ policy = pcmk_no_quorum_ignore;
- } else if (data_set->no_quorum_policy == no_quorum_demote) {
+ } else if (scheduler->no_quorum_policy == pcmk_no_quorum_demote) {
switch (rsc->role) {
- case RSC_ROLE_PROMOTED:
- case RSC_ROLE_UNPROMOTED:
- if (rsc->next_role > RSC_ROLE_UNPROMOTED) {
- pe__set_next_role(rsc, RSC_ROLE_UNPROMOTED,
+ case pcmk_role_promoted:
+ case pcmk_role_unpromoted:
+ if (rsc->next_role > pcmk_role_unpromoted) {
+ pe__set_next_role(rsc, pcmk_role_unpromoted,
"no-quorum-policy=demote");
}
- policy = no_quorum_ignore;
+ policy = pcmk_no_quorum_ignore;
break;
default:
- policy = no_quorum_stop;
+ policy = pcmk_no_quorum_stop;
break;
}
}
@@ -309,50 +317,47 @@ effective_quorum_policy(pe_resource_t *rsc, pe_working_set_t *data_set)
* \brief Update a resource action's runnable flag
*
* \param[in,out] action Action to update
- * \param[in] for_graph Whether action should be recorded in transition graph
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \note This may also schedule fencing if a stop is unrunnable.
*/
static void
-update_resource_action_runnable(pe_action_t *action, bool for_graph,
- pe_working_set_t *data_set)
+update_resource_action_runnable(pcmk_action_t *action,
+ pcmk_scheduler_t *scheduler)
{
- if (pcmk_is_set(action->flags, pe_action_pseudo)) {
+ if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
return;
}
if (action->node == NULL) {
pe_rsc_trace(action->rsc, "%s is unrunnable (unallocated)",
action->uuid);
- pe__clear_action_flags(action, pe_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
- } else if (!pcmk_is_set(action->flags, pe_action_dc)
+ } else if (!pcmk_is_set(action->flags, pcmk_action_on_dc)
&& !(action->node->details->online)
&& (!pe__is_guest_node(action->node)
|| action->node->details->remote_requires_reset)) {
- pe__clear_action_flags(action, pe_action_runnable);
- do_crm_log((for_graph? LOG_WARNING: LOG_TRACE),
- "%s on %s is unrunnable (node is offline)",
+ pe__clear_action_flags(action, pcmk_action_runnable);
+ do_crm_log(LOG_WARNING, "%s on %s is unrunnable (node is offline)",
action->uuid, pe__node_name(action->node));
- if (pcmk_is_set(action->rsc->flags, pe_rsc_managed)
- && for_graph
- && pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)
+ if (pcmk_is_set(action->rsc->flags, pcmk_rsc_managed)
+ && pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_casei)
&& !(action->node->details->unclean)) {
- pe_fence_node(data_set, action->node, "stop is unrunnable", false);
+ pe_fence_node(scheduler, action->node, "stop is unrunnable", false);
}
- } else if (!pcmk_is_set(action->flags, pe_action_dc)
+ } else if (!pcmk_is_set(action->flags, pcmk_action_on_dc)
&& action->node->details->pending) {
- pe__clear_action_flags(action, pe_action_runnable);
- do_crm_log((for_graph? LOG_WARNING: LOG_TRACE),
+ pe__clear_action_flags(action, pcmk_action_runnable);
+ do_crm_log(LOG_WARNING,
"Action %s on %s is unrunnable (node is pending)",
action->uuid, pe__node_name(action->node));
- } else if (action->needs == rsc_req_nothing) {
+ } else if (action->needs == pcmk_requires_nothing) {
pe_action_set_reason(action, NULL, TRUE);
if (pe__is_guest_node(action->node)
- && !pe_can_fence(data_set, action->node)) {
+ && !pe_can_fence(scheduler, action->node)) {
/* An action that requires nothing usually does not require any
* fencing in order to be runnable. However, there is an exception:
* such an action cannot be completed if it is on a guest node whose
@@ -361,37 +366,37 @@ update_resource_action_runnable(pe_action_t *action, bool for_graph,
pe_rsc_debug(action->rsc, "%s on %s is unrunnable "
"(node's host cannot be fenced)",
action->uuid, pe__node_name(action->node));
- pe__clear_action_flags(action, pe_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
} else {
pe_rsc_trace(action->rsc,
"%s on %s does not require fencing or quorum",
action->uuid, pe__node_name(action->node));
- pe__set_action_flags(action, pe_action_runnable);
+ pe__set_action_flags(action, pcmk_action_runnable);
}
} else {
- switch (effective_quorum_policy(action->rsc, data_set)) {
- case no_quorum_stop:
+ switch (effective_quorum_policy(action->rsc, scheduler)) {
+ case pcmk_no_quorum_stop:
pe_rsc_debug(action->rsc, "%s on %s is unrunnable (no quorum)",
action->uuid, pe__node_name(action->node));
- pe__clear_action_flags(action, pe_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
pe_action_set_reason(action, "no quorum", true);
break;
- case no_quorum_freeze:
+ case pcmk_no_quorum_freeze:
if (!action->rsc->fns->active(action->rsc, TRUE)
|| (action->rsc->next_role > action->rsc->role)) {
pe_rsc_debug(action->rsc,
"%s on %s is unrunnable (no quorum)",
action->uuid, pe__node_name(action->node));
- pe__clear_action_flags(action, pe_action_runnable);
+ pe__clear_action_flags(action, pcmk_action_runnable);
pe_action_set_reason(action, "quorum freeze", true);
}
break;
default:
//pe_action_set_reason(action, NULL, TRUE);
- pe__set_action_flags(action, pe_action_runnable);
+ pe__set_action_flags(action, pcmk_action_runnable);
break;
}
}
@@ -405,19 +410,20 @@ update_resource_action_runnable(pe_action_t *action, bool for_graph,
* \param[in] action New action
*/
static void
-update_resource_flags_for_action(pe_resource_t *rsc, const pe_action_t *action)
+update_resource_flags_for_action(pcmk_resource_t *rsc,
+ const pcmk_action_t *action)
{
- /* @COMPAT pe_rsc_starting and pe_rsc_stopping are not actually used
- * within Pacemaker, and should be deprecated and eventually removed
+ /* @COMPAT pcmk_rsc_starting and pcmk_rsc_stopping are deprecated and unused
+ * within Pacemaker, and will eventually be removed
*/
- if (pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) {
- pe__set_resource_flags(rsc, pe_rsc_stopping);
+ if (pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_casei)) {
+ pe__set_resource_flags(rsc, pcmk_rsc_stopping);
- } else if (pcmk__str_eq(action->task, CRMD_ACTION_START, pcmk__str_casei)) {
- if (pcmk_is_set(action->flags, pe_action_runnable)) {
- pe__set_resource_flags(rsc, pe_rsc_starting);
+ } else if (pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_casei)) {
+ if (pcmk_is_set(action->flags, pcmk_action_runnable)) {
+ pe__set_resource_flags(rsc, pcmk_rsc_starting);
} else {
- pe__clear_resource_flags(rsc, pe_rsc_starting);
+ pe__clear_resource_flags(rsc, pcmk_rsc_starting);
}
}
}
@@ -428,80 +434,121 @@ valid_stop_on_fail(const char *value)
return !pcmk__strcase_any_of(value, "standby", "demote", "stop", NULL);
}
-static const char *
-unpack_operation_on_fail(pe_action_t * action)
+/*!
+ * \internal
+ * \brief Validate (and possibly reset) resource action's on_fail meta-attribute
+ *
+ * \param[in] rsc Resource that action is for
+ * \param[in] action_name Action name
+ * \param[in] action_config Action configuration XML from CIB (if any)
+ * \param[in,out] meta Table of action meta-attributes
+ */
+static void
+validate_on_fail(const pcmk_resource_t *rsc, const char *action_name,
+ const xmlNode *action_config, GHashTable *meta)
{
const char *name = NULL;
const char *role = NULL;
- const char *on_fail = NULL;
const char *interval_spec = NULL;
- const char *value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL);
+ const char *value = g_hash_table_lookup(meta, XML_OP_ATTR_ON_FAIL);
+ char *key = NULL;
+ char *new_value = NULL;
- if (pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)
+ // Stop actions can only use certain on-fail values
+ if (pcmk__str_eq(action_name, PCMK_ACTION_STOP, pcmk__str_none)
&& !valid_stop_on_fail(value)) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s stop "
"action to default value because '%s' is not "
- "allowed for stop", action->rsc->id, value);
- return NULL;
-
- } else if (pcmk__str_eq(action->task, CRMD_ACTION_DEMOTE, pcmk__str_casei) && !value) {
- // demote on_fail defaults to monitor value for promoted role if present
- xmlNode *operation = NULL;
+ "allowed for stop", rsc->id, value);
+ g_hash_table_remove(meta, XML_OP_ATTR_ON_FAIL);
+ return;
+ }
- CRM_CHECK(action->rsc != NULL, return NULL);
+ /* Demote actions default on-fail to the on-fail value for the first
+ * recurring monitor for the promoted role (if any).
+ */
+ if (pcmk__str_eq(action_name, PCMK_ACTION_DEMOTE, pcmk__str_none)
+ && (value == NULL)) {
- for (operation = pcmk__xe_first_child(action->rsc->ops_xml);
- (operation != NULL) && (value == NULL);
- operation = pcmk__xe_next(operation)) {
+ /* @TODO This does not consider promote options set in a meta-attribute
+ * block (which may have rules that need to be evaluated) rather than
+ * XML properties.
+ */
+ for (xmlNode *operation = first_named_child(rsc->ops_xml, XML_ATTR_OP);
+ operation != NULL; operation = crm_next_same_xml(operation)) {
bool enabled = false;
+ const char *promote_on_fail = NULL;
- if (!pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
+ /* We only care about explicit on-fail (if promote uses default, so
+ * can demote)
+ */
+ promote_on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL);
+ if (promote_on_fail == NULL) {
continue;
}
+
+ // We only care about recurring monitors for the promoted role
name = crm_element_value(operation, "name");
role = crm_element_value(operation, "role");
- on_fail = crm_element_value(operation, XML_OP_ATTR_ON_FAIL);
- interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
- if (!on_fail) {
- continue;
- } else if (pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok && !enabled) {
+ if (!pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_none)
+ || !pcmk__strcase_any_of(role, PCMK__ROLE_PROMOTED,
+ PCMK__ROLE_PROMOTED_LEGACY, NULL)) {
continue;
- } else if (!pcmk__str_eq(name, "monitor", pcmk__str_casei)
- || !pcmk__strcase_any_of(role, RSC_ROLE_PROMOTED_S,
- RSC_ROLE_PROMOTED_LEGACY_S,
- NULL)) {
+ }
+ interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
+ if (crm_parse_interval_spec(interval_spec) == 0) {
continue;
- } else if (crm_parse_interval_spec(interval_spec) == 0) {
+ }
+
+ // We only care about enabled monitors
+ if ((pcmk__xe_get_bool_attr(operation, "enabled",
+ &enabled) == pcmk_rc_ok) && !enabled) {
continue;
- } else if (pcmk__str_eq(on_fail, "demote", pcmk__str_casei)) {
+ }
+
+ // Demote actions can't default to on-fail="demote"
+ if (pcmk__str_eq(promote_on_fail, "demote", pcmk__str_casei)) {
continue;
}
- value = on_fail;
+ // Use value from first applicable promote action found
+ key = strdup(XML_OP_ATTR_ON_FAIL);
+ new_value = strdup(promote_on_fail);
+ CRM_ASSERT((key != NULL) && (new_value != NULL));
+ g_hash_table_insert(meta, key, new_value);
}
- } else if (pcmk__str_eq(action->task, CRM_OP_LRM_DELETE, pcmk__str_casei)) {
- value = "ignore";
+ return;
+ }
- } else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
- name = crm_element_value(action->op_entry, "name");
- role = crm_element_value(action->op_entry, "role");
- interval_spec = crm_element_value(action->op_entry,
+ if (pcmk__str_eq(action_name, PCMK_ACTION_LRM_DELETE, pcmk__str_none)
+ && !pcmk__str_eq(value, "ignore", pcmk__str_casei)) {
+ key = strdup(XML_OP_ATTR_ON_FAIL);
+ new_value = strdup("ignore");
+ CRM_ASSERT((key != NULL) && (new_value != NULL));
+ g_hash_table_insert(meta, key, new_value);
+ return;
+ }
+
+ // on-fail="demote" is allowed only for certain actions
+ if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
+ name = crm_element_value(action_config, "name");
+ role = crm_element_value(action_config, "role");
+ interval_spec = crm_element_value(action_config,
XML_LRM_ATTR_INTERVAL);
- if (!pcmk__str_eq(name, CRMD_ACTION_PROMOTE, pcmk__str_casei)
- && (!pcmk__str_eq(name, CRMD_ACTION_STATUS, pcmk__str_casei)
- || !pcmk__strcase_any_of(role, RSC_ROLE_PROMOTED_S,
- RSC_ROLE_PROMOTED_LEGACY_S, NULL)
+ if (!pcmk__str_eq(name, PCMK_ACTION_PROMOTE, pcmk__str_none)
+ && (!pcmk__str_eq(name, PCMK_ACTION_MONITOR, pcmk__str_none)
+ || !pcmk__strcase_any_of(role, PCMK__ROLE_PROMOTED,
+ PCMK__ROLE_PROMOTED_LEGACY, NULL)
|| (crm_parse_interval_spec(interval_spec) == 0))) {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for %s %s "
"action to default value because 'demote' is not "
- "allowed for it", action->rsc->id, name);
- return NULL;
+ "allowed for it", rsc->id, name);
+ g_hash_table_remove(meta, XML_OP_ATTR_ON_FAIL);
+ return;
}
}
-
- return value;
}
static int
@@ -510,7 +557,7 @@ unpack_timeout(const char *value)
int timeout_ms = crm_get_msec(value);
if (timeout_ms < 0) {
- timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
+ timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
}
return timeout_ms;
}
@@ -579,346 +626,475 @@ unpack_start_delay(const char *value, GHashTable *meta)
return start_delay;
}
+/*!
+ * \internal
+ * \brief Find a resource's most frequent recurring monitor
+ *
+ * \param[in] rsc Resource to check
+ *
+ * \return Operation XML configured for most frequent recurring monitor for
+ * \p rsc (if any)
+ */
static xmlNode *
-find_min_interval_mon(pe_resource_t * rsc, gboolean include_disabled)
+most_frequent_monitor(const pcmk_resource_t *rsc)
{
- guint interval_ms = 0;
guint min_interval_ms = G_MAXUINT;
- const char *name = NULL;
- const char *interval_spec = NULL;
xmlNode *op = NULL;
- xmlNode *operation = NULL;
-
- for (operation = pcmk__xe_first_child(rsc->ops_xml);
- operation != NULL;
- operation = pcmk__xe_next(operation)) {
- if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
- bool enabled = false;
-
- name = crm_element_value(operation, "name");
- interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
- if (!include_disabled && pcmk__xe_get_bool_attr(operation, "enabled", &enabled) == pcmk_rc_ok &&
- !enabled) {
- continue;
- }
+ for (xmlNode *operation = first_named_child(rsc->ops_xml, XML_ATTR_OP);
+ operation != NULL; operation = crm_next_same_xml(operation)) {
+ bool enabled = false;
+ guint interval_ms = 0;
+ const char *interval_spec = crm_element_value(operation,
+ XML_LRM_ATTR_INTERVAL);
- if (!pcmk__str_eq(name, RSC_STATUS, pcmk__str_casei)) {
- continue;
- }
+ // We only care about enabled recurring monitors
+ if (!pcmk__str_eq(crm_element_value(operation, "name"),
+ PCMK_ACTION_MONITOR, pcmk__str_none)) {
+ continue;
+ }
+ interval_ms = crm_parse_interval_spec(interval_spec);
+ if (interval_ms == 0) {
+ continue;
+ }
- interval_ms = crm_parse_interval_spec(interval_spec);
+ // @TODO This does not account for rules, defaults, etc.
+ if ((pcmk__xe_get_bool_attr(operation, "enabled",
+ &enabled) == pcmk_rc_ok) && !enabled) {
+ continue;
+ }
- if (interval_ms && (interval_ms < min_interval_ms)) {
- min_interval_ms = interval_ms;
- op = operation;
- }
+ if (interval_ms < min_interval_ms) {
+ min_interval_ms = interval_ms;
+ op = operation;
}
}
-
return op;
}
/*!
- * \brief Unpack operation XML into an action structure
+ * \internal
+ * \brief Unpack action meta-attributes
*
- * Unpack an operation's meta-attributes (normalizing the interval, timeout,
- * and start delay values as integer milliseconds), requirements, and
- * failure policy.
+ * \param[in,out] rsc Resource that action is for
+ * \param[in] node Node that action is on
+ * \param[in] action_name Action name
+ * \param[in] interval_ms Action interval (in milliseconds)
+ * \param[in] action_config Action XML configuration from CIB (if any)
*
- * \param[in,out] action Action to unpack into
- * \param[in] xml_obj Operation XML (or NULL if all defaults)
- * \param[in] container Resource that contains affected resource, if any
- * \param[in,out] data_set Cluster state
- * \param[in] interval_ms How frequently to perform the operation
+ * Unpack a resource action's meta-attributes (normalizing the interval,
+ * timeout, and start delay values as integer milliseconds) from its CIB XML
+ * configuration (including defaults).
+ *
+ * \return Newly allocated hash table with normalized action meta-attributes
*/
-static void
-unpack_operation(pe_action_t *action, const xmlNode *xml_obj,
- const pe_resource_t *container,
- pe_working_set_t *data_set, guint interval_ms)
+GHashTable *
+pcmk__unpack_action_meta(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ const char *action_name, guint interval_ms,
+ const xmlNode *action_config)
{
- int timeout_ms = 0;
- const char *value = NULL;
- bool is_probe = false;
+ GHashTable *meta = NULL;
+ char *name = NULL;
+ char *value = NULL;
+ const char *timeout_spec = NULL;
+ const char *str = NULL;
pe_rsc_eval_data_t rsc_rule_data = {
- .standard = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_CLASS),
- .provider = crm_element_value(action->rsc->xml, XML_AGENT_ATTR_PROVIDER),
- .agent = crm_element_value(action->rsc->xml, XML_EXPR_ATTR_TYPE)
+ .standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS),
+ .provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER),
+ .agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE),
};
pe_op_eval_data_t op_rule_data = {
- .op_name = action->task,
- .interval = interval_ms
+ .op_name = action_name,
+ .interval = interval_ms,
};
pe_rule_eval_data_t rule_data = {
- .node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
+ .node_hash = (node == NULL)? NULL : node->details->attrs,
+ .role = pcmk_role_unknown,
+ .now = rsc->cluster->now,
.match_data = NULL,
.rsc_data = &rsc_rule_data,
- .op_data = &op_rule_data
+ .op_data = &op_rule_data,
};
- CRM_CHECK(action && action->rsc, return);
-
- is_probe = pcmk_is_probe(action->task, interval_ms);
+ meta = pcmk__strkey_table(free, free);
// Cluster-wide <op_defaults> <meta_attributes>
- pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS, &rule_data,
- action->meta, NULL, FALSE, data_set);
-
- // Determine probe default timeout differently
- if (is_probe) {
- xmlNode *min_interval_mon = find_min_interval_mon(action->rsc, FALSE);
-
- if (min_interval_mon) {
- value = crm_element_value(min_interval_mon, XML_ATTR_TIMEOUT);
- if (value) {
- crm_trace("\t%s: Setting default timeout to minimum-interval "
- "monitor's timeout '%s'", action->uuid, value);
- g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
- strdup(value));
+ pe__unpack_dataset_nvpairs(rsc->cluster->op_defaults, XML_TAG_META_SETS,
+ &rule_data, meta, NULL, FALSE, rsc->cluster);
+
+ // Derive default timeout for probes from recurring monitor timeouts
+ if (pcmk_is_probe(action_name, interval_ms)) {
+ xmlNode *min_interval_mon = most_frequent_monitor(rsc);
+
+ if (min_interval_mon != NULL) {
+ /* @TODO This does not consider timeouts set in meta_attributes
+ * blocks (which may also have rules that need to be evaluated).
+ */
+ timeout_spec = crm_element_value(min_interval_mon,
+ XML_ATTR_TIMEOUT);
+ if (timeout_spec != NULL) {
+ pe_rsc_trace(rsc,
+ "Setting default timeout for %s probe to "
+ "most frequent monitor's timeout '%s'",
+ rsc->id, timeout_spec);
+ name = strdup(XML_ATTR_TIMEOUT);
+ value = strdup(timeout_spec);
+ CRM_ASSERT((name != NULL) && (value != NULL));
+ g_hash_table_insert(meta, name, value);
}
}
}
- if (xml_obj) {
- xmlAttrPtr xIter = NULL;
-
+ if (action_config != NULL) {
// <op> <meta_attributes> take precedence over defaults
- pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_META_SETS, &rule_data,
- action->meta, NULL, TRUE, data_set);
+ pe__unpack_dataset_nvpairs(action_config, XML_TAG_META_SETS, &rule_data,
+ meta, NULL, TRUE, rsc->cluster);
/* Anything set as an <op> XML property has highest precedence.
* This ensures we use the name and interval from the <op> tag.
+ * (See below for the only exception, fence device start/probe timeout.)
*/
- for (xIter = xml_obj->properties; xIter; xIter = xIter->next) {
- const char *prop_name = (const char *)xIter->name;
- const char *prop_value = crm_element_value(xml_obj, prop_name);
+ for (xmlAttrPtr attr = action_config->properties;
+ attr != NULL; attr = attr->next) {
+ name = strdup((const char *) attr->name);
+ value = strdup(pcmk__xml_attr_value(attr));
- g_hash_table_replace(action->meta, strdup(prop_name), strdup(prop_value));
+ CRM_ASSERT((name != NULL) && (value != NULL));
+ g_hash_table_insert(meta, name, value);
}
}
- g_hash_table_remove(action->meta, "id");
+ g_hash_table_remove(meta, XML_ATTR_ID);
// Normalize interval to milliseconds
if (interval_ms > 0) {
- g_hash_table_replace(action->meta, strdup(XML_LRM_ATTR_INTERVAL),
- crm_strdup_printf("%u", interval_ms));
+ name = strdup(XML_LRM_ATTR_INTERVAL);
+ CRM_ASSERT(name != NULL);
+ value = crm_strdup_printf("%u", interval_ms);
+ g_hash_table_insert(meta, name, value);
} else {
- g_hash_table_remove(action->meta, XML_LRM_ATTR_INTERVAL);
- }
-
- /*
- * Timeout order of precedence:
- * 1. pcmk_monitor_timeout (if rsc has pcmk_ra_cap_fence_params
- * and task is start or a probe; pcmk_monitor_timeout works
- * by default for a recurring monitor)
- * 2. explicit op timeout on the primitive
- * 3. default op timeout
- * a. if probe, then min-interval monitor's timeout
- * b. else, in XML_CIB_TAG_OPCONFIG
- * 4. CRM_DEFAULT_OP_TIMEOUT_S
- *
- * #1 overrides general rule of <op> XML property having highest
- * precedence.
+ g_hash_table_remove(meta, XML_LRM_ATTR_INTERVAL);
+ }
+
+ /* Timeout order of precedence (highest to lowest):
+ * 1. pcmk_monitor_timeout resource parameter (only for starts and probes
+ * when rsc has pcmk_ra_cap_fence_params; this gets used for recurring
+ * monitors via the executor instead)
+ * 2. timeout configured in <op> (with <op timeout> taking precedence over
+ * <op> <meta_attributes>)
+ * 3. timeout configured in <op_defaults> <meta_attributes>
+ * 4. PCMK_DEFAULT_ACTION_TIMEOUT_MS
*/
+
+ // Check for pcmk_monitor_timeout
if (pcmk_is_set(pcmk_get_ra_caps(rsc_rule_data.standard),
pcmk_ra_cap_fence_params)
- && (pcmk__str_eq(action->task, RSC_START, pcmk__str_casei)
- || is_probe)) {
-
- GHashTable *params = pe_rsc_params(action->rsc, action->node, data_set);
+ && (pcmk__str_eq(action_name, PCMK_ACTION_START, pcmk__str_none)
+ || pcmk_is_probe(action_name, interval_ms))) {
+
+ GHashTable *params = pe_rsc_params(rsc, node, rsc->cluster);
+
+ timeout_spec = g_hash_table_lookup(params, "pcmk_monitor_timeout");
+ if (timeout_spec != NULL) {
+ pe_rsc_trace(rsc,
+ "Setting timeout for %s %s to "
+ "pcmk_monitor_timeout (%s)",
+ rsc->id, action_name, timeout_spec);
+ name = strdup(XML_ATTR_TIMEOUT);
+ value = strdup(timeout_spec);
+ CRM_ASSERT((name != NULL) && (value != NULL));
+ g_hash_table_insert(meta, name, value);
+ }
+ }
- value = g_hash_table_lookup(params, "pcmk_monitor_timeout");
+ // Normalize timeout to positive milliseconds
+ name = strdup(XML_ATTR_TIMEOUT);
+ CRM_ASSERT(name != NULL);
+ timeout_spec = g_hash_table_lookup(meta, XML_ATTR_TIMEOUT);
+ g_hash_table_insert(meta, name, pcmk__itoa(unpack_timeout(timeout_spec)));
+
+ // Ensure on-fail has a valid value
+ validate_on_fail(rsc, action_name, action_config, meta);
+
+ // Normalize start-delay
+ str = g_hash_table_lookup(meta, XML_OP_ATTR_START_DELAY);
+ if (str != NULL) {
+ unpack_start_delay(str, meta);
+ } else {
+ long long start_delay = 0;
- if (value) {
- crm_trace("\t%s: Setting timeout to pcmk_monitor_timeout '%s', "
- "overriding default", action->uuid, value);
- g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
- strdup(value));
+ str = g_hash_table_lookup(meta, XML_OP_ATTR_ORIGIN);
+ if (unpack_interval_origin(str, action_config, interval_ms,
+ rsc->cluster->now, &start_delay)) {
+ name = strdup(XML_OP_ATTR_START_DELAY);
+ CRM_ASSERT(name != NULL);
+ g_hash_table_insert(meta, name,
+ crm_strdup_printf("%lld", start_delay));
}
}
+ return meta;
+}
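A minimal sketch of how a caller might consume the new helper (hypothetical code, assuming rsc and node come from already-unpacked scheduler data): unpack the meta-attributes of a start action and read the timeout, which is always present in normalized millisecond form afterward:

static void
trace_start_timeout(pcmk_resource_t *rsc, const pcmk_node_t *node)
{
    xmlNode *config = pcmk__find_action_config(rsc, PCMK_ACTION_START, 0, true);
    GHashTable *meta = pcmk__unpack_action_meta(rsc, node, PCMK_ACTION_START,
                                                0, config);

    // Timeout has already been normalized to positive milliseconds
    crm_trace("Effective start timeout for %s: %sms",
              rsc->id,
              (const char *) g_hash_table_lookup(meta, XML_ATTR_TIMEOUT));
    g_hash_table_destroy(meta);
}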
- // Normalize timeout to positive milliseconds
- value = g_hash_table_lookup(action->meta, XML_ATTR_TIMEOUT);
- timeout_ms = unpack_timeout(value);
- g_hash_table_replace(action->meta, strdup(XML_ATTR_TIMEOUT),
- pcmk__itoa(timeout_ms));
+/*!
+ * \internal
+ * \brief Determine an action's quorum and fencing dependency
+ *
+ * \param[in] rsc Resource that action is for
+ * \param[in] action_name Name of action being unpacked
+ *
+ * \return Quorum and fencing dependency appropriate to action
+ */
+enum rsc_start_requirement
+pcmk__action_requires(const pcmk_resource_t *rsc, const char *action_name)
+{
+ const char *value = NULL;
+ enum rsc_start_requirement requires = pcmk_requires_nothing;
+
+ CRM_CHECK((rsc != NULL) && (action_name != NULL), return requires);
- if (!pcmk__strcase_any_of(action->task, RSC_START, RSC_PROMOTE, NULL)) {
- action->needs = rsc_req_nothing;
+ if (!pcmk__strcase_any_of(action_name, PCMK_ACTION_START,
+ PCMK_ACTION_PROMOTE, NULL)) {
value = "nothing (not start or promote)";
- } else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_fencing)) {
- action->needs = rsc_req_stonith;
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)) {
+ requires = pcmk_requires_fencing;
value = "fencing";
- } else if (pcmk_is_set(action->rsc->flags, pe_rsc_needs_quorum)) {
- action->needs = rsc_req_quorum;
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_needs_quorum)) {
+ requires = pcmk_requires_quorum;
value = "quorum";
} else {
- action->needs = rsc_req_nothing;
value = "nothing";
}
- pe_rsc_trace(action->rsc, "%s requires %s", action->uuid, value);
+ pe_rsc_trace(rsc, "%s of %s requires %s", action_name, rsc->id, value);
+ return requires;
+}
- value = unpack_operation_on_fail(action);
+/*!
+ * \internal
+ * \brief Parse action failure response from a user-provided string
+ *
+ * \param[in] rsc Resource that action is for
+ * \param[in] action_name Name of action
+ * \param[in] interval_ms Action interval (in milliseconds)
+ * \param[in] value User-provided configuration value for on-fail
+ *
+ * \return Action failure response parsed from \p value
+ */
+enum action_fail_response
+pcmk__parse_on_fail(const pcmk_resource_t *rsc, const char *action_name,
+ guint interval_ms, const char *value)
+{
+ const char *desc = NULL;
+ bool needs_remote_reset = false;
+ enum action_fail_response on_fail = pcmk_on_fail_ignore;
if (value == NULL) {
+ // Use default
} else if (pcmk__str_eq(value, "block", pcmk__str_casei)) {
- action->on_fail = action_fail_block;
- g_hash_table_insert(action->meta, strdup(XML_OP_ATTR_ON_FAIL), strdup("block"));
- value = "block"; // The above could destroy the original string
+ on_fail = pcmk_on_fail_block;
+ desc = "block";
} else if (pcmk__str_eq(value, "fence", pcmk__str_casei)) {
- action->on_fail = action_fail_fence;
- value = "node fencing";
-
- if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) {
+ on_fail = pcmk_on_fail_fence_node;
+ desc = "node fencing";
+ } else {
pcmk__config_err("Resetting '" XML_OP_ATTR_ON_FAIL "' for "
- "operation '%s' to 'stop' because 'fence' is not "
- "valid when fencing is disabled", action->uuid);
- action->on_fail = action_fail_stop;
- action->fail_role = RSC_ROLE_STOPPED;
- value = "stop resource";
+ "%s of %s to 'stop' because 'fence' is not "
+ "valid when fencing is disabled",
+ action_name, rsc->id);
+ on_fail = pcmk_on_fail_stop;
+ desc = "stop resource";
}
} else if (pcmk__str_eq(value, "standby", pcmk__str_casei)) {
- action->on_fail = action_fail_standby;
- value = "node standby";
+ on_fail = pcmk_on_fail_standby_node;
+ desc = "node standby";
} else if (pcmk__strcase_any_of(value, "ignore", PCMK__VALUE_NOTHING,
NULL)) {
- action->on_fail = action_fail_ignore;
- value = "ignore";
+ desc = "ignore";
} else if (pcmk__str_eq(value, "migrate", pcmk__str_casei)) {
- action->on_fail = action_fail_migrate;
- value = "force migration";
+ on_fail = pcmk_on_fail_ban;
+ desc = "force migration";
} else if (pcmk__str_eq(value, "stop", pcmk__str_casei)) {
- action->on_fail = action_fail_stop;
- action->fail_role = RSC_ROLE_STOPPED;
- value = "stop resource";
+ on_fail = pcmk_on_fail_stop;
+ desc = "stop resource";
} else if (pcmk__str_eq(value, "restart", pcmk__str_casei)) {
- action->on_fail = action_fail_recover;
- value = "restart (and possibly migrate)";
+ on_fail = pcmk_on_fail_restart;
+ desc = "restart (and possibly migrate)";
} else if (pcmk__str_eq(value, "restart-container", pcmk__str_casei)) {
- if (container) {
- action->on_fail = action_fail_restart_container;
- value = "restart container (and possibly migrate)";
-
+ if (rsc->container == NULL) {
+ pe_rsc_debug(rsc,
+ "Using default " XML_OP_ATTR_ON_FAIL
+ " for %s of %s because it does not have a container",
+ action_name, rsc->id);
} else {
- value = NULL;
+ on_fail = pcmk_on_fail_restart_container;
+ desc = "restart container (and possibly migrate)";
}
} else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
- action->on_fail = action_fail_demote;
- value = "demote instance";
+ on_fail = pcmk_on_fail_demote;
+ desc = "demote instance";
} else {
- pe_err("Resource %s: Unknown failure type (%s)", action->rsc->id, value);
- value = NULL;
+ pcmk__config_err("Using default '" XML_OP_ATTR_ON_FAIL "' for "
+ "%s of %s because '%s' is not valid",
+ action_name, rsc->id, value);
}
- /* defaults */
- if (value == NULL && container) {
- action->on_fail = action_fail_restart_container;
- value = "restart container (and possibly migrate) (default)";
+ /* Remote node connections are handled specially. Failures that result
+ * in dropping an active connection must result in fencing. The only
+ * failures that don't are probes and starts. The user can explicitly set
+ * on-fail="fence" to fence after start failures.
+ */
+ if (pe__resource_is_remote_conn(rsc)
+ && !pcmk_is_probe(action_name, interval_ms)
+ && !pcmk__str_eq(action_name, PCMK_ACTION_START, pcmk__str_none)) {
+ needs_remote_reset = true;
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
+ desc = NULL; // Force default for unmanaged connections
+ }
+ }
- /* For remote nodes, ensure that any failure that results in dropping an
- * active connection to the node results in fencing of the node.
- *
- * There are only two action failures that don't result in fencing.
- * 1. probes - probe failures are expected.
- * 2. start - a start failure indicates that an active connection does not already
- * exist. The user can set op on-fail=fence if they really want to fence start
- * failures. */
- } else if (((value == NULL) || !pcmk_is_set(action->rsc->flags, pe_rsc_managed))
- && pe__resource_is_remote_conn(action->rsc, data_set)
- && !(pcmk__str_eq(action->task, CRMD_ACTION_STATUS, pcmk__str_casei)
- && (interval_ms == 0))
- && !pcmk__str_eq(action->task, CRMD_ACTION_START, pcmk__str_casei)) {
-
- if (!pcmk_is_set(action->rsc->flags, pe_rsc_managed)) {
- action->on_fail = action_fail_stop;
- action->fail_role = RSC_ROLE_STOPPED;
- value = "stop unmanaged remote node (enforcing default)";
+ if (desc != NULL) {
+ // Explicit value used, default not needed
- } else {
- if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
- value = "fence remote node (default)";
- } else {
- value = "recover remote node connection (default)";
- }
+ } else if (rsc->container != NULL) {
+ on_fail = pcmk_on_fail_restart_container;
+ desc = "restart container (and possibly migrate) (default)";
- if (action->rsc->remote_reconnect_ms) {
- action->fail_role = RSC_ROLE_STOPPED;
+ } else if (needs_remote_reset) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
+ if (pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_fencing_enabled)) {
+ desc = "fence remote node (default)";
+ } else {
+ desc = "recover remote node connection (default)";
}
- action->on_fail = action_fail_reset_remote;
+ on_fail = pcmk_on_fail_reset_remote;
+ } else {
+ on_fail = pcmk_on_fail_stop;
+ desc = "stop unmanaged remote node (enforcing default)";
}
- } else if (value == NULL && pcmk__str_eq(action->task, CRMD_ACTION_STOP, pcmk__str_casei)) {
- if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
- action->on_fail = action_fail_fence;
- value = "resource fence (default)";
-
+ } else if (pcmk__str_eq(action_name, PCMK_ACTION_STOP, pcmk__str_none)) {
+ if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) {
+ on_fail = pcmk_on_fail_fence_node;
+ desc = "resource fence (default)";
} else {
- action->on_fail = action_fail_block;
- value = "resource block (default)";
+ on_fail = pcmk_on_fail_block;
+ desc = "resource block (default)";
}
- } else if (value == NULL) {
- action->on_fail = action_fail_recover;
- value = "restart (and possibly migrate) (default)";
+ } else {
+ on_fail = pcmk_on_fail_restart;
+ desc = "restart (and possibly migrate) (default)";
}
- pe_rsc_trace(action->rsc, "%s failure handling: %s",
- action->uuid, value);
+ pe_rsc_trace(rsc, "Failure handling for %s-interval %s of %s: %s",
+ pcmk__readable_interval(interval_ms), action_name,
+ rsc->id, desc);
+ return on_fail;
+}
- value = NULL;
- if (xml_obj != NULL) {
- value = g_hash_table_lookup(action->meta, "role_after_failure");
- if (value) {
- pe_warn_once(pe_wo_role_after,
- "Support for role_after_failure is deprecated and will be removed in a future release");
- }
+/*!
+ * \internal
+ * \brief Determine a resource's role after failure of an action
+ *
+ * \param[in] rsc Resource that action is for
+ * \param[in] action_name Action name
+ * \param[in] on_fail Failure handling for action
+ * \param[in] meta Unpacked action meta-attributes
+ *
+ * \return Resource role that results from failure of action
+ */
+enum rsc_role_e
+pcmk__role_after_failure(const pcmk_resource_t *rsc, const char *action_name,
+ enum action_fail_response on_fail, GHashTable *meta)
+{
+ const char *value = NULL;
+ enum rsc_role_e role = pcmk_role_unknown;
+
+ // Set default for role after failure specially in certain circumstances
+ switch (on_fail) {
+ case pcmk_on_fail_stop:
+ role = pcmk_role_stopped;
+ break;
+
+ case pcmk_on_fail_reset_remote:
+ if (rsc->remote_reconnect_ms != 0) {
+ role = pcmk_role_stopped;
+ }
+ break;
+
+ default:
+ break;
}
- if (value != NULL && action->fail_role == RSC_ROLE_UNKNOWN) {
- action->fail_role = text2role(value);
+
+ // @COMPAT Check for explicitly configured role (deprecated)
+ value = g_hash_table_lookup(meta, "role_after_failure");
+ if (value != NULL) {
+ pe_warn_once(pcmk__wo_role_after,
+ "Support for role_after_failure is deprecated "
+ "and will be removed in a future release");
+ if (role == pcmk_role_unknown) {
+ role = text2role(value);
+ }
}
- /* defaults */
- if (action->fail_role == RSC_ROLE_UNKNOWN) {
- if (pcmk__str_eq(action->task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
- action->fail_role = RSC_ROLE_UNPROMOTED;
+
+ if (role == pcmk_role_unknown) {
+ // Use default
+ if (pcmk__str_eq(action_name, PCMK_ACTION_PROMOTE, pcmk__str_none)) {
+ role = pcmk_role_unpromoted;
} else {
- action->fail_role = RSC_ROLE_STARTED;
+ role = pcmk_role_started;
}
}
- pe_rsc_trace(action->rsc, "%s failure results in: %s",
- action->uuid, role2text(action->fail_role));
+ pe_rsc_trace(rsc, "Role after %s %s failure is: %s",
+ rsc->id, action_name, role2text(role));
+ return role;
+}
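As a hedged illustration (hypothetical caller, assuming meta was produced by pcmk__unpack_action_meta() for the same action), the two new helpers compose the same way unpack_operation() below uses them:

static void
trace_promote_failure_policy(const pcmk_resource_t *rsc, GHashTable *meta)
{
    const char *value = g_hash_table_lookup(meta, XML_OP_ATTR_ON_FAIL);
    enum action_fail_response on_fail =
        pcmk__parse_on_fail(rsc, PCMK_ACTION_PROMOTE, 0, value);
    enum rsc_role_e fail_role =
        pcmk__role_after_failure(rsc, PCMK_ACTION_PROMOTE, on_fail, meta);

    crm_trace("A failed promote of %s would leave it %s",
              rsc->id, role2text(fail_role));
}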
- value = g_hash_table_lookup(action->meta, XML_OP_ATTR_START_DELAY);
- if (value) {
- unpack_start_delay(value, action->meta);
- } else {
- long long start_delay = 0;
+/*!
+ * \internal
+ * \brief Unpack action configuration
+ *
+ * Unpack a resource action's meta-attributes (normalizing the interval,
+ * timeout, and start delay values as integer milliseconds), requirements, and
+ * failure policy from its CIB XML configuration (including defaults).
+ *
+ * \param[in,out] action Resource action to unpack into
+ * \param[in] xml_obj Action configuration XML (NULL for defaults only)
+ * \param[in] interval_ms How frequently to perform the operation
+ */
+static void
+unpack_operation(pcmk_action_t *action, const xmlNode *xml_obj,
+ guint interval_ms)
+{
+ const char *value = NULL;
- value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ORIGIN);
- if (unpack_interval_origin(value, xml_obj, interval_ms, data_set->now,
- &start_delay)) {
- g_hash_table_replace(action->meta, strdup(XML_OP_ATTR_START_DELAY),
- crm_strdup_printf("%lld", start_delay));
- }
- }
+ action->meta = pcmk__unpack_action_meta(action->rsc, action->node,
+ action->task, interval_ms, xml_obj);
+ action->needs = pcmk__action_requires(action->rsc, action->task);
+
+ value = g_hash_table_lookup(action->meta, XML_OP_ATTR_ON_FAIL);
+ action->on_fail = pcmk__parse_on_fail(action->rsc, action->task,
+ interval_ms, value);
+
+ action->fail_role = pcmk__role_after_failure(action->rsc, action->task,
+ action->on_fail, action->meta);
}
/*!
@@ -929,31 +1105,26 @@ unpack_operation(pe_action_t *action, const xmlNode *xml_obj,
* \param[in] task Action name (must be non-NULL)
* \param[in] on_node Node that action is on (if any)
* \param[in] optional Whether action should be considered optional
- * \param[in] save_action Whether action should be recorded in transition graph
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
- * \return Action object corresponding to arguments
- * \note This function takes ownership of (and might free) \p key. If
- * \p save_action is true, \p data_set will own the returned action,
- * otherwise it is the caller's responsibility to free the return value
- * with pe_free_action().
+ * \return Action object corresponding to arguments (guaranteed not to be
+ * \c NULL)
+ * \note This function takes ownership of (and might free) \p key, and
+ * \p scheduler takes ownership of the returned action (the caller should
+ * not free it).
*/
-pe_action_t *
-custom_action(pe_resource_t *rsc, char *key, const char *task,
- const pe_node_t *on_node, gboolean optional, gboolean save_action,
- pe_working_set_t *data_set)
+pcmk_action_t *
+custom_action(pcmk_resource_t *rsc, char *key, const char *task,
+ const pcmk_node_t *on_node, gboolean optional,
+ pcmk_scheduler_t *scheduler)
{
- pe_action_t *action = NULL;
+ pcmk_action_t *action = NULL;
- CRM_ASSERT((key != NULL) && (task != NULL) && (data_set != NULL));
-
- if (save_action) {
- action = find_existing_action(key, rsc, on_node, data_set);
- }
+ CRM_ASSERT((key != NULL) && (task != NULL) && (scheduler != NULL));
+ action = find_existing_action(key, rsc, on_node, scheduler);
if (action == NULL) {
- action = new_action(key, task, rsc, on_node, optional, save_action,
- data_set);
+ action = new_action(key, task, rsc, on_node, optional, scheduler);
} else {
free(key);
}
@@ -961,28 +1132,38 @@ custom_action(pe_resource_t *rsc, char *key, const char *task,
update_action_optional(action, optional);
if (rsc != NULL) {
- if (action->node != NULL) {
- unpack_action_node_attributes(action, data_set);
- }
+ if ((action->node != NULL) && (action->op_entry != NULL)
+ && !pcmk_is_set(action->flags, pcmk_action_attrs_evaluated)) {
- update_resource_action_runnable(action, save_action, data_set);
+ GHashTable *attrs = action->node->details->attrs;
- if (save_action) {
- update_resource_flags_for_action(rsc, action);
+ if (action->extra != NULL) {
+ g_hash_table_destroy(action->extra);
+ }
+ action->extra = pcmk__unpack_action_rsc_params(action->op_entry,
+ attrs, scheduler);
+ pe__set_action_flags(action, pcmk_action_attrs_evaluated);
}
+
+ update_resource_action_runnable(action, scheduler);
+ update_resource_flags_for_action(rsc, action);
+ }
+
+ if (action->extra == NULL) {
+ action->extra = pcmk__strkey_table(free, free);
}
return action;
}
-pe_action_t *
-get_pseudo_op(const char *name, pe_working_set_t * data_set)
+pcmk_action_t *
+get_pseudo_op(const char *name, pcmk_scheduler_t *scheduler)
{
- pe_action_t *op = lookup_singleton(data_set, name);
+ pcmk_action_t *op = lookup_singleton(scheduler, name);
if (op == NULL) {
- op = custom_action(NULL, strdup(name), name, NULL, TRUE, TRUE, data_set);
- pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
+ op = custom_action(NULL, strdup(name), name, NULL, TRUE, scheduler);
+ pe__set_action_flags(op, pcmk_action_pseudo|pcmk_action_runnable);
}
return op;
}
@@ -991,15 +1172,15 @@ static GList *
find_unfencing_devices(GList *candidates, GList *matches)
{
for (GList *gIter = candidates; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *candidate = gIter->data;
+ pcmk_resource_t *candidate = gIter->data;
if (candidate->children != NULL) {
matches = find_unfencing_devices(candidate->children, matches);
- } else if (!pcmk_is_set(candidate->flags, pe_rsc_fence_device)) {
+ } else if (!pcmk_is_set(candidate->flags, pcmk_rsc_fence_device)) {
continue;
- } else if (pcmk_is_set(candidate->flags, pe_rsc_needs_unfencing)) {
+ } else if (pcmk_is_set(candidate->flags, pcmk_rsc_needs_unfencing)) {
matches = g_list_prepend(matches, candidate);
} else if (pcmk__str_eq(g_hash_table_lookup(candidate->meta,
@@ -1013,8 +1194,8 @@ find_unfencing_devices(GList *candidates, GList *matches)
}
static int
-node_priority_fencing_delay(const pe_node_t *node,
- const pe_working_set_t *data_set)
+node_priority_fencing_delay(const pcmk_node_t *node,
+ const pcmk_scheduler_t *scheduler)
{
int member_count = 0;
int online_count = 0;
@@ -1023,13 +1204,13 @@ node_priority_fencing_delay(const pe_node_t *node,
GList *gIter = NULL;
// `priority-fencing-delay` is disabled
- if (data_set->priority_fencing_delay <= 0) {
+ if (scheduler->priority_fencing_delay <= 0) {
return 0;
}
/* No need to request a delay if the fencing target is not a normal cluster
* member, for example if it's a remote node or a guest node. */
- if (node->details->type != node_member) {
+ if (node->details->type != pcmk_node_variant_cluster) {
return 0;
}
@@ -1038,10 +1219,10 @@ node_priority_fencing_delay(const pe_node_t *node,
return 0;
}
- for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *n = gIter->data;
+ for (gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) {
+ pcmk_node_t *n = gIter->data;
- if (n->details->type != node_member) {
+ if (n->details->type != pcmk_node_variant_cluster) {
continue;
}
@@ -1077,54 +1258,58 @@ node_priority_fencing_delay(const pe_node_t *node,
return 0;
}
- return data_set->priority_fencing_delay;
+ return scheduler->priority_fencing_delay;
}
-pe_action_t *
-pe_fence_op(pe_node_t *node, const char *op, bool optional,
- const char *reason, bool priority_delay, pe_working_set_t *data_set)
+pcmk_action_t *
+pe_fence_op(pcmk_node_t *node, const char *op, bool optional,
+ const char *reason, bool priority_delay,
+ pcmk_scheduler_t *scheduler)
{
char *op_key = NULL;
- pe_action_t *stonith_op = NULL;
+ pcmk_action_t *stonith_op = NULL;
if(op == NULL) {
- op = data_set->stonith_action;
+ op = scheduler->stonith_action;
}
- op_key = crm_strdup_printf("%s-%s-%s", CRM_OP_FENCE, node->details->uname, op);
+ op_key = crm_strdup_printf("%s-%s-%s",
+ PCMK_ACTION_STONITH, node->details->uname, op);
- stonith_op = lookup_singleton(data_set, op_key);
+ stonith_op = lookup_singleton(scheduler, op_key);
if(stonith_op == NULL) {
- stonith_op = custom_action(NULL, op_key, CRM_OP_FENCE, node, TRUE, TRUE, data_set);
+ stonith_op = custom_action(NULL, op_key, PCMK_ACTION_STONITH, node,
+ TRUE, scheduler);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET, node->details->uname);
add_hash_param(stonith_op->meta, XML_LRM_ATTR_TARGET_UUID, node->details->id);
add_hash_param(stonith_op->meta, "stonith_action", op);
- if (pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_enable_unfencing)) {
/* Extra work to detect device changes
*/
GString *digests_all = g_string_sized_new(1024);
GString *digests_secure = g_string_sized_new(1024);
- GList *matches = find_unfencing_devices(data_set->resources, NULL);
+ GList *matches = find_unfencing_devices(scheduler->resources, NULL);
char *key = NULL;
char *value = NULL;
for (GList *gIter = matches; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *match = gIter->data;
+ pcmk_resource_t *match = gIter->data;
const char *agent = g_hash_table_lookup(match->meta,
XML_ATTR_TYPE);
op_digest_cache_t *data = NULL;
- data = pe__compare_fencing_digest(match, agent, node, data_set);
- if(data->rc == RSC_DIGEST_ALL) {
+ data = pe__compare_fencing_digest(match, agent, node,
+ scheduler);
+ if (data->rc == pcmk__digest_mismatch) {
optional = FALSE;
crm_notice("Unfencing node %s because the definition of "
"%s changed", pe__node_name(node), match->id);
- if (!pcmk__is_daemon && data_set->priv != NULL) {
- pcmk__output_t *out = data_set->priv;
+ if (!pcmk__is_daemon && scheduler->priv != NULL) {
+ pcmk__output_t *out = scheduler->priv;
out->info(out,
"notice: Unfencing node %s because the "
@@ -1157,7 +1342,7 @@ pe_fence_op(pe_node_t *node, const char *op, bool optional,
free(op_key);
}
- if (data_set->priority_fencing_delay > 0
+ if (scheduler->priority_fencing_delay > 0
/* It's a suitable case where `priority-fencing-delay` applies.
* At least add `priority-fencing-delay` field as an indicator. */
@@ -1174,15 +1359,16 @@ pe_fence_op(pe_node_t *node, const char *op, bool optional,
* the targeting node. So that it takes precedence over any possible
* `pcmk_delay_base/max`.
*/
- char *delay_s = pcmk__itoa(node_priority_fencing_delay(node, data_set));
+ char *delay_s = pcmk__itoa(node_priority_fencing_delay(node,
+ scheduler));
g_hash_table_insert(stonith_op->meta,
strdup(XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY),
delay_s);
}
- if(optional == FALSE && pe_can_fence(data_set, node)) {
- pe__clear_action_flags(stonith_op, pe_action_optional);
+ if(optional == FALSE && pe_can_fence(scheduler, node)) {
+ pe__clear_action_flags(stonith_op, pcmk_action_optional);
pe_action_set_reason(stonith_op, reason, false);
} else if(reason && stonith_op->reason == NULL) {
@@ -1193,13 +1379,13 @@ pe_fence_op(pe_node_t *node, const char *op, bool optional,
}
void
-pe_free_action(pe_action_t * action)
+pe_free_action(pcmk_action_t *action)
{
if (action == NULL) {
return;
}
- g_list_free_full(action->actions_before, free); /* pe_action_wrapper_t* */
- g_list_free_full(action->actions_after, free); /* pe_action_wrapper_t* */
+ g_list_free_full(action->actions_before, free);
+ g_list_free_full(action->actions_after, free);
if (action->extra) {
g_hash_table_destroy(action->extra);
}
@@ -1215,7 +1401,8 @@ pe_free_action(pe_action_t * action)
}
int
-pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set_t *data_set)
+pe_get_configured_timeout(pcmk_resource_t *rsc, const char *action,
+ pcmk_scheduler_t *scheduler)
{
xmlNode *child = NULL;
GHashTable *action_meta = NULL;
@@ -1224,8 +1411,8 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
+ .role = pcmk_role_unknown,
+ .now = scheduler->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
@@ -1240,10 +1427,11 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set
}
}
- if (timeout_spec == NULL && data_set->op_defaults) {
+ if (timeout_spec == NULL && scheduler->op_defaults) {
action_meta = pcmk__strkey_table(free, free);
- pe__unpack_dataset_nvpairs(data_set->op_defaults, XML_TAG_META_SETS,
- &rule_data, action_meta, NULL, FALSE, data_set);
+ pe__unpack_dataset_nvpairs(scheduler->op_defaults, XML_TAG_META_SETS,
+ &rule_data, action_meta, NULL, FALSE,
+ scheduler);
timeout_spec = g_hash_table_lookup(action_meta, XML_ATTR_TIMEOUT);
}
@@ -1252,7 +1440,7 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set
timeout_ms = crm_get_msec(timeout_spec);
if (timeout_ms < 0) {
- timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
+ timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
}
if (action_meta != NULL) {
@@ -1262,16 +1450,16 @@ pe_get_configured_timeout(pe_resource_t *rsc, const char *action, pe_working_set
}
enum action_tasks
-get_complex_task(const pe_resource_t *rsc, const char *name)
+get_complex_task(const pcmk_resource_t *rsc, const char *name)
{
enum action_tasks task = text2task(name);
- if ((rsc != NULL) && (rsc->variant == pe_native)) {
+ if ((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive)) {
switch (task) {
- case stopped_rsc:
- case started_rsc:
- case action_demoted:
- case action_promoted:
+ case pcmk_action_stopped:
+ case pcmk_action_started:
+ case pcmk_action_demoted:
+ case pcmk_action_promoted:
crm_trace("Folding %s back into its atomic counterpart for %s",
name, rsc->id);
--task;
@@ -1294,14 +1482,14 @@ get_complex_task(const pe_resource_t *rsc, const char *name)
*
* \return First action in list that matches criteria, or NULL if none
*/
-pe_action_t *
+pcmk_action_t *
find_first_action(const GList *input, const char *uuid, const char *task,
- const pe_node_t *on_node)
+ const pcmk_node_t *on_node)
{
CRM_CHECK(uuid || task, return NULL);
for (const GList *gIter = input; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ pcmk_action_t *action = (pcmk_action_t *) gIter->data;
if (uuid != NULL && !pcmk__str_eq(uuid, action->uuid, pcmk__str_casei)) {
continue;
@@ -1324,7 +1512,7 @@ find_first_action(const GList *input, const char *uuid, const char *task,
}
GList *
-find_actions(GList *input, const char *key, const pe_node_t *on_node)
+find_actions(GList *input, const char *key, const pcmk_node_t *on_node)
{
GList *gIter = input;
GList *result = NULL;
@@ -1332,7 +1520,7 @@ find_actions(GList *input, const char *key, const pe_node_t *on_node)
CRM_CHECK(key != NULL, return NULL);
for (; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ pcmk_action_t *action = (pcmk_action_t *) gIter->data;
if (!pcmk__str_eq(key, action->uuid, pcmk__str_casei)) {
continue;
@@ -1358,7 +1546,7 @@ find_actions(GList *input, const char *key, const pe_node_t *on_node)
}
GList *
-find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
+find_actions_exact(GList *input, const char *key, const pcmk_node_t *on_node)
{
GList *result = NULL;
@@ -1369,7 +1557,7 @@ find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
}
for (GList *gIter = input; gIter != NULL; gIter = gIter->next) {
- pe_action_t *action = (pe_action_t *) gIter->data;
+ pcmk_action_t *action = (pcmk_action_t *) gIter->data;
if ((action->node != NULL)
&& pcmk__str_eq(key, action->uuid, pcmk__str_casei)
@@ -1397,7 +1585,7 @@ find_actions_exact(GList *input, const char *key, const pe_node_t *on_node)
* without a node will be assigned to node.
*/
GList *
-pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
+pe__resource_actions(const pcmk_resource_t *rsc, const pcmk_node_t *node,
const char *task, bool require_node)
{
GList *result = NULL;
@@ -1423,16 +1611,18 @@ pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
* \note It is the caller's responsibility to free() the result.
*/
char *
-pe__action2reason(const pe_action_t *action, enum pe_action_flags flag)
+pe__action2reason(const pcmk_action_t *action, enum pe_action_flags flag)
{
const char *change = NULL;
switch (flag) {
- case pe_action_runnable:
- case pe_action_migrate_runnable:
+ case pcmk_action_runnable:
change = "unrunnable";
break;
- case pe_action_optional:
+ case pcmk_action_migratable:
+ change = "unmigrateable";
+ break;
+ case pcmk_action_optional:
change = "required";
break;
default:
@@ -1446,7 +1636,8 @@ pe__action2reason(const pe_action_t *action, enum pe_action_flags flag)
action->task);
}
-void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite)
+void pe_action_set_reason(pcmk_action_t *action, const char *reason,
+ bool overwrite)
{
if (action->reason != NULL && overwrite) {
pe_rsc_trace(action->rsc, "Changing %s reason from '%s' to '%s'",
@@ -1468,20 +1659,14 @@ void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrit
*
* \param[in,out] rsc Resource to clear
* \param[in] node Node to clear history on
- * \param[in,out] data_set Cluster working set
- *
- * \return New action to clear resource history
*/
-pe_action_t *
-pe__clear_resource_history(pe_resource_t *rsc, const pe_node_t *node,
- pe_working_set_t *data_set)
+void
+pe__clear_resource_history(pcmk_resource_t *rsc, const pcmk_node_t *node)
{
- char *key = NULL;
+ CRM_ASSERT((rsc != NULL) && (node != NULL));
- CRM_ASSERT(rsc && node);
- key = pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0);
- return custom_action(rsc, key, CRM_OP_LRM_DELETE, node, FALSE, TRUE,
- data_set);
+ custom_action(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_LRM_DELETE, 0),
+ PCMK_ACTION_LRM_DELETE, node, FALSE, rsc->cluster);
}
#define sort_return(an_int, why) do { \
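Editor's note on the hunk just above: pe__clear_resource_history() now returns void and derives the scheduler from rsc->cluster, so callers only invoke it for its side effect. The sketch below is illustrative only and not part of the patch; the wrapper name schedule_history_cleanup() is an assumption.

/* Illustrative sketch (not part of the patch): the simplified call site for
 * pe__clear_resource_history() after it stopped returning the new action.
 * Assumes the internal declarations from <crm/pengine/internal.h>. */
static void
schedule_history_cleanup(pcmk_resource_t *rsc, const pcmk_node_t *node)
{
    /* Previously the caller received the lrm_delete action back; now it is
     * created internally via rsc->cluster. */
    pe__clear_resource_history(rsc, node);
}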
@@ -1646,19 +1831,19 @@ sort_op_by_callid(gconstpointer a, gconstpointer b)
*
* \return New action object corresponding to arguments
*/
-pe_action_t *
-pe__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task, bool optional,
+pcmk_action_t *
+pe__new_rsc_pseudo_action(pcmk_resource_t *rsc, const char *task, bool optional,
bool runnable)
{
- pe_action_t *action = NULL;
+ pcmk_action_t *action = NULL;
CRM_ASSERT((rsc != NULL) && (task != NULL));
action = custom_action(rsc, pcmk__op_key(rsc->id, task, 0), task, NULL,
- optional, TRUE, rsc->cluster);
- pe__set_action_flags(action, pe_action_pseudo);
+ optional, rsc->cluster);
+ pe__set_action_flags(action, pcmk_action_pseudo);
if (runnable) {
- pe__set_action_flags(action, pe_action_runnable);
+ pe__set_action_flags(action, pcmk_action_runnable);
}
return action;
}
@@ -1673,7 +1858,7 @@ pe__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task, bool optional,
* \note This is more efficient than calling add_hash_param().
*/
void
-pe__add_action_expected_result(pe_action_t *action, int expected_result)
+pe__add_action_expected_result(pcmk_action_t *action, int expected_result)
{
char *name = NULL;
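Before the diff moves on to pe_digest.c, a short illustration of the call-pattern change running through utils.c: pe_working_set_t becomes pcmk_scheduler_t, pe_action_t becomes pcmk_action_t, and pe_fence_op()/custom_action() drop the trailing boolean argument. This is a hedged sketch, not part of the patch; the helper name fence_unclean_node() and its surroundings are assumptions.

/* Illustrative sketch (not part of the patch): scheduling a fencing action
 * with the renamed types, assuming pe_fence_op() is declared as in the hunk
 * above. */
#include <crm/pengine/internal.h>

static void
fence_unclean_node(pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
    /* A NULL op falls back to scheduler->stonith_action; passing false for
     * priority_delay skips the priority-fencing-delay meta-attribute. */
    pcmk_action_t *fencing = pe_fence_op(node, NULL, false,
                                         "node is unclean", false, scheduler);

    if (fencing != NULL) {
        crm_info("Scheduled %s of %s", fencing->task, pe__node_name(node));
    }
}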
diff --git a/lib/pengine/pe_digest.c b/lib/pengine/pe_digest.c
index b8047da..546a2a7 100644
--- a/lib/pengine/pe_digest.c
+++ b/lib/pengine/pe_digest.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -93,27 +93,27 @@ attr_in_string(xmlAttrPtr a, void *user_data)
* \param[in] xml_op Unused
* \param[in] op_version CRM feature set to use for digest calculation
* \param[in] overrides Key/value table to override resource parameters
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
static void
-calculate_main_digest(op_digest_cache_t *data, pe_resource_t *rsc,
- const pe_node_t *node, GHashTable *params,
+calculate_main_digest(op_digest_cache_t *data, pcmk_resource_t *rsc,
+ const pcmk_node_t *node, GHashTable *params,
const char *task, guint *interval_ms,
const xmlNode *xml_op, const char *op_version,
- GHashTable *overrides, pe_working_set_t *data_set)
+ GHashTable *overrides, pcmk_scheduler_t *scheduler)
{
- pe_action_t *action = NULL;
+ xmlNode *action_config = NULL;
data->params_all = create_xml_node(NULL, XML_TAG_PARAMS);
/* REMOTE_CONTAINER_HACK: Allow Pacemaker Remote nodes to run containers
* that themselves are Pacemaker Remote nodes
*/
- (void) pe__add_bundle_remote_name(rsc, data_set, data->params_all,
+ (void) pe__add_bundle_remote_name(rsc, scheduler, data->params_all,
XML_RSC_ATTR_REMOTE_RA_ADDR);
- // If interval was overridden, reset it
if (overrides != NULL) {
+ // If interval was overridden, reset it
const char *interval_s = g_hash_table_lookup(overrides, CRM_META "_"
XML_LRM_ATTR_INTERVAL);
@@ -125,34 +125,42 @@ calculate_main_digest(op_digest_cache_t *data, pe_resource_t *rsc,
*interval_ms = (guint) value_ll;
}
}
- }
- action = custom_action(rsc, pcmk__op_key(rsc->id, task, *interval_ms),
- task, node, TRUE, FALSE, data_set);
- if (overrides != NULL) {
+ // Add overrides to list of all parameters
g_hash_table_foreach(overrides, hash2field, data->params_all);
}
- g_hash_table_foreach(params, hash2field, data->params_all);
- g_hash_table_foreach(action->extra, hash2field, data->params_all);
- g_hash_table_foreach(action->meta, hash2metafield, data->params_all);
- pcmk__filter_op_for_digest(data->params_all);
+ // Add provided instance parameters
+ g_hash_table_foreach(params, hash2field, data->params_all);
- /* Given a non-recurring operation with extra parameters configured,
- * in case that the main digest doesn't match, even if the restart
- * digest matches, enforce a restart rather than a reload-agent anyway.
- * So that it ensures any changes of the extra parameters get applied
- * for this specific operation, and the digests calculated for the
- * resulting lrm_rsc_op will be correct.
- * Mark the implied rc RSC_DIGEST_RESTART for the case that the main
- * digest doesn't match.
+ // Find action configuration XML in CIB
+ action_config = pcmk__find_action_config(rsc, task, *interval_ms, true);
+
+ /* Add action-specific resource instance attributes to the digest list.
+ *
+ * If this is a one-time action with action-specific instance attributes,
+ * enforce a restart instead of reload-agent in case the main digest doesn't
+ * match, even if the restart digest does. This ensures any changes of the
+ * action-specific parameters get applied for this specific action, and
+ * digests calculated for the resulting history will be correct. Default the
+ * result to RSC_DIGEST_RESTART for the case where the main digest doesn't
+ * match.
*/
- if (*interval_ms == 0
- && g_hash_table_size(action->extra) > 0) {
- data->rc = RSC_DIGEST_RESTART;
+ params = pcmk__unpack_action_rsc_params(action_config, node->details->attrs,
+ scheduler);
+ if ((*interval_ms == 0) && (g_hash_table_size(params) > 0)) {
+ data->rc = pcmk__digest_restart;
}
+ g_hash_table_foreach(params, hash2field, data->params_all);
+ g_hash_table_destroy(params);
+
+ // Add action meta-attributes
+ params = pcmk__unpack_action_meta(rsc, node, task, *interval_ms,
+ action_config);
+ g_hash_table_foreach(params, hash2metafield, data->params_all);
+ g_hash_table_destroy(params);
- pe_free_action(action);
+ pcmk__filter_op_for_digest(data->params_all);
data->digest_all_calc = calculate_operation_digest(data->params_all,
op_version);
@@ -177,7 +185,7 @@ is_fence_param(xmlAttrPtr attr, void *user_data)
* \param[in] overrides Key/value hash table to override resource parameters
*/
static void
-calculate_secure_digest(op_digest_cache_t *data, const pe_resource_t *rsc,
+calculate_secure_digest(op_digest_cache_t *data, const pcmk_resource_t *rsc,
GHashTable *params, const xmlNode *xml_op,
const char *op_version, GHashTable *overrides)
{
@@ -288,17 +296,17 @@ calculate_restart_digest(op_digest_cache_t *data, const xmlNode *xml_op,
* \param[in] xml_op XML of operation in CIB status (if available)
* \param[in] overrides Key/value table to override resource parameters
* \param[in] calc_secure Whether to calculate secure digest
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \return Pointer to new digest cache entry (or NULL on memory error)
* \note It is the caller's responsibility to free the result using
* pe__free_digests().
*/
op_digest_cache_t *
-pe__calculate_digests(pe_resource_t *rsc, const char *task, guint *interval_ms,
- const pe_node_t *node, const xmlNode *xml_op,
- GHashTable *overrides, bool calc_secure,
- pe_working_set_t *data_set)
+pe__calculate_digests(pcmk_resource_t *rsc, const char *task,
+ guint *interval_ms, const pcmk_node_t *node,
+ const xmlNode *xml_op, GHashTable *overrides,
+ bool calc_secure, pcmk_scheduler_t *scheduler)
{
op_digest_cache_t *data = calloc(1, sizeof(op_digest_cache_t));
const char *op_version = NULL;
@@ -308,23 +316,23 @@ pe__calculate_digests(pe_resource_t *rsc, const char *task, guint *interval_ms,
return NULL;
}
- data->rc = RSC_DIGEST_MATCH;
+ data->rc = pcmk__digest_match;
if (xml_op != NULL) {
op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
}
- if (op_version == NULL && data_set != NULL && data_set->input != NULL) {
- op_version = crm_element_value(data_set->input, XML_ATTR_CRM_VERSION);
+ if (op_version == NULL && scheduler != NULL && scheduler->input != NULL) {
+ op_version = crm_element_value(scheduler->input, XML_ATTR_CRM_VERSION);
}
if (op_version == NULL) {
op_version = CRM_FEATURE_SET;
}
- params = pe_rsc_params(rsc, node, data_set);
+ params = pe_rsc_params(rsc, node, scheduler);
calculate_main_digest(data, rsc, node, params, task, interval_ms, xml_op,
- op_version, overrides, data_set);
+ op_version, overrides, scheduler);
if (calc_secure) {
calculate_secure_digest(data, rsc, params, xml_op, op_version,
overrides);
@@ -343,14 +351,14 @@ pe__calculate_digests(pe_resource_t *rsc, const char *task, guint *interval_ms,
* \param[in,out] node Node action was performed on
* \param[in] xml_op XML of operation in CIB status (if available)
* \param[in] calc_secure Whether to calculate secure digest
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \return Pointer to node's digest cache entry
*/
static op_digest_cache_t *
-rsc_action_digest(pe_resource_t *rsc, const char *task, guint interval_ms,
- pe_node_t *node, const xmlNode *xml_op,
- bool calc_secure, pe_working_set_t *data_set)
+rsc_action_digest(pcmk_resource_t *rsc, const char *task, guint interval_ms,
+ pcmk_node_t *node, const xmlNode *xml_op,
+ bool calc_secure, pcmk_scheduler_t *scheduler)
{
op_digest_cache_t *data = NULL;
char *key = pcmk__op_key(rsc->id, task, interval_ms);
@@ -358,7 +366,7 @@ rsc_action_digest(pe_resource_t *rsc, const char *task, guint interval_ms,
data = g_hash_table_lookup(node->details->digest_cache, key);
if (data == NULL) {
data = pe__calculate_digests(rsc, task, &interval_ms, node, xml_op,
- NULL, calc_secure, data_set);
+ NULL, calc_secure, scheduler);
CRM_ASSERT(data != NULL);
g_hash_table_insert(node->details->digest_cache, strdup(key), data);
}
@@ -370,16 +378,16 @@ rsc_action_digest(pe_resource_t *rsc, const char *task, guint interval_ms,
* \internal
* \brief Calculate operation digests and compare against an XML history entry
*
- * \param[in,out] rsc Resource to check
- * \param[in] xml_op Resource history XML
- * \param[in,out] node Node to use for digest calculation
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] rsc Resource to check
+ * \param[in] xml_op Resource history XML
+ * \param[in,out] node Node to use for digest calculation
+ * \param[in,out] scheduler Scheduler data
*
* \return Pointer to node's digest cache entry, with comparison result set
*/
op_digest_cache_t *
-rsc_action_digest_cmp(pe_resource_t *rsc, const xmlNode *xml_op,
- pe_node_t *node, pe_working_set_t *data_set)
+rsc_action_digest_cmp(pcmk_resource_t *rsc, const xmlNode *xml_op,
+ pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
op_digest_cache_t *data = NULL;
guint interval_ms = 0;
@@ -397,8 +405,9 @@ rsc_action_digest_cmp(pe_resource_t *rsc, const xmlNode *xml_op,
crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
data = rsc_action_digest(rsc, task, interval_ms, node, xml_op,
- pcmk_is_set(data_set->flags, pe_flag_sanitized),
- data_set);
+ pcmk_is_set(scheduler->flags,
+ pcmk_sched_sanitized),
+ scheduler);
if (digest_restart && data->digest_restart_calc && strcmp(data->digest_restart_calc, digest_restart) != 0) {
pe_rsc_info(rsc, "Parameters to %ums-interval %s action for %s on %s "
@@ -408,11 +417,11 @@ rsc_action_digest_cmp(pe_resource_t *rsc, const xmlNode *xml_op,
data->digest_restart_calc,
op_version,
crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
- data->rc = RSC_DIGEST_RESTART;
+ data->rc = pcmk__digest_restart;
} else if (digest_all == NULL) {
/* it is unknown what the previous op digest was */
- data->rc = RSC_DIGEST_UNKNOWN;
+ data->rc = pcmk__digest_unknown;
} else if (strcmp(digest_all, data->digest_all_calc) != 0) {
/* Given a non-recurring operation with extra parameters configured,
@@ -421,11 +430,10 @@ rsc_action_digest_cmp(pe_resource_t *rsc, const xmlNode *xml_op,
* So that it ensures any changes of the extra parameters get applied
* for this specific operation, and the digests calculated for the
* resulting lrm_rsc_op will be correct.
- * Preserve the implied rc RSC_DIGEST_RESTART for the case that the main
- * digest doesn't match.
+ * Preserve the implied rc pcmk__digest_restart for the case that the
+ * main digest doesn't match.
*/
- if (interval_ms == 0
- && data->rc == RSC_DIGEST_RESTART) {
+ if ((interval_ms == 0) && (data->rc == pcmk__digest_restart)) {
pe_rsc_info(rsc, "Parameters containing extra ones to %ums-interval"
" %s action for %s on %s "
"changed: hash was %s vs. now %s (restart:%s) %s",
@@ -442,11 +450,11 @@ rsc_action_digest_cmp(pe_resource_t *rsc, const xmlNode *xml_op,
(interval_ms > 0)? "reschedule" : "reload",
op_version,
crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
- data->rc = RSC_DIGEST_ALL;
+ data->rc = pcmk__digest_mismatch;
}
} else {
- data->rc = RSC_DIGEST_MATCH;
+ data->rc = pcmk__digest_match;
}
return data;
}
@@ -522,34 +530,34 @@ unfencing_digest_matches(const char *rsc_id, const char *agent,
* \internal
* \brief Calculate fence device digests and digest comparison result
*
- * \param[in,out] rsc Fence device resource
- * \param[in] agent Fence device's agent type
- * \param[in,out] node Node with digest cache to use
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] rsc Fence device resource
+ * \param[in] agent Fence device's agent type
+ * \param[in,out] node Node with digest cache to use
+ * \param[in,out] scheduler Scheduler data
*
* \return Node's digest cache entry
*/
op_digest_cache_t *
-pe__compare_fencing_digest(pe_resource_t *rsc, const char *agent,
- pe_node_t *node, pe_working_set_t *data_set)
+pe__compare_fencing_digest(pcmk_resource_t *rsc, const char *agent,
+ pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
const char *node_summary = NULL;
// Calculate device's current parameter digests
op_digest_cache_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, 0U,
- node, NULL, TRUE, data_set);
+ node, NULL, TRUE, scheduler);
// Check whether node has special unfencing summary node attribute
node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_ALL);
if (node_summary == NULL) {
- data->rc = RSC_DIGEST_UNKNOWN;
+ data->rc = pcmk__digest_unknown;
return data;
}
// Check whether full parameter digest matches
if (unfencing_digest_matches(rsc->id, agent, data->digest_all_calc,
node_summary)) {
- data->rc = RSC_DIGEST_MATCH;
+ data->rc = pcmk__digest_match;
return data;
}
@@ -557,9 +565,9 @@ pe__compare_fencing_digest(pe_resource_t *rsc, const char *agent,
node_summary = pe_node_attribute_raw(node, CRM_ATTR_DIGESTS_SECURE);
if (unfencing_digest_matches(rsc->id, agent, data->digest_secure_calc,
node_summary)) {
- data->rc = RSC_DIGEST_MATCH;
- if (!pcmk__is_daemon && data_set->priv != NULL) {
- pcmk__output_t *out = data_set->priv;
+ data->rc = pcmk__digest_match;
+ if (!pcmk__is_daemon && scheduler->priv != NULL) {
+ pcmk__output_t *out = scheduler->priv;
out->info(out, "Only 'private' parameters to %s "
"for unfencing %s changed", rsc->id,
pe__node_name(node));
@@ -568,10 +576,12 @@ pe__compare_fencing_digest(pe_resource_t *rsc, const char *agent,
}
// Parameters don't match
- data->rc = RSC_DIGEST_ALL;
- if (pcmk_is_set(data_set->flags, pe_flag_sanitized) && data->digest_secure_calc) {
- if (data_set->priv != NULL) {
- pcmk__output_t *out = data_set->priv;
+ data->rc = pcmk__digest_mismatch;
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_sanitized)
+ && (data->digest_secure_calc != NULL)) {
+
+ if (scheduler->priv != NULL) {
+ pcmk__output_t *out = scheduler->priv;
char *digest = create_unfencing_summary(rsc->id, agent,
data->digest_secure_calc);
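The pe_digest.c hunks above replace the RSC_DIGEST_* result codes with pcmk__digest_match, pcmk__digest_mismatch, pcmk__digest_restart, and pcmk__digest_unknown. As a hedged sketch of how a caller might interpret the comparison result stored in op_digest_cache_t (the enum name pcmk__digest_result is inferred from these constants and may differ):

/* Illustrative sketch (not part of the patch): mapping the renamed digest
 * comparison results to the scheduler reactions described in
 * rsc_action_digest_cmp() above. */
static const char *
digest_result_to_text(enum pcmk__digest_result rc)
{
    switch (rc) {
        case pcmk__digest_match:
            return "recorded parameters match the current configuration";
        case pcmk__digest_restart:
            return "restart-sensitive parameters changed; force a restart";
        case pcmk__digest_mismatch:
            return "parameters changed; reload or reschedule the action";
        case pcmk__digest_unknown:
            return "no previous digest recorded; nothing to compare";
        default:
            return "unrecognized digest comparison result";
    }
}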
diff --git a/lib/pengine/pe_health.c b/lib/pengine/pe_health.c
index 6419fdf..93028ae 100644
--- a/lib/pengine/pe_health.c
+++ b/lib/pengine/pe_health.c
@@ -17,12 +17,12 @@
* \internal
* \brief Set the node health values to use for "red", "yellow", and "green"
*
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*/
void
-pe__unpack_node_health_scores(pe_working_set_t *data_set)
+pe__unpack_node_health_scores(pcmk_scheduler_t *scheduler)
{
- switch (pe__health_strategy(data_set)) {
+ switch (pe__health_strategy(scheduler)) {
case pcmk__health_strategy_none:
pcmk__score_red = 0;
pcmk__score_yellow = 0;
@@ -43,11 +43,11 @@ pe__unpack_node_health_scores(pe_working_set_t *data_set)
default: // progressive or custom
pcmk__score_red = pe__health_score(PCMK__OPT_NODE_HEALTH_RED,
- data_set);
+ scheduler);
pcmk__score_green = pe__health_score(PCMK__OPT_NODE_HEALTH_GREEN,
- data_set);
+ scheduler);
pcmk__score_yellow = pe__health_score(PCMK__OPT_NODE_HEALTH_YELLOW,
- data_set);
+ scheduler);
break;
}
@@ -93,7 +93,7 @@ add_node_health_value(gpointer key, gpointer value, gpointer user_data)
* \return Sum of all health attribute scores of \p node plus \p base_health
*/
int
-pe__sum_node_health_scores(const pe_node_t *node, int base_health)
+pe__sum_node_health_scores(const pcmk_node_t *node, int base_health)
{
CRM_ASSERT(node != NULL);
g_hash_table_foreach(node->details->attrs, add_node_health_value,
@@ -111,7 +111,7 @@ pe__sum_node_health_scores(const pe_node_t *node, int base_health)
* otherwise 0 if any attribute is yellow, otherwise a positive value.
*/
int
-pe__node_health(pe_node_t *node)
+pe__node_health(pcmk_node_t *node)
{
GHashTableIter iter;
const char *name = NULL;
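Between the health and notification changes, a brief illustration of the contract pe__node_health() documents above: a negative sum means at least one "red" health attribute, zero means the worst attribute is "yellow", and a positive value means everything is "green". The helper below is a sketch only; the wrapper name is an assumption and nothing here is introduced by the patch itself.

/* Illustrative sketch (not part of the patch): reading pe__node_health()
 * according to the return-value contract documented in the hunk above. */
static const char *
node_health_to_text(pcmk_node_t *node)
{
    int health = pe__node_health(node);

    if (health < 0) {
        return "red";       // at least one health attribute is red
    } else if (health == 0) {
        return "yellow";    // no red attributes, but at least one yellow
    }
    return "green";         // all health attributes are green
}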
diff --git a/lib/pengine/pe_notif.c b/lib/pengine/pe_notif.c
index 7ed490f..0e1e239 100644
--- a/lib/pengine/pe_notif.c
+++ b/lib/pengine/pe_notif.c
@@ -9,13 +9,15 @@
#include <crm_internal.h>
#include <crm/msg_xml.h>
+
+#include <crm/pengine/internal.h>
#include <pacemaker-internal.h>
#include "pe_status_private.h"
typedef struct notify_entry_s {
- const pe_resource_t *rsc;
- const pe_node_t *node;
+ const pcmk_resource_t *rsc;
+ const pcmk_node_t *node;
} notify_entry_t;
/*!
@@ -105,7 +107,7 @@ dup_notify_entry(const notify_entry_t *entry)
* \internal
* \brief Given a list of nodes, create strings with node names
*
- * \param[in] list List of nodes (as pe_node_t *)
+ * \param[in] list List of nodes (as pcmk_node_t *)
* \param[out] all_node_names If not NULL, will be set to space-separated list
* of the names of all nodes in \p list
* \param[out] host_node_names Same as \p all_node_names, except active
@@ -126,7 +128,7 @@ get_node_names(const GList *list, GString **all_node_names,
}
for (const GList *iter = list; iter != NULL; iter = iter->next) {
- const pe_node_t *node = (const pe_node_t *) iter->data;
+ const pcmk_node_t *node = (const pcmk_node_t *) iter->data;
if (node->details->uname == NULL) {
continue;
@@ -242,7 +244,7 @@ notify_entries_to_strings(GList *list, GString **rsc_names,
static void
copy_meta_to_notify(gpointer key, gpointer value, gpointer user_data)
{
- pe_action_t *notify = (pe_action_t *) user_data;
+ pcmk_action_t *notify = (pcmk_action_t *) user_data;
/* Any existing meta-attributes (for example, the action timeout) are for
* the notify action itself, so don't override those.
@@ -256,7 +258,8 @@ copy_meta_to_notify(gpointer key, gpointer value, gpointer user_data)
}
static void
-add_notify_data_to_action_meta(const notify_data_t *n_data, pe_action_t *action)
+add_notify_data_to_action_meta(const notify_data_t *n_data,
+ pcmk_action_t *action)
{
for (const GSList *item = n_data->keys; item; item = item->next) {
const pcmk_nvpair_t *nvpair = (const pcmk_nvpair_t *) item->data;
@@ -271,23 +274,23 @@ add_notify_data_to_action_meta(const notify_data_t *n_data, pe_action_t *action)
*
* \param[in,out] rsc Clone resource that notification is for
* \param[in] action Action to use in notify action key
- * \param[in] notif_action RSC_NOTIFY or RSC_NOTIFIED
+ * \param[in] notif_action PCMK_ACTION_NOTIFY or PCMK_ACTION_NOTIFIED
* \param[in] notif_type "pre", "post", "confirmed-pre", "confirmed-post"
*
* \return Newly created notify pseudo-action
*/
-static pe_action_t *
-new_notify_pseudo_action(pe_resource_t *rsc, const pe_action_t *action,
+static pcmk_action_t *
+new_notify_pseudo_action(pcmk_resource_t *rsc, const pcmk_action_t *action,
const char *notif_action, const char *notif_type)
{
- pe_action_t *notify = NULL;
+ pcmk_action_t *notify = NULL;
notify = custom_action(rsc,
pcmk__notify_key(rsc->id, notif_type, action->task),
notif_action, NULL,
- pcmk_is_set(action->flags, pe_action_optional),
- TRUE, rsc->cluster);
- pe__set_action_flags(notify, pe_action_pseudo);
+ pcmk_is_set(action->flags, pcmk_action_optional),
+ rsc->cluster);
+ pe__set_action_flags(notify, pcmk_action_pseudo);
add_hash_param(notify->meta, "notify_key_type", notif_type);
add_hash_param(notify->meta, "notify_key_operation", action->task);
return notify;
@@ -305,12 +308,13 @@ new_notify_pseudo_action(pe_resource_t *rsc, const pe_action_t *action,
*
* \return Newly created notify action
*/
-static pe_action_t *
-new_notify_action(pe_resource_t *rsc, const pe_node_t *node, pe_action_t *op,
- pe_action_t *notify_done, const notify_data_t *n_data)
+static pcmk_action_t *
+new_notify_action(pcmk_resource_t *rsc, const pcmk_node_t *node,
+ pcmk_action_t *op, pcmk_action_t *notify_done,
+ const notify_data_t *n_data)
{
char *key = NULL;
- pe_action_t *notify_action = NULL;
+ pcmk_action_t *notify_action = NULL;
const char *value = NULL;
const char *task = NULL;
const char *skip_reason = NULL;
@@ -324,7 +328,7 @@ new_notify_action(pe_resource_t *rsc, const pe_node_t *node, pe_action_t *op,
skip_reason = "no parent notification";
} else if (!node->details->online) {
skip_reason = "node offline";
- } else if (!pcmk_is_set(op->flags, pe_action_runnable)) {
+ } else if (!pcmk_is_set(op->flags, pcmk_action_runnable)) {
skip_reason = "original action not runnable";
}
if (skip_reason != NULL) {
@@ -342,16 +346,16 @@ new_notify_action(pe_resource_t *rsc, const pe_node_t *node, pe_action_t *op,
// Create the notify action
key = pcmk__notify_key(rsc->id, value, task);
notify_action = custom_action(rsc, key, op->task, node,
- pcmk_is_set(op->flags, pe_action_optional),
- TRUE, rsc->cluster);
+ pcmk_is_set(op->flags, pcmk_action_optional),
+ rsc->cluster);
// Add meta-data to notify action
g_hash_table_foreach(op->meta, copy_meta_to_notify, notify_action);
add_notify_data_to_action_meta(n_data, notify_action);
// Order notify after original action and before parent notification
- order_actions(op, notify_action, pe_order_optional);
- order_actions(notify_action, notify_done, pe_order_optional);
+ order_actions(op, notify_action, pcmk__ar_ordered);
+ order_actions(notify_action, notify_done, pcmk__ar_ordered);
return notify_action;
}
@@ -364,10 +368,10 @@ new_notify_action(pe_resource_t *rsc, const pe_node_t *node, pe_action_t *op,
* \param[in,out] n_data Notification values to add to action meta-data
*/
static void
-new_post_notify_action(pe_resource_t *rsc, const pe_node_t *node,
+new_post_notify_action(pcmk_resource_t *rsc, const pcmk_node_t *node,
notify_data_t *n_data)
{
- pe_action_t *notify = NULL;
+ pcmk_action_t *notify = NULL;
CRM_ASSERT(n_data != NULL);
@@ -383,16 +387,16 @@ new_post_notify_action(pe_resource_t *rsc, const pe_node_t *node,
return;
}
for (GList *iter = rsc->actions; iter != NULL; iter = iter->next) {
- pe_action_t *mon = (pe_action_t *) iter->data;
+ pcmk_action_t *mon = (pcmk_action_t *) iter->data;
const char *interval_ms_s = NULL;
interval_ms_s = g_hash_table_lookup(mon->meta,
XML_LRM_ATTR_INTERVAL_MS);
if (pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches)
- || pcmk__str_eq(mon->task, RSC_CANCEL, pcmk__str_none)) {
+ || pcmk__str_eq(mon->task, PCMK_ACTION_CANCEL, pcmk__str_none)) {
continue; // Not a recurring monitor
}
- order_actions(n_data->post_done, mon, pe_order_optional);
+ order_actions(n_data->post_done, mon, pcmk__ar_ordered);
}
}
@@ -428,12 +432,12 @@ new_post_notify_action(pe_resource_t *rsc, const pe_node_t *node,
* \return Newly created notification data
*/
notify_data_t *
-pe__action_notif_pseudo_ops(pe_resource_t *rsc, const char *task,
- pe_action_t *action, pe_action_t *complete)
+pe__action_notif_pseudo_ops(pcmk_resource_t *rsc, const char *task,
+ pcmk_action_t *action, pcmk_action_t *complete)
{
notify_data_t *n_data = NULL;
- if (!pcmk_is_set(rsc->flags, pe_rsc_notify)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_notify)) {
return NULL;
}
@@ -445,60 +449,63 @@ pe__action_notif_pseudo_ops(pe_resource_t *rsc, const char *task,
if (action != NULL) { // Need "pre-" pseudo-actions
// Create "pre-" notify pseudo-action for clone
- n_data->pre = new_notify_pseudo_action(rsc, action, RSC_NOTIFY, "pre");
- pe__set_action_flags(n_data->pre, pe_action_runnable);
+ n_data->pre = new_notify_pseudo_action(rsc, action, PCMK_ACTION_NOTIFY,
+ "pre");
+ pe__set_action_flags(n_data->pre, pcmk_action_runnable);
add_hash_param(n_data->pre->meta, "notify_type", "pre");
add_hash_param(n_data->pre->meta, "notify_operation", n_data->action);
// Create "pre-" notifications complete pseudo-action for clone
- n_data->pre_done = new_notify_pseudo_action(rsc, action, RSC_NOTIFIED,
+ n_data->pre_done = new_notify_pseudo_action(rsc, action,
+ PCMK_ACTION_NOTIFIED,
"confirmed-pre");
- pe__set_action_flags(n_data->pre_done, pe_action_runnable);
+ pe__set_action_flags(n_data->pre_done, pcmk_action_runnable);
add_hash_param(n_data->pre_done->meta, "notify_type", "pre");
add_hash_param(n_data->pre_done->meta,
"notify_operation", n_data->action);
// Order "pre-" -> "pre-" complete -> original action
- order_actions(n_data->pre, n_data->pre_done, pe_order_optional);
- order_actions(n_data->pre_done, action, pe_order_optional);
+ order_actions(n_data->pre, n_data->pre_done, pcmk__ar_ordered);
+ order_actions(n_data->pre_done, action, pcmk__ar_ordered);
}
if (complete != NULL) { // Need "post-" pseudo-actions
// Create "post-" notify pseudo-action for clone
- n_data->post = new_notify_pseudo_action(rsc, complete, RSC_NOTIFY,
- "post");
+ n_data->post = new_notify_pseudo_action(rsc, complete,
+ PCMK_ACTION_NOTIFY, "post");
n_data->post->priority = INFINITY;
- if (pcmk_is_set(complete->flags, pe_action_runnable)) {
- pe__set_action_flags(n_data->post, pe_action_runnable);
+ if (pcmk_is_set(complete->flags, pcmk_action_runnable)) {
+ pe__set_action_flags(n_data->post, pcmk_action_runnable);
} else {
- pe__clear_action_flags(n_data->post, pe_action_runnable);
+ pe__clear_action_flags(n_data->post, pcmk_action_runnable);
}
add_hash_param(n_data->post->meta, "notify_type", "post");
add_hash_param(n_data->post->meta, "notify_operation", n_data->action);
// Create "post-" notifications complete pseudo-action for clone
n_data->post_done = new_notify_pseudo_action(rsc, complete,
- RSC_NOTIFIED,
+ PCMK_ACTION_NOTIFIED,
"confirmed-post");
n_data->post_done->priority = INFINITY;
- if (pcmk_is_set(complete->flags, pe_action_runnable)) {
- pe__set_action_flags(n_data->post_done, pe_action_runnable);
+ if (pcmk_is_set(complete->flags, pcmk_action_runnable)) {
+ pe__set_action_flags(n_data->post_done, pcmk_action_runnable);
} else {
- pe__clear_action_flags(n_data->post_done, pe_action_runnable);
+ pe__clear_action_flags(n_data->post_done, pcmk_action_runnable);
}
add_hash_param(n_data->post_done->meta, "notify_type", "post");
add_hash_param(n_data->post_done->meta,
"notify_operation", n_data->action);
// Order original action complete -> "post-" -> "post-" complete
- order_actions(complete, n_data->post, pe_order_implies_then);
- order_actions(n_data->post, n_data->post_done, pe_order_implies_then);
+ order_actions(complete, n_data->post, pcmk__ar_first_implies_then);
+ order_actions(n_data->post, n_data->post_done,
+ pcmk__ar_first_implies_then);
}
// If we created both, order "pre-" complete -> "post-"
if ((action != NULL) && (complete != NULL)) {
- order_actions(n_data->pre_done, n_data->post, pe_order_optional);
+ order_actions(n_data->pre_done, n_data->post, pcmk__ar_ordered);
}
return n_data;
}
@@ -514,7 +521,7 @@ pe__action_notif_pseudo_ops(pe_resource_t *rsc, const char *task,
* \note The caller is responsible for freeing the return value.
*/
static notify_entry_t *
-new_notify_entry(const pe_resource_t *rsc, const pe_node_t *node)
+new_notify_entry(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
notify_entry_t *entry = calloc(1, sizeof(notify_entry_t));
@@ -533,12 +540,12 @@ new_notify_entry(const pe_resource_t *rsc, const pe_node_t *node)
* \param[in,out] n_data Notification data for clone
*/
static void
-collect_resource_data(const pe_resource_t *rsc, bool activity,
+collect_resource_data(const pcmk_resource_t *rsc, bool activity,
notify_data_t *n_data)
{
const GList *iter = NULL;
notify_entry_t *entry = NULL;
- const pe_node_t *node = NULL;
+ const pcmk_node_t *node = NULL;
if (n_data == NULL) {
return;
@@ -551,7 +558,7 @@ collect_resource_data(const pe_resource_t *rsc, bool activity,
// If this is a clone, call recursively for each instance
if (rsc->children != NULL) {
for (iter = rsc->children; iter != NULL; iter = iter->next) {
- const pe_resource_t *child = (const pe_resource_t *) iter->data;
+ const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data;
collect_resource_data(child, activity, n_data);
}
@@ -567,21 +574,21 @@ collect_resource_data(const pe_resource_t *rsc, bool activity,
// Add notification indicating the resource state
switch (rsc->role) {
- case RSC_ROLE_STOPPED:
+ case pcmk_role_stopped:
n_data->inactive = g_list_prepend(n_data->inactive, entry);
break;
- case RSC_ROLE_STARTED:
+ case pcmk_role_started:
n_data->active = g_list_prepend(n_data->active, entry);
break;
- case RSC_ROLE_UNPROMOTED:
+ case pcmk_role_unpromoted:
n_data->unpromoted = g_list_prepend(n_data->unpromoted, entry);
n_data->active = g_list_prepend(n_data->active,
dup_notify_entry(entry));
break;
- case RSC_ROLE_PROMOTED:
+ case pcmk_role_promoted:
n_data->promoted = g_list_prepend(n_data->promoted, entry);
n_data->active = g_list_prepend(n_data->active,
dup_notify_entry(entry));
@@ -601,30 +608,31 @@ collect_resource_data(const pe_resource_t *rsc, bool activity,
// Add notification entries for each of the resource's actions
for (iter = rsc->actions; iter != NULL; iter = iter->next) {
- const pe_action_t *op = (const pe_action_t *) iter->data;
+ const pcmk_action_t *op = (const pcmk_action_t *) iter->data;
- if (!pcmk_is_set(op->flags, pe_action_optional) && (op->node != NULL)) {
+ if (!pcmk_is_set(op->flags, pcmk_action_optional)
+ && (op->node != NULL)) {
enum action_tasks task = text2task(op->task);
- if ((task == stop_rsc) && op->node->details->unclean) {
+ if ((task == pcmk_action_stop) && op->node->details->unclean) {
// Create anyway (additional noise if node can't be fenced)
- } else if (!pcmk_is_set(op->flags, pe_action_runnable)) {
+ } else if (!pcmk_is_set(op->flags, pcmk_action_runnable)) {
continue;
}
entry = new_notify_entry(rsc, op->node);
switch (task) {
- case start_rsc:
+ case pcmk_action_start:
n_data->start = g_list_prepend(n_data->start, entry);
break;
- case stop_rsc:
+ case pcmk_action_stop:
n_data->stop = g_list_prepend(n_data->stop, entry);
break;
- case action_promote:
+ case pcmk_action_promote:
n_data->promote = g_list_prepend(n_data->promote, entry);
break;
- case action_demote:
+ case pcmk_action_demote:
n_data->demote = g_list_prepend(n_data->demote, entry);
break;
default:
@@ -661,7 +669,7 @@ collect_resource_data(const pe_resource_t *rsc, bool activity,
* \param[in,out] n_data Notification data
*/
static void
-add_notif_keys(const pe_resource_t *rsc, notify_data_t *n_data)
+add_notif_keys(const pcmk_resource_t *rsc, notify_data_t *n_data)
{
bool required = false; // Whether to make notify actions required
GString *rsc_list = NULL;
@@ -673,14 +681,14 @@ add_notif_keys(const pe_resource_t *rsc, notify_data_t *n_data)
n_data->stop = notify_entries_to_strings(n_data->stop,
&rsc_list, &node_list);
if ((strcmp(" ", (const char *) rsc_list->str) != 0)
- && pcmk__str_eq(n_data->action, RSC_STOP, pcmk__str_none)) {
+ && pcmk__str_eq(n_data->action, PCMK_ACTION_STOP, pcmk__str_none)) {
required = true;
}
add_notify_env_free_gs(n_data, "notify_stop_resource", rsc_list);
add_notify_env_free_gs(n_data, "notify_stop_uname", node_list);
if ((n_data->start != NULL)
- && pcmk__str_eq(n_data->action, RSC_START, pcmk__str_none)) {
+ && pcmk__str_eq(n_data->action, PCMK_ACTION_START, pcmk__str_none)) {
required = true;
}
n_data->start = notify_entries_to_strings(n_data->start,
@@ -689,7 +697,7 @@ add_notif_keys(const pe_resource_t *rsc, notify_data_t *n_data)
add_notify_env_free_gs(n_data, "notify_start_uname", node_list);
if ((n_data->demote != NULL)
- && pcmk__str_eq(n_data->action, RSC_DEMOTE, pcmk__str_none)) {
+ && pcmk__str_eq(n_data->action, PCMK_ACTION_DEMOTE, pcmk__str_none)) {
required = true;
}
n_data->demote = notify_entries_to_strings(n_data->demote,
@@ -698,7 +706,7 @@ add_notif_keys(const pe_resource_t *rsc, notify_data_t *n_data)
add_notify_env_free_gs(n_data, "notify_demote_uname", node_list);
if ((n_data->promote != NULL)
- && pcmk__str_eq(n_data->action, RSC_PROMOTE, pcmk__str_none)) {
+ && pcmk__str_eq(n_data->action, PCMK_ACTION_PROMOTE, pcmk__str_none)) {
required = true;
}
n_data->promote = notify_entries_to_strings(n_data->promote,
@@ -755,13 +763,13 @@ add_notif_keys(const pe_resource_t *rsc, notify_data_t *n_data)
add_notify_env_free_gs(n_data, "notify_all_uname", node_list);
if (required && (n_data->pre != NULL)) {
- pe__clear_action_flags(n_data->pre, pe_action_optional);
- pe__clear_action_flags(n_data->pre_done, pe_action_optional);
+ pe__clear_action_flags(n_data->pre, pcmk_action_optional);
+ pe__clear_action_flags(n_data->pre_done, pcmk_action_optional);
}
if (required && (n_data->post != NULL)) {
- pe__clear_action_flags(n_data->post, pe_action_optional);
- pe__clear_action_flags(n_data->post_done, pe_action_optional);
+ pe__clear_action_flags(n_data->post, pcmk_action_optional);
+ pe__clear_action_flags(n_data->post_done, pcmk_action_optional);
}
}
@@ -773,14 +781,15 @@ add_notif_keys(const pe_resource_t *rsc, notify_data_t *n_data)
*
* \return If action is behind a remote connection, connection's start
*/
-static pe_action_t *
-find_remote_start(pe_action_t *action)
+static pcmk_action_t *
+find_remote_start(pcmk_action_t *action)
{
if ((action != NULL) && (action->node != NULL)) {
- pe_resource_t *remote_rsc = action->node->details->remote_rsc;
+ pcmk_resource_t *remote_rsc = action->node->details->remote_rsc;
if (remote_rsc != NULL) {
- return find_first_action(remote_rsc->actions, NULL, RSC_START,
+ return find_first_action(remote_rsc->actions, NULL,
+ PCMK_ACTION_START,
NULL);
}
}
@@ -795,11 +804,11 @@ find_remote_start(pe_action_t *action)
* \param[in,out] n_data Clone notification data for some action
*/
static void
-create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
+create_notify_actions(pcmk_resource_t *rsc, notify_data_t *n_data)
{
GList *iter = NULL;
- pe_action_t *stop = NULL;
- pe_action_t *start = NULL;
+ pcmk_action_t *stop = NULL;
+ pcmk_action_t *start = NULL;
enum action_tasks task = text2task(n_data->action);
// If this is a clone, call recursively for each instance
@@ -810,14 +819,15 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
// Add notification meta-attributes to original actions
for (iter = rsc->actions; iter != NULL; iter = iter->next) {
- pe_action_t *op = (pe_action_t *) iter->data;
+ pcmk_action_t *op = (pcmk_action_t *) iter->data;
- if (!pcmk_is_set(op->flags, pe_action_optional) && (op->node != NULL)) {
+ if (!pcmk_is_set(op->flags, pcmk_action_optional)
+ && (op->node != NULL)) {
switch (text2task(op->task)) {
- case start_rsc:
- case stop_rsc:
- case action_promote:
- case action_demote:
+ case pcmk_action_start:
+ case pcmk_action_stop:
+ case pcmk_action_promote:
+ case pcmk_action_demote:
add_notify_data_to_action_meta(n_data, op);
break;
default:
@@ -828,7 +838,7 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
// Skip notify action itself if original action was not needed
switch (task) {
- case start_rsc:
+ case pcmk_action_start:
if (n_data->start == NULL) {
pe_rsc_trace(rsc, "No notify action needed for %s %s",
rsc->id, n_data->action);
@@ -836,7 +846,7 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
}
break;
- case action_promote:
+ case pcmk_action_promote:
if (n_data->promote == NULL) {
pe_rsc_trace(rsc, "No notify action needed for %s %s",
rsc->id, n_data->action);
@@ -844,7 +854,7 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
}
break;
- case action_demote:
+ case pcmk_action_demote:
if (n_data->demote == NULL) {
pe_rsc_trace(rsc, "No notify action needed for %s %s",
rsc->id, n_data->action);
@@ -861,18 +871,19 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
rsc->id, n_data->action);
// Create notify actions for stop or demote
- if ((rsc->role != RSC_ROLE_STOPPED)
- && ((task == stop_rsc) || (task == action_demote))) {
+ if ((rsc->role != pcmk_role_stopped)
+ && ((task == pcmk_action_stop) || (task == pcmk_action_demote))) {
- stop = find_first_action(rsc->actions, NULL, RSC_STOP, NULL);
+ stop = find_first_action(rsc->actions, NULL, PCMK_ACTION_STOP, NULL);
for (iter = rsc->running_on; iter != NULL; iter = iter->next) {
- pe_node_t *current_node = (pe_node_t *) iter->data;
+ pcmk_node_t *current_node = (pcmk_node_t *) iter->data;
/* If a stop is a pseudo-action implied by fencing, don't try to
* notify the node getting fenced.
*/
- if ((stop != NULL) && pcmk_is_set(stop->flags, pe_action_pseudo)
+ if ((stop != NULL)
+ && pcmk_is_set(stop->flags, pcmk_action_pseudo)
&& (current_node->details->unclean
|| current_node->details->remote_requires_reset)) {
continue;
@@ -881,23 +892,23 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
new_notify_action(rsc, current_node, n_data->pre,
n_data->pre_done, n_data);
- if ((task == action_demote) || (stop == NULL)
- || pcmk_is_set(stop->flags, pe_action_optional)) {
+ if ((task == pcmk_action_demote) || (stop == NULL)
+ || pcmk_is_set(stop->flags, pcmk_action_optional)) {
new_post_notify_action(rsc, current_node, n_data);
}
}
}
// Create notify actions for start or promote
- if ((rsc->next_role != RSC_ROLE_STOPPED)
- && ((task == start_rsc) || (task == action_promote))) {
+ if ((rsc->next_role != pcmk_role_stopped)
+ && ((task == pcmk_action_start) || (task == pcmk_action_promote))) {
- start = find_first_action(rsc->actions, NULL, RSC_START, NULL);
+ start = find_first_action(rsc->actions, NULL, PCMK_ACTION_START, NULL);
if (start != NULL) {
- pe_action_t *remote_start = find_remote_start(start);
+ pcmk_action_t *remote_start = find_remote_start(start);
if ((remote_start != NULL)
- && !pcmk_is_set(remote_start->flags, pe_action_runnable)) {
+ && !pcmk_is_set(remote_start->flags, pcmk_action_runnable)) {
/* Start and promote actions for a clone instance behind
* a Pacemaker Remote connection happen after the
* connection starts. If the connection start is blocked, do
@@ -911,8 +922,8 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
role2text(rsc->next_role), rsc->id);
return;
}
- if ((task != start_rsc) || (start == NULL)
- || pcmk_is_set(start->flags, pe_action_optional)) {
+ if ((task != pcmk_action_start) || (start == NULL)
+ || pcmk_is_set(start->flags, pcmk_action_optional)) {
new_notify_action(rsc, rsc->allocated_to, n_data->pre,
n_data->pre_done, n_data);
@@ -929,7 +940,7 @@ create_notify_actions(pe_resource_t *rsc, notify_data_t *n_data)
* \param[in,out] n_data Clone notification data for some action
*/
void
-pe__create_action_notifications(pe_resource_t *rsc, notify_data_t *n_data)
+pe__create_action_notifications(pcmk_resource_t *rsc, notify_data_t *n_data)
{
if ((rsc == NULL) || (n_data == NULL)) {
return;
@@ -978,13 +989,14 @@ pe__free_action_notification_data(notify_data_t *n_data)
* \param[in,out] stonith_op Fencing action that implies \p stop
*/
void
-pe__order_notifs_after_fencing(const pe_action_t *stop, pe_resource_t *rsc,
- pe_action_t *stonith_op)
+pe__order_notifs_after_fencing(const pcmk_action_t *stop, pcmk_resource_t *rsc,
+ pcmk_action_t *stonith_op)
{
notify_data_t *n_data;
crm_info("Ordering notifications for implied %s after fencing", stop->uuid);
- n_data = pe__action_notif_pseudo_ops(rsc, RSC_STOP, NULL, stonith_op);
+ n_data = pe__action_notif_pseudo_ops(rsc, PCMK_ACTION_STOP, NULL,
+ stonith_op);
if (n_data != NULL) {
collect_resource_data(rsc, false, n_data);
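To close out the pe_notif.c changes: the ordering constraints built by pe__action_notif_pseudo_ops() are unchanged in substance; only the flag names move from pe_order_* to pcmk__ar_*. Below is a condensed restatement of those order_actions() calls, purely for illustration; the wrapper function and its bundling into one helper are assumptions, not part of the patch.

/* Illustrative sketch (not part of the patch): the notification ordering
 * established in pe__action_notif_pseudo_ops() above, using the renamed
 * pcmk__ar_* flags. */
static void
order_clone_notifications(notify_data_t *n_data, pcmk_action_t *action,
                          pcmk_action_t *complete)
{
    // "pre" notify -> "confirmed-pre" -> the real action
    order_actions(n_data->pre, n_data->pre_done, pcmk__ar_ordered);
    order_actions(n_data->pre_done, action, pcmk__ar_ordered);

    // action complete -> "post" notify -> "confirmed-post"
    order_actions(complete, n_data->post, pcmk__ar_first_implies_then);
    order_actions(n_data->post, n_data->post_done, pcmk__ar_first_implies_then);

    // when both phases exist, "confirmed-pre" must precede "post"
    order_actions(n_data->pre_done, n_data->post, pcmk__ar_ordered);
}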
diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c
index 68cc867..65f3c18 100644
--- a/lib/pengine/pe_output.c
+++ b/lib/pengine/pe_output.c
@@ -8,28 +8,31 @@
*/
#include <crm_internal.h>
+
#include <stdint.h>
+
#include <crm/common/xml_internal.h>
#include <crm/common/output.h>
+#include <crm/common/scheduler_internal.h>
#include <crm/cib/util.h>
#include <crm/msg_xml.h>
#include <crm/pengine/internal.h>
const char *
-pe__resource_description(const pe_resource_t *rsc, uint32_t show_opts)
+pe__resource_description(const pcmk_resource_t *rsc, uint32_t show_opts)
{
const char * desc = NULL;
// User-supplied description
- if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only|pcmk_show_description)
- || pcmk__list_of_multiple(rsc->running_on)) {
+ if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only|pcmk_show_description)) {
desc = crm_element_value(rsc->xml, XML_ATTR_DESC);
}
return desc;
}
/* Never display node attributes whose name starts with one of these prefixes */
-#define FILTER_STR { PCMK__FAIL_COUNT_PREFIX, PCMK__LAST_FAILURE_PREFIX, \
- "shutdown", "terminate", "standby", "#", NULL }
+#define FILTER_STR { PCMK__FAIL_COUNT_PREFIX, PCMK__LAST_FAILURE_PREFIX, \
+ "shutdown", PCMK_NODE_ATTR_TERMINATE, "standby", "#", \
+ NULL }
static int
compare_attribute(gconstpointer a, gconstpointer b)
@@ -47,7 +50,7 @@ compare_attribute(gconstpointer a, gconstpointer b)
*
* \param[in] node Node that ran this resource
* \param[in,out] rsc_list List of resources for this node
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
* \param[in] attrname Attribute to find
* \param[out] expected_score Expected value for this attribute
*
@@ -57,19 +60,20 @@ compare_attribute(gconstpointer a, gconstpointer b)
* or degraded.
*/
static bool
-add_extra_info(const pe_node_t *node, GList *rsc_list, pe_working_set_t *data_set,
- const char *attrname, int *expected_score)
+add_extra_info(const pcmk_node_t *node, GList *rsc_list,
+ pcmk_scheduler_t *scheduler, const char *attrname,
+ int *expected_score)
{
GList *gIter = NULL;
for (gIter = rsc_list; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data;
const char *type = g_hash_table_lookup(rsc->meta, "type");
const char *name = NULL;
GHashTable *params = NULL;
if (rsc->children != NULL) {
- if (add_extra_info(node, rsc->children, data_set, attrname,
+ if (add_extra_info(node, rsc->children, scheduler, attrname,
expected_score)) {
return true;
}
@@ -79,7 +83,7 @@ add_extra_info(const pe_node_t *node, GList *rsc_list, pe_working_set_t *data_se
continue;
}
- params = pe_rsc_params(rsc, node, data_set);
+ params = pe_rsc_params(rsc, node, scheduler);
name = g_hash_table_lookup(params, "name");
if (name == NULL) {
@@ -150,13 +154,15 @@ get_operation_list(xmlNode *rsc_entry) {
pcmk__scan_min_int(op_rc, &op_rc_i, 0);
/* Display 0-interval monitors as "probe" */
- if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)
+ if (pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)
&& pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) {
task = "probe";
}
/* Ignore notifies and some probes */
- if (pcmk__str_eq(task, CRMD_ACTION_NOTIFY, pcmk__str_casei) || (pcmk__str_eq(task, "probe", pcmk__str_casei) && (op_rc_i == 7))) {
+ if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_none)
+ || (pcmk__str_eq(task, "probe", pcmk__str_none)
+ && (op_rc_i == CRM_EX_NOT_RUNNING))) {
continue;
}
@@ -188,10 +194,10 @@ append_dump_text(gpointer key, gpointer value, gpointer user_data)
}
static const char *
-get_cluster_stack(pe_working_set_t *data_set)
+get_cluster_stack(pcmk_scheduler_t *scheduler)
{
xmlNode *stack = get_xpath_object("//nvpair[@name='cluster-infrastructure']",
- data_set->input, LOG_DEBUG);
+ scheduler->input, LOG_DEBUG);
return stack? crm_element_value(stack, XML_NVPAIR_ATTR_VALUE) : "unknown";
}
@@ -290,7 +296,7 @@ op_history_string(xmlNode *xml_op, const char *task, const char *interval_ms_s,
}
static char *
-resource_history_string(pe_resource_t *rsc, const char *rsc_id, bool all,
+resource_history_string(pcmk_resource_t *rsc, const char *rsc_id, bool all,
int failcount, time_t last_failure) {
char *buf = NULL;
@@ -325,27 +331,39 @@ resource_history_string(pe_resource_t *rsc, const char *rsc_id, bool all,
return buf;
}
+/*!
+ * \internal
+ * \brief Get a node's feature set for status display purposes
+ *
+ * \param[in] node Node to check
+ *
+ * \return String representation of feature set if the node is fully up (using
+ * "<3.15.1" for older nodes that don't set the #feature-set attribute),
+ * otherwise NULL
+ */
static const char *
-get_node_feature_set(pe_node_t *node) {
- const char *feature_set = NULL;
+get_node_feature_set(const pcmk_node_t *node)
+{
+ if (node->details->online && node->details->expected_up
+ && !pe__is_guest_or_remote_node(node)) {
- if (node->details->online && !pe__is_guest_or_remote_node(node)) {
- feature_set = g_hash_table_lookup(node->details->attrs,
- CRM_ATTR_FEATURE_SET);
- /* The feature set attribute is present since 3.15.1. If it is missing
- * then the node must be running an earlier version. */
- if (feature_set == NULL) {
- feature_set = "<3.15.1";
- }
+ const char *feature_set = g_hash_table_lookup(node->details->attrs,
+ CRM_ATTR_FEATURE_SET);
+
+ /* The feature set attribute is present since 3.15.1. If it is missing,
+ * then the node must be running an earlier version.
+ */
+ return pcmk__s(feature_set, "<3.15.1");
}
- return feature_set;
+ return NULL;
}
static bool
-is_mixed_version(pe_working_set_t *data_set) {
+is_mixed_version(pcmk_scheduler_t *scheduler)
+{
const char *feature_set = NULL;
- for (GList *gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = gIter->data;
+ for (GList *gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) {
+ pcmk_node_t *node = gIter->data;
const char *node_feature_set = get_node_feature_set(node);
if (node_feature_set != NULL) {
if (feature_set == NULL) {
@@ -359,7 +377,7 @@ is_mixed_version(pe_working_set_t *data_set) {
}
static char *
-formatted_xml_buf(pe_resource_t *rsc, bool raw)
+formatted_xml_buf(const pcmk_resource_t *rsc, bool raw)
{
if (raw) {
return dump_xml_formatted(rsc->orig_xml ? rsc->orig_xml : rsc->xml);
@@ -368,18 +386,18 @@ formatted_xml_buf(pe_resource_t *rsc, bool raw)
}
}
-PCMK__OUTPUT_ARGS("cluster-summary", "pe_working_set_t *",
+PCMK__OUTPUT_ARGS("cluster-summary", "pcmk_scheduler_t *",
"enum pcmk_pacemakerd_state", "uint32_t", "uint32_t")
static int
cluster_summary(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
enum pcmk_pacemakerd_state pcmkd_state =
(enum pcmk_pacemakerd_state) va_arg(args, int);
uint32_t section_opts = va_arg(args, uint32_t);
uint32_t show_opts = va_arg(args, uint32_t);
int rc = pcmk_rc_no_output;
- const char *stack_s = get_cluster_stack(data_set);
+ const char *stack_s = get_cluster_stack(scheduler);
if (pcmk_is_set(section_opts, pcmk_section_stack)) {
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
@@ -388,47 +406,52 @@ cluster_summary(pcmk__output_t *out, va_list args) {
if (pcmk_is_set(section_opts, pcmk_section_dc)) {
xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
- data_set->input, LOG_DEBUG);
+ scheduler->input, LOG_DEBUG);
const char *dc_version_s = dc_version?
crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
: NULL;
- const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
- char *dc_name = data_set->dc_node ? pe__node_display_name(data_set->dc_node, pcmk_is_set(show_opts, pcmk_show_node_id)) : NULL;
- bool mixed_version = is_mixed_version(data_set);
+ const char *quorum = crm_element_value(scheduler->input,
+ XML_ATTR_HAVE_QUORUM);
+ char *dc_name = scheduler->dc_node? pe__node_display_name(scheduler->dc_node, pcmk_is_set(show_opts, pcmk_show_node_id)) : NULL;
+ bool mixed_version = is_mixed_version(scheduler);
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
- out->message(out, "cluster-dc", data_set->dc_node, quorum,
+ out->message(out, "cluster-dc", scheduler->dc_node, quorum,
dc_version_s, dc_name, mixed_version);
free(dc_name);
}
if (pcmk_is_set(section_opts, pcmk_section_times)) {
- const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
- const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
- const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
- const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
+ const char *last_written = crm_element_value(scheduler->input,
+ XML_CIB_ATTR_WRITTEN);
+ const char *user = crm_element_value(scheduler->input,
+ XML_ATTR_UPDATE_USER);
+ const char *client = crm_element_value(scheduler->input,
+ XML_ATTR_UPDATE_CLIENT);
+ const char *origin = crm_element_value(scheduler->input,
+ XML_ATTR_UPDATE_ORIG);
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
out->message(out, "cluster-times",
- data_set->localhost, last_written, user, client, origin);
+ scheduler->localhost, last_written, user, client, origin);
}
if (pcmk_is_set(section_opts, pcmk_section_counts)) {
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
- out->message(out, "cluster-counts", g_list_length(data_set->nodes),
- data_set->ninstances, data_set->disabled_resources,
- data_set->blocked_resources);
+ out->message(out, "cluster-counts", g_list_length(scheduler->nodes),
+ scheduler->ninstances, scheduler->disabled_resources,
+ scheduler->blocked_resources);
}
if (pcmk_is_set(section_opts, pcmk_section_options)) {
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
- out->message(out, "cluster-options", data_set);
+ out->message(out, "cluster-options", scheduler);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
if (pcmk_is_set(section_opts, pcmk_section_maint_mode)) {
- if (out->message(out, "maint-mode", data_set->flags) == pcmk_rc_ok) {
+ if (out->message(out, "maint-mode", scheduler->flags) == pcmk_rc_ok) {
rc = pcmk_rc_ok;
}
}
@@ -436,18 +459,18 @@ cluster_summary(pcmk__output_t *out, va_list args) {
return rc;
}
-PCMK__OUTPUT_ARGS("cluster-summary", "pe_working_set_t *",
+PCMK__OUTPUT_ARGS("cluster-summary", "pcmk_scheduler_t *",
"enum pcmk_pacemakerd_state", "uint32_t", "uint32_t")
static int
cluster_summary_html(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
enum pcmk_pacemakerd_state pcmkd_state =
(enum pcmk_pacemakerd_state) va_arg(args, int);
uint32_t section_opts = va_arg(args, uint32_t);
uint32_t show_opts = va_arg(args, uint32_t);
int rc = pcmk_rc_no_output;
- const char *stack_s = get_cluster_stack(data_set);
+ const char *stack_s = get_cluster_stack(scheduler);
if (pcmk_is_set(section_opts, pcmk_section_stack)) {
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
@@ -455,38 +478,44 @@ cluster_summary_html(pcmk__output_t *out, va_list args) {
}
/* Always print DC if none, even if not requested */
- if (data_set->dc_node == NULL || pcmk_is_set(section_opts, pcmk_section_dc)) {
+ if ((scheduler->dc_node == NULL)
+ || pcmk_is_set(section_opts, pcmk_section_dc)) {
xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
- data_set->input, LOG_DEBUG);
+ scheduler->input, LOG_DEBUG);
const char *dc_version_s = dc_version?
crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
: NULL;
- const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
- char *dc_name = data_set->dc_node ? pe__node_display_name(data_set->dc_node, pcmk_is_set(show_opts, pcmk_show_node_id)) : NULL;
- bool mixed_version = is_mixed_version(data_set);
+ const char *quorum = crm_element_value(scheduler->input,
+ XML_ATTR_HAVE_QUORUM);
+ char *dc_name = scheduler->dc_node? pe__node_display_name(scheduler->dc_node, pcmk_is_set(show_opts, pcmk_show_node_id)) : NULL;
+ bool mixed_version = is_mixed_version(scheduler);
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
- out->message(out, "cluster-dc", data_set->dc_node, quorum,
+ out->message(out, "cluster-dc", scheduler->dc_node, quorum,
dc_version_s, dc_name, mixed_version);
free(dc_name);
}
if (pcmk_is_set(section_opts, pcmk_section_times)) {
- const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
- const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
- const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
- const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
+ const char *last_written = crm_element_value(scheduler->input,
+ XML_CIB_ATTR_WRITTEN);
+ const char *user = crm_element_value(scheduler->input,
+ XML_ATTR_UPDATE_USER);
+ const char *client = crm_element_value(scheduler->input,
+ XML_ATTR_UPDATE_CLIENT);
+ const char *origin = crm_element_value(scheduler->input,
+ XML_ATTR_UPDATE_ORIG);
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
out->message(out, "cluster-times",
- data_set->localhost, last_written, user, client, origin);
+ scheduler->localhost, last_written, user, client, origin);
}
if (pcmk_is_set(section_opts, pcmk_section_counts)) {
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
- out->message(out, "cluster-counts", g_list_length(data_set->nodes),
- data_set->ninstances, data_set->disabled_resources,
- data_set->blocked_resources);
+ out->message(out, "cluster-counts", g_list_length(scheduler->nodes),
+ scheduler->ninstances, scheduler->disabled_resources,
+ scheduler->blocked_resources);
}
if (pcmk_is_set(section_opts, pcmk_section_options)) {
@@ -497,13 +526,13 @@ cluster_summary_html(pcmk__output_t *out, va_list args) {
PCMK__OUTPUT_LIST_FOOTER(out, rc);
out->begin_list(out, NULL, NULL, "Config Options");
- out->message(out, "cluster-options", data_set);
+ out->message(out, "cluster-options", scheduler);
}
PCMK__OUTPUT_LIST_FOOTER(out, rc);
if (pcmk_is_set(section_opts, pcmk_section_maint_mode)) {
- if (out->message(out, "maint-mode", data_set->flags) == pcmk_rc_ok) {
+ if (out->message(out, "maint-mode", scheduler->flags) == pcmk_rc_ok) {
rc = pcmk_rc_ok;
}
}
@@ -512,7 +541,7 @@ cluster_summary_html(pcmk__output_t *out, va_list args) {
}
char *
-pe__node_display_name(pe_node_t *node, bool print_detail)
+pe__node_display_name(pcmk_node_t *node, bool print_detail)
{
char *node_name;
const char *node_host = NULL;
@@ -523,8 +552,8 @@ pe__node_display_name(pe_node_t *node, bool print_detail)
/* Host is displayed only if this is a guest node and detail is requested */
if (print_detail && pe__is_guest_node(node)) {
- const pe_resource_t *container = node->details->remote_rsc->container;
- const pe_node_t *host_node = pe__current_node(container);
+ const pcmk_resource_t *container = node->details->remote_rsc->container;
+ const pcmk_node_t *host_node = pe__current_node(container);
if (host_node && host_node->details) {
node_host = host_node->details->uname;
@@ -575,9 +604,7 @@ pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name
xml_node = pcmk__output_xml_peek_parent(out);
CRM_ASSERT(xml_node != NULL);
- xml_node = is_list
- ? create_xml_node(xml_node, tag_name)
- : xmlNewChild(xml_node, NULL, (pcmkXmlStr) tag_name, NULL);
+ xml_node = create_xml_node(xml_node, tag_name);
va_start(args, pairs_count);
while(pairs_count--) {
@@ -598,20 +625,20 @@ pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name
static const char *
role_desc(enum rsc_role_e role)
{
- if (role == RSC_ROLE_PROMOTED) {
+ if (role == pcmk_role_promoted) {
#ifdef PCMK__COMPAT_2_0
- return "as " RSC_ROLE_PROMOTED_LEGACY_S " ";
+ return "as " PCMK__ROLE_PROMOTED_LEGACY " ";
#else
- return "in " RSC_ROLE_PROMOTED_S " role ";
+ return "in " PCMK__ROLE_PROMOTED " role ";
#endif
}
return "";
}
-PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "uint32_t")
+PCMK__OUTPUT_ARGS("ban", "pcmk_node_t *", "pe__location_t *", "uint32_t")
static int
ban_html(pcmk__output_t *out, va_list args) {
- pe_node_t *pe_node = va_arg(args, pe_node_t *);
+ pcmk_node_t *pe_node = va_arg(args, pcmk_node_t *);
pe__location_t *location = va_arg(args, pe__location_t *);
uint32_t show_opts = va_arg(args, uint32_t);
@@ -628,10 +655,10 @@ ban_html(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "uint32_t")
+PCMK__OUTPUT_ARGS("ban", "pcmk_node_t *", "pe__location_t *", "uint32_t")
static int
ban_text(pcmk__output_t *out, va_list args) {
- pe_node_t *pe_node = va_arg(args, pe_node_t *);
+ pcmk_node_t *pe_node = va_arg(args, pcmk_node_t *);
pe__location_t *location = va_arg(args, pe__location_t *);
uint32_t show_opts = va_arg(args, uint32_t);
@@ -645,14 +672,14 @@ ban_text(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "uint32_t")
+PCMK__OUTPUT_ARGS("ban", "pcmk_node_t *", "pe__location_t *", "uint32_t")
static int
ban_xml(pcmk__output_t *out, va_list args) {
- pe_node_t *pe_node = va_arg(args, pe_node_t *);
+ pcmk_node_t *pe_node = va_arg(args, pcmk_node_t *);
pe__location_t *location = va_arg(args, pe__location_t *);
uint32_t show_opts G_GNUC_UNUSED = va_arg(args, uint32_t);
- const char *promoted_only = pcmk__btoa(location->role_filter == RSC_ROLE_PROMOTED);
+ const char *promoted_only = pcmk__btoa(location->role_filter == pcmk_role_promoted);
char *weight_s = pcmk__itoa(pe_node->weight);
pcmk__output_create_xml_node(out, "ban",
@@ -674,11 +701,11 @@ ban_xml(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("ban-list", "pe_working_set_t *", "const char *", "GList *",
+PCMK__OUTPUT_ARGS("ban-list", "pcmk_scheduler_t *", "const char *", "GList *",
"uint32_t", "bool")
static int
ban_list(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
const char *prefix = va_arg(args, const char *);
GList *only_rsc = va_arg(args, GList *);
uint32_t show_opts = va_arg(args, uint32_t);
@@ -688,9 +715,10 @@ ban_list(pcmk__output_t *out, va_list args) {
int rc = pcmk_rc_no_output;
/* Print each ban */
- for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) {
+ for (gIter = scheduler->placement_constraints;
+ gIter != NULL; gIter = gIter->next) {
pe__location_t *location = gIter->data;
- const pe_resource_t *rsc = location->rsc_lh;
+ const pcmk_resource_t *rsc = location->rsc_lh;
if (prefix != NULL && !g_str_has_prefix(location->id, prefix)) {
continue;
@@ -704,7 +732,7 @@ ban_list(pcmk__output_t *out, va_list args) {
}
for (gIter2 = location->node_list_rh; gIter2 != NULL; gIter2 = gIter2->next) {
- pe_node_t *node = (pe_node_t *) gIter2->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter2->data;
if (node->weight < 0) {
PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Negative Location Constraints");
@@ -843,11 +871,11 @@ cluster_counts_xml(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *",
+PCMK__OUTPUT_ARGS("cluster-dc", "pcmk_node_t *", "const char *", "const char *",
"char *", "int")
static int
cluster_dc_html(pcmk__output_t *out, va_list args) {
- pe_node_t *dc = va_arg(args, pe_node_t *);
+ pcmk_node_t *dc = va_arg(args, pcmk_node_t *);
const char *quorum = va_arg(args, const char *);
const char *dc_version_s = va_arg(args, const char *);
char *dc_name = va_arg(args, char *);
@@ -881,11 +909,11 @@ cluster_dc_html(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *",
+PCMK__OUTPUT_ARGS("cluster-dc", "pcmk_node_t *", "const char *", "const char *",
"char *", "int")
static int
cluster_dc_text(pcmk__output_t *out, va_list args) {
- pe_node_t *dc = va_arg(args, pe_node_t *);
+ pcmk_node_t *dc = va_arg(args, pcmk_node_t *);
const char *quorum = va_arg(args, const char *);
const char *dc_version_s = va_arg(args, const char *);
char *dc_name = va_arg(args, char *);
@@ -904,11 +932,11 @@ cluster_dc_text(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *",
+PCMK__OUTPUT_ARGS("cluster-dc", "pcmk_node_t *", "const char *", "const char *",
"char *", "int")
static int
cluster_dc_xml(pcmk__output_t *out, va_list args) {
- pe_node_t *dc = va_arg(args, pe_node_t *);
+ pcmk_node_t *dc = va_arg(args, pcmk_node_t *);
const char *quorum = va_arg(args, const char *);
const char *dc_version_s = va_arg(args, const char *);
char *dc_name G_GNUC_UNUSED = va_arg(args, char *);
@@ -937,11 +965,11 @@ static int
cluster_maint_mode_text(pcmk__output_t *out, va_list args) {
unsigned long long flags = va_arg(args, unsigned long long);
- if (pcmk_is_set(flags, pe_flag_maintenance_mode)) {
+ if (pcmk_is_set(flags, pcmk_sched_in_maintenance)) {
pcmk__formatted_printf(out, "\n *** Resource management is DISABLED ***\n");
pcmk__formatted_printf(out, " The cluster will not attempt to start, stop or recover services\n");
return pcmk_rc_ok;
- } else if (pcmk_is_set(flags, pe_flag_stop_everything)) {
+ } else if (pcmk_is_set(flags, pcmk_sched_stop_all)) {
pcmk__formatted_printf(out, "\n *** Resource management is DISABLED ***\n");
pcmk__formatted_printf(out, " The cluster will keep all resources stopped\n");
return pcmk_rc_ok;
@@ -950,48 +978,54 @@ cluster_maint_mode_text(pcmk__output_t *out, va_list args) {
}
}
-PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
+PCMK__OUTPUT_ARGS("cluster-options", "pcmk_scheduler_t *")
static int
cluster_options_html(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
- out->list_item(out, NULL, "STONITH of failed nodes %s",
- pcmk_is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
+ out->list_item(out, NULL, "STONITH of failed nodes enabled");
+ } else {
+ out->list_item(out, NULL, "STONITH of failed nodes disabled");
+ }
- out->list_item(out, NULL, "Cluster is %s",
- pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster) ? "symmetric" : "asymmetric");
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)) {
+ out->list_item(out, NULL, "Cluster is symmetric");
+ } else {
+ out->list_item(out, NULL, "Cluster is asymmetric");
+ }
- switch (data_set->no_quorum_policy) {
- case no_quorum_freeze:
+ switch (scheduler->no_quorum_policy) {
+ case pcmk_no_quorum_freeze:
out->list_item(out, NULL, "No quorum policy: Freeze resources");
break;
- case no_quorum_stop:
+ case pcmk_no_quorum_stop:
out->list_item(out, NULL, "No quorum policy: Stop ALL resources");
break;
- case no_quorum_demote:
+ case pcmk_no_quorum_demote:
out->list_item(out, NULL, "No quorum policy: Demote promotable "
"resources and stop all other resources");
break;
- case no_quorum_ignore:
+ case pcmk_no_quorum_ignore:
out->list_item(out, NULL, "No quorum policy: Ignore");
break;
- case no_quorum_suicide:
+ case pcmk_no_quorum_fence:
out->list_item(out, NULL, "No quorum policy: Suicide");
break;
}
- if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_in_maintenance)) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: ");
pcmk_create_html_node(node, "span", NULL, "bold", "DISABLED");
pcmk_create_html_node(node, "span", NULL, NULL,
" (the cluster will not attempt to start, stop, or recover services)");
- } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
+ } else if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_all)) {
xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: ");
@@ -1005,50 +1039,56 @@ cluster_options_html(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
+PCMK__OUTPUT_ARGS("cluster-options", "pcmk_scheduler_t *")
static int
cluster_options_log(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
- if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_in_maintenance)) {
return out->info(out, "Resource management is DISABLED. The cluster will not attempt to start, stop or recover services.");
- } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
+ } else if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_all)) {
return out->info(out, "Resource management is DISABLED. The cluster has stopped all resources.");
} else {
return pcmk_rc_no_output;
}
}
-PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
+PCMK__OUTPUT_ARGS("cluster-options", "pcmk_scheduler_t *")
static int
cluster_options_text(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
- out->list_item(out, NULL, "STONITH of failed nodes %s",
- pcmk_is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
+ out->list_item(out, NULL, "STONITH of failed nodes enabled");
+ } else {
+ out->list_item(out, NULL, "STONITH of failed nodes disabled");
+ }
- out->list_item(out, NULL, "Cluster is %s",
- pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster) ? "symmetric" : "asymmetric");
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)) {
+ out->list_item(out, NULL, "Cluster is symmetric");
+ } else {
+ out->list_item(out, NULL, "Cluster is asymmetric");
+ }
- switch (data_set->no_quorum_policy) {
- case no_quorum_freeze:
+ switch (scheduler->no_quorum_policy) {
+ case pcmk_no_quorum_freeze:
out->list_item(out, NULL, "No quorum policy: Freeze resources");
break;
- case no_quorum_stop:
+ case pcmk_no_quorum_stop:
out->list_item(out, NULL, "No quorum policy: Stop ALL resources");
break;
- case no_quorum_demote:
+ case pcmk_no_quorum_demote:
out->list_item(out, NULL, "No quorum policy: Demote promotable "
"resources and stop all other resources");
break;
- case no_quorum_ignore:
+ case pcmk_no_quorum_ignore:
out->list_item(out, NULL, "No quorum policy: Ignore");
break;
- case no_quorum_suicide:
+ case pcmk_no_quorum_fence:
out->list_item(out, NULL, "No quorum policy: Suicide");
break;
}
@@ -1056,43 +1096,48 @@ cluster_options_text(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
+#define bv(flag) pcmk__btoa(pcmk_is_set(scheduler->flags, (flag)))
+
+PCMK__OUTPUT_ARGS("cluster-options", "pcmk_scheduler_t *")
static int
cluster_options_xml(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
const char *no_quorum_policy = NULL;
- char *stonith_timeout_str = pcmk__itoa(data_set->stonith_timeout);
- char *priority_fencing_delay_str = pcmk__itoa(data_set->priority_fencing_delay * 1000);
+ char *stonith_timeout_str = pcmk__itoa(scheduler->stonith_timeout);
+ char *priority_fencing_delay_str = pcmk__itoa(scheduler->priority_fencing_delay * 1000);
- switch (data_set->no_quorum_policy) {
- case no_quorum_freeze:
+ switch (scheduler->no_quorum_policy) {
+ case pcmk_no_quorum_freeze:
no_quorum_policy = "freeze";
break;
- case no_quorum_stop:
+ case pcmk_no_quorum_stop:
no_quorum_policy = "stop";
break;
- case no_quorum_demote:
+ case pcmk_no_quorum_demote:
no_quorum_policy = "demote";
break;
- case no_quorum_ignore:
+ case pcmk_no_quorum_ignore:
no_quorum_policy = "ignore";
break;
- case no_quorum_suicide:
+ case pcmk_no_quorum_fence:
no_quorum_policy = "suicide";
break;
}
pcmk__output_create_xml_node(out, "cluster_options",
- "stonith-enabled", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)),
- "symmetric-cluster", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)),
+ "stonith-enabled",
+ bv(pcmk_sched_fencing_enabled),
+ "symmetric-cluster",
+ bv(pcmk_sched_symmetric_cluster),
"no-quorum-policy", no_quorum_policy,
- "maintenance-mode", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)),
- "stop-all-resources", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stop_everything)),
+ "maintenance-mode",
+ bv(pcmk_sched_in_maintenance),
+ "stop-all-resources", bv(pcmk_sched_stop_all),
"stonith-timeout-ms", stonith_timeout_str,
"priority-fencing-delay-ms", priority_fencing_delay_str,
NULL);
@@ -1288,8 +1333,8 @@ failed_action_friendly(pcmk__output_t *out, const xmlNode *xml_op,
pcmk__g_strcat(str, pcmk__readable_interval(interval_ms), "-interval ",
NULL);
}
- pcmk__g_strcat(str, crm_action_str(task, interval_ms), " on ", node_name,
- NULL);
+ pcmk__g_strcat(str, pcmk__readable_action(task, interval_ms), " on ",
+ node_name, NULL);
if (status == PCMK_EXEC_DONE) {
pcmk__g_strcat(str, " returned '", services_ocf_exitcode_str(rc), "'",
@@ -1496,11 +1541,11 @@ failed_action_xml(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("failed-action-list", "pe_working_set_t *", "GList *",
+PCMK__OUTPUT_ARGS("failed-action-list", "pcmk_scheduler_t *", "GList *",
"GList *", "uint32_t", "bool")
static int
failed_action_list(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
uint32_t show_opts = va_arg(args, uint32_t);
@@ -1509,11 +1554,11 @@ failed_action_list(pcmk__output_t *out, va_list args) {
xmlNode *xml_op = NULL;
int rc = pcmk_rc_no_output;
- if (xmlChildElementCount(data_set->failed) == 0) {
+ if (xmlChildElementCount(scheduler->failed) == 0) {
return rc;
}
- for (xml_op = pcmk__xml_first_child(data_set->failed); xml_op != NULL;
+ for (xml_op = pcmk__xml_first_child(scheduler->failed); xml_op != NULL;
xml_op = pcmk__xml_next(xml_op)) {
char *rsc = NULL;
@@ -1546,7 +1591,7 @@ failed_action_list(pcmk__output_t *out, va_list args) {
}
static void
-status_node(pe_node_t *node, xmlNodePtr parent, uint32_t show_opts)
+status_node(pcmk_node_t *node, xmlNodePtr parent, uint32_t show_opts)
{
int health = pe__node_health(node);
@@ -1598,11 +1643,11 @@ status_node(pe_node_t *node, xmlNodePtr parent, uint32_t show_opts)
}
}
-PCMK__OUTPUT_ARGS("node", "pe_node_t *", "uint32_t", "bool",
+PCMK__OUTPUT_ARGS("node", "pcmk_node_t *", "uint32_t", "bool",
"GList *", "GList *")
static int
node_html(pcmk__output_t *out, va_list args) {
- pe_node_t *node = va_arg(args, pe_node_t *);
+ pcmk_node_t *node = va_arg(args, pcmk_node_t *);
uint32_t show_opts = va_arg(args, uint32_t);
bool full = va_arg(args, int);
GList *only_node = va_arg(args, GList *);
@@ -1641,7 +1686,7 @@ node_html(pcmk__output_t *out, va_list args) {
status_node(node, item_node, show_opts);
for (lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) {
- pe_resource_t *rsc = (pe_resource_t *) lpc2->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) lpc2->data;
PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Resources");
show_opts |= pcmk_show_rsc_only;
@@ -1679,7 +1724,7 @@ node_html(pcmk__output_t *out, va_list args) {
* \return String representation of node's status
*/
static const char *
-node_text_status(const pe_node_t *node)
+node_text_status(const pcmk_node_t *node)
{
if (node->details->unclean) {
if (node->details->online) {
@@ -1723,10 +1768,11 @@ node_text_status(const pe_node_t *node)
return "OFFLINE";
}
-PCMK__OUTPUT_ARGS("node", "pe_node_t *", "uint32_t", "bool", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("node", "pcmk_node_t *", "uint32_t", "bool", "GList *",
+ "GList *")
static int
node_text(pcmk__output_t *out, va_list args) {
- pe_node_t *node = va_arg(args, pe_node_t *);
+ pcmk_node_t *node = va_arg(args, pcmk_node_t *);
uint32_t show_opts = va_arg(args, uint32_t);
bool full = va_arg(args, int);
GList *only_node = va_arg(args, GList *);
@@ -1784,7 +1830,7 @@ node_text(pcmk__output_t *out, va_list args) {
out->begin_list(out, NULL, NULL, "Resources");
for (gIter2 = node->details->running_rsc; gIter2 != NULL; gIter2 = gIter2->next) {
- pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) gIter2->data;
show_opts |= pcmk_show_rsc_only;
out->message(out, crm_map_element_name(rsc->xml), show_opts,
@@ -1809,10 +1855,11 @@ node_text(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node", "pe_node_t *", "uint32_t", "bool", "GList *", "GList *")
+PCMK__OUTPUT_ARGS("node", "pcmk_node_t *", "uint32_t", "bool", "GList *",
+ "GList *")
static int
node_xml(pcmk__output_t *out, va_list args) {
- pe_node_t *node = va_arg(args, pe_node_t *);
+ pcmk_node_t *node = va_arg(args, pcmk_node_t *);
uint32_t show_opts G_GNUC_UNUSED = va_arg(args, uint32_t);
bool full = va_arg(args, int);
GList *only_node = va_arg(args, GList *);
@@ -1826,10 +1873,10 @@ node_xml(pcmk__output_t *out, va_list args) {
const char *feature_set;
switch (node->details->type) {
- case node_member:
+ case pcmk_node_variant_cluster:
node_type = "member";
break;
- case node_remote:
+ case pcmk_node_variant_remote:
node_type = "remote";
break;
case node_ping:
@@ -1873,7 +1920,7 @@ node_xml(pcmk__output_t *out, va_list args) {
GList *lpc = NULL;
for (lpc = node->details->running_rsc; lpc != NULL; lpc = lpc->next) {
- pe_resource_t *rsc = (pe_resource_t *) lpc->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data;
show_opts |= pcmk_show_rsc_only;
out->message(out, crm_map_element_name(rsc->xml), show_opts,
@@ -1959,13 +2006,13 @@ node_attribute_html(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-and-op", "pe_working_set_t *", "xmlNodePtr")
+PCMK__OUTPUT_ARGS("node-and-op", "pcmk_scheduler_t *", "xmlNodePtr")
static int
node_and_op(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
gchar *node_str = NULL;
char *last_change_str = NULL;
@@ -1976,10 +2023,10 @@ node_and_op(pcmk__output_t *out, va_list args) {
pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS),
&status, PCMK_EXEC_UNKNOWN);
- rsc = pe_find_resource(data_set->resources, op_rsc);
+ rsc = pe_find_resource(scheduler->resources, op_rsc);
if (rsc) {
- const pe_node_t *node = pe__current_node(rsc);
+ const pcmk_node_t *node = pe__current_node(rsc);
const char *target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
uint32_t show_opts = pcmk_show_rsc_only | pcmk_show_pending;
@@ -2014,13 +2061,13 @@ node_and_op(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-and-op", "pe_working_set_t *", "xmlNodePtr")
+PCMK__OUTPUT_ARGS("node-and-op", "pcmk_scheduler_t *", "xmlNodePtr")
static int
node_and_op_xml(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
const char *op_rsc = crm_element_value(xml_op, "resource");
int status;
time_t last_change = 0;
@@ -2036,7 +2083,7 @@ node_and_op_xml(pcmk__output_t *out, va_list args) {
"status", pcmk_exec_status_str(status),
NULL);
- rsc = pe_find_resource(data_set->resources, op_rsc);
+ rsc = pe_find_resource(scheduler->resources, op_rsc);
if (rsc) {
const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
@@ -2086,11 +2133,11 @@ node_attribute_xml(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-attribute-list", "pe_working_set_t *", "uint32_t",
+PCMK__OUTPUT_ARGS("node-attribute-list", "pcmk_scheduler_t *", "uint32_t",
"bool", "GList *", "GList *")
static int
node_attribute_list(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
uint32_t show_opts = va_arg(args, uint32_t);
bool print_spacer = va_arg(args, int);
GList *only_node = va_arg(args, GList *);
@@ -2099,8 +2146,8 @@ node_attribute_list(pcmk__output_t *out, va_list args) {
int rc = pcmk_rc_no_output;
/* Display each node's attributes */
- for (GList *gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = gIter->data;
+ for (GList *gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) {
+ pcmk_node_t *node = gIter->data;
GList *attr_list = NULL;
GHashTableIter iter;
@@ -2137,7 +2184,7 @@ node_attribute_list(pcmk__output_t *out, va_list args) {
value = pe_node_attribute_raw(node, name);
add_extra = add_extra_info(node, node->details->running_rsc,
- data_set, name, &expected_score);
+ scheduler, name, &expected_score);
/* Print attribute name and value */
out->message(out, "node-attribute", name, value, add_extra,
@@ -2152,11 +2199,11 @@ node_attribute_list(pcmk__output_t *out, va_list args) {
return rc;
}
-PCMK__OUTPUT_ARGS("node-capacity", "const pe_node_t *", "const char *")
+PCMK__OUTPUT_ARGS("node-capacity", "const pcmk_node_t *", "const char *")
static int
node_capacity(pcmk__output_t *out, va_list args)
{
- const pe_node_t *node = va_arg(args, pe_node_t *);
+ const pcmk_node_t *node = va_arg(args, pcmk_node_t *);
const char *comment = va_arg(args, const char *);
char *dump_text = crm_strdup_printf("%s: %s capacity:",
@@ -2169,11 +2216,11 @@ node_capacity(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-capacity", "const pe_node_t *", "const char *")
+PCMK__OUTPUT_ARGS("node-capacity", "const pcmk_node_t *", "const char *")
static int
node_capacity_xml(pcmk__output_t *out, va_list args)
{
- const pe_node_t *node = va_arg(args, pe_node_t *);
+ const pcmk_node_t *node = va_arg(args, pcmk_node_t *);
const char *comment = va_arg(args, const char *);
xmlNodePtr xml_node = pcmk__output_create_xml_node(out, "capacity",
@@ -2185,12 +2232,12 @@ node_capacity_xml(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-history-list", "pe_working_set_t *", "pe_node_t *", "xmlNodePtr",
- "GList *", "GList *", "uint32_t", "uint32_t")
+PCMK__OUTPUT_ARGS("node-history-list", "pcmk_scheduler_t *", "pcmk_node_t *",
+ "xmlNodePtr", "GList *", "GList *", "uint32_t", "uint32_t")
static int
node_history_list(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
- pe_node_t *node = va_arg(args, pe_node_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
+ pcmk_node_t *node = va_arg(args, pcmk_node_t *);
xmlNode *node_state = va_arg(args, xmlNode *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
@@ -2208,8 +2255,8 @@ node_history_list(pcmk__output_t *out, va_list args) {
for (rsc_entry = first_named_child(lrm_rsc, XML_LRM_TAG_RESOURCE);
rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) {
const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
- pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
- const pe_resource_t *parent = pe__const_top_resource(rsc, false);
+ pcmk_resource_t *rsc = pe_find_resource(scheduler->resources, rsc_id);
+ const pcmk_resource_t *parent = pe__const_top_resource(rsc, false);
/* We can't use is_filtered here to filter group resources. For is_filtered,
* we have to decide whether to check the parent or not. If we check the
@@ -2219,7 +2266,7 @@ node_history_list(pcmk__output_t *out, va_list args) {
*
* For other resource types, is_filtered is okay.
*/
- if (parent->variant == pe_group) {
+ if (parent->variant == pcmk_rsc_variant_group) {
if (!pcmk__str_in_list(rsc_printable_id(rsc), only_rsc,
pcmk__str_star_matches)
&& !pcmk__str_in_list(rsc_printable_id(parent), only_rsc,
@@ -2234,8 +2281,8 @@ node_history_list(pcmk__output_t *out, va_list args) {
if (!pcmk_is_set(section_opts, pcmk_section_operations)) {
time_t last_failure = 0;
- int failcount = pe_get_failcount(node, rsc, &last_failure, pe_fc_default,
- NULL);
+ int failcount = pe_get_failcount(node, rsc, &last_failure,
+ pcmk__fc_default, NULL);
if (failcount <= 0) {
continue;
@@ -2251,7 +2298,7 @@ node_history_list(pcmk__output_t *out, va_list args) {
failcount, last_failure, false);
} else {
GList *op_list = get_operation_list(rsc_entry);
- pe_resource_t *rsc = pe_find_resource(data_set->resources,
+ pcmk_resource_t *rsc = pe_find_resource(scheduler->resources,
crm_element_value(rsc_entry, XML_ATTR_ID));
if (op_list == NULL) {
@@ -2264,7 +2311,7 @@ node_history_list(pcmk__output_t *out, va_list args) {
only_rsc);
}
- out->message(out, "resource-operation-list", data_set, rsc, node,
+ out->message(out, "resource-operation-list", scheduler, rsc, node,
op_list, show_opts);
}
}
@@ -2285,7 +2332,7 @@ node_list_html(pcmk__output_t *out, va_list args) {
int rc = pcmk_rc_no_output;
for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter->data;
if (!pcmk__str_in_list(node->details->uname, only_node,
pcmk__str_star_matches|pcmk__str_casei)) {
@@ -2320,7 +2367,7 @@ node_list_text(pcmk__output_t *out, va_list args) {
int rc = pcmk_rc_no_output;
for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter->data;
char *node_name = pe__node_display_name(node, pcmk_is_set(show_opts, pcmk_show_node_id));
if (!pcmk__str_in_list(node->details->uname, only_node,
@@ -2416,7 +2463,7 @@ node_list_xml(pcmk__output_t *out, va_list args) {
out->begin_list(out, NULL, NULL, "nodes");
for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) gIter->data;
if (!pcmk__str_in_list(node->details->uname, only_node,
pcmk__str_star_matches|pcmk__str_casei)) {
@@ -2430,11 +2477,11 @@ node_list_xml(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-summary", "pe_working_set_t *", "GList *", "GList *",
+PCMK__OUTPUT_ARGS("node-summary", "pcmk_scheduler_t *", "GList *", "GList *",
"uint32_t", "uint32_t", "bool")
static int
node_summary(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
GList *only_node = va_arg(args, GList *);
GList *only_rsc = va_arg(args, GList *);
uint32_t section_opts = va_arg(args, uint32_t);
@@ -2442,7 +2489,7 @@ node_summary(pcmk__output_t *out, va_list args) {
bool print_spacer = va_arg(args, int);
xmlNode *node_state = NULL;
- xmlNode *cib_status = pcmk_find_cib_element(data_set->input,
+ xmlNode *cib_status = pcmk_find_cib_element(scheduler->input,
XML_CIB_TAG_STATUS);
int rc = pcmk_rc_no_output;
@@ -2452,7 +2499,7 @@ node_summary(pcmk__output_t *out, va_list args) {
for (node_state = first_named_child(cib_status, XML_CIB_TAG_STATE);
node_state != NULL; node_state = crm_next_same_xml(node_state)) {
- pe_node_t *node = pe_find_node_id(data_set->nodes, ID(node_state));
+ pcmk_node_t *node = pe_find_node_id(scheduler->nodes, ID(node_state));
if (!node || !node->details || !node->details->online) {
continue;
@@ -2466,7 +2513,7 @@ node_summary(pcmk__output_t *out, va_list args) {
PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc,
pcmk_is_set(section_opts, pcmk_section_operations) ? "Operations" : "Migration Summary");
- out->message(out, "node-history-list", data_set, node, node_state,
+ out->message(out, "node-history-list", scheduler, node, node_state,
only_node, only_rsc, section_opts, show_opts);
}
@@ -2474,12 +2521,12 @@ node_summary(pcmk__output_t *out, va_list args) {
return rc;
}
-PCMK__OUTPUT_ARGS("node-weight", "const pe_resource_t *", "const char *",
+PCMK__OUTPUT_ARGS("node-weight", "const pcmk_resource_t *", "const char *",
"const char *", "const char *")
static int
node_weight(pcmk__output_t *out, va_list args)
{
- const pe_resource_t *rsc = va_arg(args, const pe_resource_t *);
+ const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *);
const char *prefix = va_arg(args, const char *);
const char *uname = va_arg(args, const char *);
const char *score = va_arg(args, const char *);
@@ -2494,12 +2541,12 @@ node_weight(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("node-weight", "const pe_resource_t *", "const char *",
+PCMK__OUTPUT_ARGS("node-weight", "const pcmk_resource_t *", "const char *",
"const char *", "const char *")
static int
node_weight_xml(pcmk__output_t *out, va_list args)
{
- const pe_resource_t *rsc = va_arg(args, const pe_resource_t *);
+ const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *);
const char *prefix = va_arg(args, const char *);
const char *uname = va_arg(args, const char *);
const char *score = va_arg(args, const char *);
@@ -2587,12 +2634,13 @@ op_history_xml(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("promotion-score", "pe_resource_t *", "pe_node_t *", "const char *")
+PCMK__OUTPUT_ARGS("promotion-score", "pcmk_resource_t *", "pcmk_node_t *",
+ "const char *")
static int
promotion_score(pcmk__output_t *out, va_list args)
{
- pe_resource_t *child_rsc = va_arg(args, pe_resource_t *);
- pe_node_t *chosen = va_arg(args, pe_node_t *);
+ pcmk_resource_t *child_rsc = va_arg(args, pcmk_resource_t *);
+ pcmk_node_t *chosen = va_arg(args, pcmk_node_t *);
const char *score = va_arg(args, const char *);
out->list_item(out, NULL, "%s promotion score on %s: %s",
@@ -2602,12 +2650,13 @@ promotion_score(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("promotion-score", "pe_resource_t *", "pe_node_t *", "const char *")
+PCMK__OUTPUT_ARGS("promotion-score", "pcmk_resource_t *", "pcmk_node_t *",
+ "const char *")
static int
promotion_score_xml(pcmk__output_t *out, va_list args)
{
- pe_resource_t *child_rsc = va_arg(args, pe_resource_t *);
- pe_node_t *chosen = va_arg(args, pe_node_t *);
+ pcmk_resource_t *child_rsc = va_arg(args, pcmk_resource_t *);
+ pcmk_node_t *chosen = va_arg(args, pcmk_node_t *);
const char *score = va_arg(args, const char *);
xmlNodePtr node = pcmk__output_create_xml_node(out, "promotion_score",
@@ -2622,10 +2671,10 @@ promotion_score_xml(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("resource-config", "pe_resource_t *", "bool")
+PCMK__OUTPUT_ARGS("resource-config", "const pcmk_resource_t *", "bool")
static int
resource_config(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *);
bool raw = va_arg(args, int);
char *rsc_xml = formatted_xml_buf(rsc, raw);
@@ -2636,10 +2685,10 @@ resource_config(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("resource-config", "pe_resource_t *", "bool")
+PCMK__OUTPUT_ARGS("resource-config", "const pcmk_resource_t *", "bool")
static int
resource_config_text(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ const pcmk_resource_t *rsc = va_arg(args, const pcmk_resource_t *);
bool raw = va_arg(args, int);
char *rsc_xml = formatted_xml_buf(rsc, raw);
@@ -2651,10 +2700,11 @@ resource_config_text(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "bool", "int", "time_t", "bool")
+PCMK__OUTPUT_ARGS("resource-history", "pcmk_resource_t *", "const char *",
+ "bool", "int", "time_t", "bool")
static int
resource_history_text(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
const char *rsc_id = va_arg(args, const char *);
bool all = va_arg(args, int);
int failcount = va_arg(args, int);
@@ -2673,10 +2723,11 @@ resource_history_text(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "bool", "int", "time_t", "bool")
+PCMK__OUTPUT_ARGS("resource-history", "pcmk_resource_t *", "const char *",
+ "bool", "int", "time_t", "bool")
static int
resource_history_xml(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
const char *rsc_id = va_arg(args, const char *);
bool all = va_arg(args, int);
int failcount = va_arg(args, int);
@@ -2733,12 +2784,12 @@ print_resource_header(pcmk__output_t *out, uint32_t show_opts)
}
-PCMK__OUTPUT_ARGS("resource-list", "pe_working_set_t *", "uint32_t", "bool",
+PCMK__OUTPUT_ARGS("resource-list", "pcmk_scheduler_t *", "uint32_t", "bool",
"GList *", "GList *", "bool")
static int
resource_list(pcmk__output_t *out, va_list args)
{
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
uint32_t show_opts = va_arg(args, uint32_t);
bool print_summary = va_arg(args, int);
GList *only_node = va_arg(args, GList *);
@@ -2759,8 +2810,9 @@ resource_list(pcmk__output_t *out, va_list args)
/* If we haven't already printed resources grouped by node,
* and brief output was requested, print resource summary */
- if (pcmk_is_set(show_opts, pcmk_show_brief) && !pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
- GList *rscs = pe__filter_rsc_list(data_set->resources, only_rsc);
+ if (pcmk_is_set(show_opts, pcmk_show_brief)
+ && !pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
+ GList *rscs = pe__filter_rsc_list(scheduler->resources, only_rsc);
PCMK__OUTPUT_SPACER_IF(out, print_spacer);
print_resource_header(out, show_opts);
@@ -2771,8 +2823,8 @@ resource_list(pcmk__output_t *out, va_list args)
}
/* For each resource, display it if appropriate */
- for (rsc_iter = data_set->resources; rsc_iter != NULL; rsc_iter = rsc_iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data;
+ for (rsc_iter = scheduler->resources; rsc_iter != NULL; rsc_iter = rsc_iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) rsc_iter->data;
int x;
/* Complex resources may have some sub-resources active and some inactive */
@@ -2780,7 +2832,7 @@ resource_list(pcmk__output_t *out, va_list args)
gboolean partially_active = rsc->fns->active(rsc, FALSE);
/* Skip inactive orphans (deleted but still in CIB) */
- if (pcmk_is_set(rsc->flags, pe_rsc_orphan) && !is_active) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_removed) && !is_active) {
continue;
/* Skip active resources if we already displayed them by node */
@@ -2790,7 +2842,8 @@ resource_list(pcmk__output_t *out, va_list args)
}
/* Skip primitives already counted in a brief summary */
- } else if (pcmk_is_set(show_opts, pcmk_show_brief) && (rsc->variant == pe_native)) {
+ } else if (pcmk_is_set(show_opts, pcmk_show_brief)
+ && (rsc->variant == pcmk_rsc_variant_primitive)) {
continue;
/* Skip resources that aren't at least partially active,
@@ -2840,14 +2893,15 @@ resource_list(pcmk__output_t *out, va_list args)
return rc;
}
-PCMK__OUTPUT_ARGS("resource-operation-list", "pe_working_set_t *", "pe_resource_t *",
- "pe_node_t *", "GList *", "uint32_t")
+PCMK__OUTPUT_ARGS("resource-operation-list", "pcmk_scheduler_t *",
+ "pcmk_resource_t *", "pcmk_node_t *", "GList *", "uint32_t")
static int
resource_operation_list(pcmk__output_t *out, va_list args)
{
- pe_working_set_t *data_set G_GNUC_UNUSED = va_arg(args, pe_working_set_t *);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- pe_node_t *node = va_arg(args, pe_node_t *);
+ pcmk_scheduler_t *scheduler G_GNUC_UNUSED = va_arg(args,
+ pcmk_scheduler_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
+ pcmk_node_t *node = va_arg(args, pcmk_node_t *);
GList *op_list = va_arg(args, GList *);
uint32_t show_opts = va_arg(args, uint32_t);
@@ -2866,7 +2920,7 @@ resource_operation_list(pcmk__output_t *out, va_list args)
pcmk__scan_min_int(op_rc, &op_rc_i, 0);
/* Display 0-interval monitors as "probe" */
- if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)
+ if (pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)
&& pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) {
task = "probe";
}
@@ -2874,8 +2928,8 @@ resource_operation_list(pcmk__output_t *out, va_list args)
/* If this is the first printed operation, print heading for resource */
if (rc == pcmk_rc_no_output) {
time_t last_failure = 0;
- int failcount = pe_get_failcount(node, rsc, &last_failure, pe_fc_default,
- NULL);
+ int failcount = pe_get_failcount(node, rsc, &last_failure,
+ pcmk__fc_default, NULL);
out->message(out, "resource-history", rsc, rsc_printable_id(rsc), true,
failcount, last_failure, true);
@@ -2894,12 +2948,13 @@ resource_operation_list(pcmk__output_t *out, va_list args)
return rc;
}
-PCMK__OUTPUT_ARGS("resource-util", "pe_resource_t *", "pe_node_t *", "const char *")
+PCMK__OUTPUT_ARGS("resource-util", "pcmk_resource_t *", "pcmk_node_t *",
+ "const char *")
static int
resource_util(pcmk__output_t *out, va_list args)
{
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- pe_node_t *node = va_arg(args, pe_node_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
+ pcmk_node_t *node = va_arg(args, pcmk_node_t *);
const char *fn = va_arg(args, const char *);
char *dump_text = crm_strdup_printf("%s: %s utilization on %s:",
@@ -2912,12 +2967,13 @@ resource_util(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("resource-util", "pe_resource_t *", "pe_node_t *", "const char *")
+PCMK__OUTPUT_ARGS("resource-util", "pcmk_resource_t *", "pcmk_node_t *",
+ "const char *")
static int
resource_util_xml(pcmk__output_t *out, va_list args)
{
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- pe_node_t *node = va_arg(args, pe_node_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
+ pcmk_node_t *node = va_arg(args, pcmk_node_t *);
const char *fn = va_arg(args, const char *);
xmlNodePtr xml_node = pcmk__output_create_xml_node(out, "utilization",
@@ -2930,10 +2986,10 @@ resource_util_xml(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
+PCMK__OUTPUT_ARGS("ticket", "pcmk_ticket_t *")
static int
ticket_html(pcmk__output_t *out, va_list args) {
- pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
+ pcmk_ticket_t *ticket = va_arg(args, pcmk_ticket_t *);
if (ticket->last_granted > -1) {
char *epoch_str = pcmk__epoch2str(&(ticket->last_granted), 0);
@@ -2952,10 +3008,10 @@ ticket_html(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
+PCMK__OUTPUT_ARGS("ticket", "pcmk_ticket_t *")
static int
ticket_text(pcmk__output_t *out, va_list args) {
- pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
+ pcmk_ticket_t *ticket = va_arg(args, pcmk_ticket_t *);
if (ticket->last_granted > -1) {
char *epoch_str = pcmk__epoch2str(&(ticket->last_granted), 0);
@@ -2974,10 +3030,10 @@ ticket_text(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
+PCMK__OUTPUT_ARGS("ticket", "pcmk_ticket_t *")
static int
ticket_xml(pcmk__output_t *out, va_list args) {
- pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
+ pcmk_ticket_t *ticket = va_arg(args, pcmk_ticket_t *);
xmlNodePtr node = NULL;
@@ -2997,16 +3053,16 @@ ticket_xml(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("ticket-list", "pe_working_set_t *", "bool")
+PCMK__OUTPUT_ARGS("ticket-list", "pcmk_scheduler_t *", "bool")
static int
ticket_list(pcmk__output_t *out, va_list args) {
- pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
+ pcmk_scheduler_t *scheduler = va_arg(args, pcmk_scheduler_t *);
bool print_spacer = va_arg(args, int);
GHashTableIter iter;
gpointer key, value;
- if (g_hash_table_size(data_set->tickets) == 0) {
+ if (g_hash_table_size(scheduler->tickets) == 0) {
return pcmk_rc_no_output;
}
@@ -3016,9 +3072,9 @@ ticket_list(pcmk__output_t *out, va_list args) {
out->begin_list(out, NULL, NULL, "Tickets");
/* Print each ticket */
- g_hash_table_iter_init(&iter, data_set->tickets);
+ g_hash_table_iter_init(&iter, scheduler->tickets);
while (g_hash_table_iter_next(&iter, &key, &value)) {
- pe_ticket_t *ticket = (pe_ticket_t *) value;
+ pcmk_ticket_t *ticket = (pcmk_ticket_t *) value;
out->message(out, "ticket", ticket);
}
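
For orientation, a minimal caller-side sketch (not part of the upstream patch) of how these formatters are invoked after the type rename. It assumes an initialized pcmk__output_t *out, a populated pcmk_scheduler_t *scheduler, and a pcmkd_state value already obtained, and follows the argument order declared in PCMK__OUTPUT_ARGS("cluster-summary", ...) above; the specific flag combination is only an example.

/* Sketch only; assumes the usual internal headers and that out, scheduler,
 * and pcmkd_state are already set up as in the functions above. */
uint32_t section_opts = pcmk_section_stack|pcmk_section_dc|pcmk_section_counts;
uint32_t show_opts = pcmk_show_node_id;
int rc = out->message(out, "cluster-summary", scheduler,
                      pcmkd_state, section_opts, show_opts);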
diff --git a/lib/pengine/pe_status_private.h b/lib/pengine/pe_status_private.h
index ae8d131..bb0ee4e 100644
--- a/lib/pengine/pe_status_private.h
+++ b/lib/pengine/pe_status_private.h
@@ -19,6 +19,11 @@
#define G_GNUC_INTERNAL
#endif
+#include <glib.h> // GSList, GList, GHashTable
+#include <libxml/tree.h> // xmlNode
+
+#include <crm/pengine/status.h> // pcmk_action_t, pcmk_resource_t, etc.
+
/*!
* \internal
* \deprecated This macro will be removed in a future release
@@ -43,10 +48,10 @@ typedef struct notify_data_s {
const char *action;
- pe_action_t *pre;
- pe_action_t *post;
- pe_action_t *pre_done;
- pe_action_t *post_done;
+ pcmk_action_t *pre;
+ pcmk_action_t *post;
+ pcmk_action_t *pre_done;
+ pcmk_action_t *post_done;
GList *active; /* notify_entry_t* */
GList *inactive; /* notify_entry_t* */
@@ -60,62 +65,86 @@ typedef struct notify_data_s {
} notify_data_t;
G_GNUC_INTERNAL
-pe_resource_t *pe__create_clone_child(pe_resource_t *rsc,
- pe_working_set_t *data_set);
+pcmk_resource_t *pe__create_clone_child(pcmk_resource_t *rsc,
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-void pe__create_action_notifications(pe_resource_t *rsc, notify_data_t *n_data);
+void pe__create_action_notifications(pcmk_resource_t *rsc,
+ notify_data_t *n_data);
G_GNUC_INTERNAL
void pe__free_action_notification_data(notify_data_t *n_data);
G_GNUC_INTERNAL
-notify_data_t *pe__action_notif_pseudo_ops(pe_resource_t *rsc, const char *task,
- pe_action_t *action,
- pe_action_t *complete);
+notify_data_t *pe__action_notif_pseudo_ops(pcmk_resource_t *rsc,
+ const char *task,
+ pcmk_action_t *action,
+ pcmk_action_t *complete);
G_GNUC_INTERNAL
-void pe__force_anon(const char *standard, pe_resource_t *rsc, const char *rid,
- pe_working_set_t *data_set);
+void pe__force_anon(const char *standard, pcmk_resource_t *rsc, const char *rid,
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
gint pe__cmp_rsc_priority(gconstpointer a, gconstpointer b);
G_GNUC_INTERNAL
-gboolean pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
- pe_resource_t *parent, pe_working_set_t *data_set);
+gboolean pe__unpack_resource(xmlNode *xml_obj, pcmk_resource_t **rsc,
+ pcmk_resource_t *parent,
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-gboolean unpack_remote_nodes(xmlNode *xml_resources, pe_working_set_t *data_set);
+gboolean unpack_remote_nodes(xmlNode *xml_resources,
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
gboolean unpack_resources(const xmlNode *xml_resources,
- pe_working_set_t *data_set);
+ pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-gboolean unpack_config(xmlNode *config, pe_working_set_t *data_set);
+gboolean unpack_config(xmlNode *config, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-gboolean unpack_nodes(xmlNode *xml_nodes, pe_working_set_t *data_set);
+gboolean unpack_nodes(xmlNode *xml_nodes, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-gboolean unpack_tags(xmlNode *xml_tags, pe_working_set_t *data_set);
+gboolean unpack_tags(xmlNode *xml_tags, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-gboolean unpack_status(xmlNode *status, pe_working_set_t *data_set);
+gboolean unpack_status(xmlNode *status, pcmk_scheduler_t *scheduler);
G_GNUC_INTERNAL
-op_digest_cache_t *pe__compare_fencing_digest(pe_resource_t *rsc,
+op_digest_cache_t *pe__compare_fencing_digest(pcmk_resource_t *rsc,
const char *agent,
- pe_node_t *node,
- pe_working_set_t *data_set);
+ pcmk_node_t *node,
+ pcmk_scheduler_t *scheduler);
+
+G_GNUC_INTERNAL
+void pe__unpack_node_health_scores(pcmk_scheduler_t *scheduler);
+
+// Primitive resource methods
+
+G_GNUC_INTERNAL
+unsigned int pe__primitive_max_per_node(const pcmk_resource_t *rsc);
+
+// Group resource methods
+
+G_GNUC_INTERNAL
+unsigned int pe__group_max_per_node(const pcmk_resource_t *rsc);
+
+// Clone resource methods
+
+G_GNUC_INTERNAL
+unsigned int pe__clone_max_per_node(const pcmk_resource_t *rsc);
+
+// Bundle resource methods
G_GNUC_INTERNAL
-void pe__unpack_node_health_scores(pe_working_set_t *data_set);
+pcmk_node_t *pe__bundle_active_node(const pcmk_resource_t *rsc,
+ unsigned int *count_all,
+ unsigned int *count_clean);
G_GNUC_INTERNAL
-pe_node_t *pe__bundle_active_node(const pe_resource_t *rsc,
- unsigned int *count_all,
- unsigned int *count_clean);
+unsigned int pe__bundle_max_per_node(const pcmk_resource_t *rsc);
#endif // PE_STATUS_PRIVATE__H
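
A hedged call-site sketch for one of the prototypes declared above, pe__bundle_active_node(); bundle_rsc here is a hypothetical, already-unpacked bundle resource and is not taken from the patch.

/* Sketch only: bundle_rsc is a hypothetical bundle resource; the prototype
 * is the one declared in pe_status_private.h above. */
unsigned int count_all = 0;
unsigned int count_clean = 0;
pcmk_node_t *active = pe__bundle_active_node(bundle_rsc, &count_all,
                                             &count_clean);

if (active != NULL) {
    crm_trace("Bundle %s active on %s (%u total, %u clean)",
              bundle_rsc->id, active->details->uname, count_all, count_clean);
}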
diff --git a/lib/pengine/remote.c b/lib/pengine/remote.c
index 769635f..6b5058c 100644
--- a/lib/pengine/remote.c
+++ b/lib/pengine/remote.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2013-2022 the Pacemaker project contributors
+ * Copyright 2013-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -10,41 +10,41 @@
#include <crm_internal.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
+#include <crm/common/scheduler_internal.h>
#include <crm/pengine/internal.h>
#include <glib.h>
bool
-pe__resource_is_remote_conn(const pe_resource_t *rsc,
- const pe_working_set_t *data_set)
+pe__resource_is_remote_conn(const pcmk_resource_t *rsc)
{
return (rsc != NULL) && rsc->is_remote_node
- && pe__is_remote_node(pe_find_node(data_set->nodes, rsc->id));
+ && pe__is_remote_node(pe_find_node(rsc->cluster->nodes, rsc->id));
}
bool
-pe__is_remote_node(const pe_node_t *node)
+pe__is_remote_node(const pcmk_node_t *node)
{
- return (node != NULL) && (node->details->type == node_remote)
+ return (node != NULL) && (node->details->type == pcmk_node_variant_remote)
&& ((node->details->remote_rsc == NULL)
|| (node->details->remote_rsc->container == NULL));
}
bool
-pe__is_guest_node(const pe_node_t *node)
+pe__is_guest_node(const pcmk_node_t *node)
{
- return (node != NULL) && (node->details->type == node_remote)
+ return (node != NULL) && (node->details->type == pcmk_node_variant_remote)
&& (node->details->remote_rsc != NULL)
&& (node->details->remote_rsc->container != NULL);
}
bool
-pe__is_guest_or_remote_node(const pe_node_t *node)
+pe__is_guest_or_remote_node(const pcmk_node_t *node)
{
- return (node != NULL) && (node->details->type == node_remote);
+ return (node != NULL) && (node->details->type == pcmk_node_variant_remote);
}
bool
-pe__is_bundle_node(const pe_node_t *node)
+pe__is_bundle_node(const pcmk_node_t *node)
{
return pe__is_guest_node(node)
&& pe_rsc_is_bundled(node->details->remote_rsc);
@@ -57,20 +57,20 @@ pe__is_bundle_node(const pe_node_t *node)
* If a given resource contains a filler resource that is a remote connection,
* return that filler resource (or NULL if none is found).
*
- * \param[in] data_set Working set of cluster
- * \param[in] rsc Resource to check
+ * \param[in] scheduler Scheduler data
+ * \param[in] rsc Resource to check
*
* \return Filler resource with remote connection, or NULL if none found
*/
-pe_resource_t *
-pe__resource_contains_guest_node(const pe_working_set_t *data_set,
- const pe_resource_t *rsc)
+pcmk_resource_t *
+pe__resource_contains_guest_node(const pcmk_scheduler_t *scheduler,
+ const pcmk_resource_t *rsc)
{
- if ((rsc != NULL) && (data_set != NULL)
- && pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
+ if ((rsc != NULL) && (scheduler != NULL)
+ && pcmk_is_set(scheduler->flags, pcmk_sched_have_remote_nodes)) {
for (GList *gIter = rsc->fillers; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *filler = gIter->data;
+ pcmk_resource_t *filler = gIter->data;
if (filler->is_remote_node) {
return filler;
@@ -111,26 +111,28 @@ xml_contains_remote_node(xmlNode *xml)
* \internal
* \brief Execute a supplied function for each guest node running on a host
*
- * \param[in] data_set Working set for cluster
+ * \param[in] scheduler Scheduler data
* \param[in] host Host node to check
* \param[in] helper Function to call for each guest node
* \param[in,out] user_data Pointer to pass to helper function
*/
void
-pe_foreach_guest_node(const pe_working_set_t *data_set, const pe_node_t *host,
- void (*helper)(const pe_node_t*, void*), void *user_data)
+pe_foreach_guest_node(const pcmk_scheduler_t *scheduler,
+ const pcmk_node_t *host,
+ void (*helper)(const pcmk_node_t*, void*),
+ void *user_data)
{
GList *iter;
- CRM_CHECK(data_set && host && host->details && helper, return);
- if (!pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
+ CRM_CHECK(scheduler && host && host->details && helper, return);
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_have_remote_nodes)) {
return;
}
for (iter = host->details->running_rsc; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (rsc->is_remote_node && (rsc->container != NULL)) {
- pe_node_t *guest_node = pe_find_node(data_set->nodes, rsc->id);
+ pcmk_node_t *guest_node = pe_find_node(scheduler->nodes, rsc->id);
if (guest_node) {
(*helper)(guest_node, user_data);
@@ -203,29 +205,30 @@ pe_create_remote_xml(xmlNode *parent, const char *uname,
// Add operations
xml_sub = create_xml_node(remote, "operations");
- crm_create_op_xml(xml_sub, uname, "monitor", "30s", "30s");
+ crm_create_op_xml(xml_sub, uname, PCMK_ACTION_MONITOR, "30s", "30s");
if (start_timeout) {
- crm_create_op_xml(xml_sub, uname, "start", "0", start_timeout);
+ crm_create_op_xml(xml_sub, uname, PCMK_ACTION_START, "0",
+ start_timeout);
}
return remote;
}
// History entry to be checked for fail count clearing
struct check_op {
- const xmlNode *rsc_op; // History entry XML
- pe_resource_t *rsc; // Known resource corresponding to history entry
- pe_node_t *node; // Known node corresponding to history entry
- enum pe_check_parameters check_type; // What needs checking
+ const xmlNode *rsc_op; // History entry XML
+ pcmk_resource_t *rsc; // Known resource corresponding to history entry
+ pcmk_node_t *node; // Known node corresponding to history entry
+ enum pcmk__check_parameters check_type; // What needs checking
};
void
-pe__add_param_check(const xmlNode *rsc_op, pe_resource_t *rsc,
- pe_node_t *node, enum pe_check_parameters flag,
- pe_working_set_t *data_set)
+pe__add_param_check(const xmlNode *rsc_op, pcmk_resource_t *rsc,
+ pcmk_node_t *node, enum pcmk__check_parameters flag,
+ pcmk_scheduler_t *scheduler)
{
struct check_op *check_op = NULL;
- CRM_CHECK(data_set && rsc_op && rsc && node, return);
+ CRM_CHECK(scheduler && rsc_op && rsc && node, return);
check_op = calloc(1, sizeof(struct check_op));
CRM_ASSERT(check_op != NULL);
@@ -235,24 +238,25 @@ pe__add_param_check(const xmlNode *rsc_op, pe_resource_t *rsc,
check_op->rsc = rsc;
check_op->node = node;
check_op->check_type = flag;
- data_set->param_check = g_list_prepend(data_set->param_check, check_op);
+ scheduler->param_check = g_list_prepend(scheduler->param_check, check_op);
}
/*!
* \internal
* \brief Call a function for each action to be checked for addr substitution
*
- * \param[in,out] data_set Working set for cluster
- * \param[in] cb Function to be called
+ * \param[in,out] scheduler Scheduler data
+ * \param[in] cb Function to be called
*/
void
-pe__foreach_param_check(pe_working_set_t *data_set,
- void (*cb)(pe_resource_t*, pe_node_t*, const xmlNode*,
- enum pe_check_parameters))
+pe__foreach_param_check(pcmk_scheduler_t *scheduler,
+ void (*cb)(pcmk_resource_t*, pcmk_node_t*,
+ const xmlNode*, enum pcmk__check_parameters))
{
- CRM_CHECK(data_set && cb, return);
+ CRM_CHECK(scheduler && cb, return);
- for (GList *item = data_set->param_check; item != NULL; item = item->next) {
+ for (GList *item = scheduler->param_check;
+ item != NULL; item = item->next) {
struct check_op *check_op = item->data;
cb(check_op->rsc, check_op->node, check_op->rsc_op,
@@ -261,10 +265,10 @@ pe__foreach_param_check(pe_working_set_t *data_set,
}
void
-pe__free_param_checks(pe_working_set_t *data_set)
+pe__free_param_checks(pcmk_scheduler_t *scheduler)
{
- if (data_set && data_set->param_check) {
- g_list_free_full(data_set->param_check, free);
- data_set->param_check = NULL;
+ if (scheduler && scheduler->param_check) {
+ g_list_free_full(scheduler->param_check, free);
+ scheduler->param_check = NULL;
}
}
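For orientation, a hedged sketch of how the three functions above fit together as a small queue API (the callback name and the specific pcmk__check_parameters value are assumptions, not taken from this patch):

static void
handle_check(pcmk_resource_t *rsc, pcmk_node_t *node, const xmlNode *rsc_op,
             enum pcmk__check_parameters check_type)
{
    /* re-validate the recorded operation's parameters here */
}

/* Queue one entry per history operation that needs re-checking ... */
pe__add_param_check(rsc_op, rsc, node, pcmk__check_active, scheduler);

/* ... then walk the queue once and release it. */
pe__foreach_param_check(scheduler, handle_check);
pe__free_param_checks(scheduler);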
diff --git a/lib/pengine/rules.c b/lib/pengine/rules.c
index 7021d3c..50f9f64 100644
--- a/lib/pengine/rules.c
+++ b/lib/pengine/rules.c
@@ -41,7 +41,7 @@ pe_evaluate_rules(xmlNode *ruleset, GHashTable *node_hash, crm_time_t *now,
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
- .role = RSC_ROLE_UNKNOWN,
+ .role = pcmk_role_unknown,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
@@ -104,25 +104,23 @@ pe_test_expression(xmlNode *expr, GHashTable *node_hash, enum rsc_role_e role,
enum expression_type
find_expression_type(xmlNode * expr)
{
- const char *tag = NULL;
const char *attr = NULL;
attr = crm_element_value(expr, XML_EXPR_ATTR_ATTRIBUTE);
- tag = crm_element_name(expr);
- if (pcmk__str_eq(tag, PCMK_XE_DATE_EXPRESSION, pcmk__str_none)) {
+ if (pcmk__xe_is(expr, PCMK_XE_DATE_EXPRESSION)) {
return time_expr;
- } else if (pcmk__str_eq(tag, PCMK_XE_RSC_EXPRESSION, pcmk__str_none)) {
+ } else if (pcmk__xe_is(expr, PCMK_XE_RSC_EXPRESSION)) {
return rsc_expr;
- } else if (pcmk__str_eq(tag, PCMK_XE_OP_EXPRESSION, pcmk__str_none)) {
+ } else if (pcmk__xe_is(expr, PCMK_XE_OP_EXPRESSION)) {
return op_expr;
- } else if (pcmk__str_eq(tag, XML_TAG_RULE, pcmk__str_none)) {
+ } else if (pcmk__xe_is(expr, XML_TAG_RULE)) {
return nested_rule;
- } else if (!pcmk__str_eq(tag, XML_TAG_EXPRESSION, pcmk__str_none)) {
+ } else if (!pcmk__xe_is(expr, XML_TAG_EXPRESSION)) {
return not_expr;
} else if (pcmk__str_any_of(attr, CRM_ATTR_UNAME, CRM_ATTR_KIND, CRM_ATTR_ID, NULL)) {
@@ -320,6 +318,7 @@ typedef struct sorted_set_s {
const char *name; // This block's ID
const char *special_name; // ID that should sort first
xmlNode *attr_set; // This block
+ gboolean overwrite; // Whether existing values will be overwritten
} sorted_set_t;
static gint
@@ -343,10 +342,14 @@ sort_pairs(gconstpointer a, gconstpointer b)
return 1;
}
+ /* If we're overwriting values, we want lowest score first, so the highest
+ * score is processed last; if we're not overwriting values, we want highest
+ * score first, so nothing else overwrites it.
+ */
if (pair_a->score < pair_b->score) {
- return 1;
+ return pair_a->overwrite? -1 : 1;
} else if (pair_a->score > pair_b->score) {
- return -1;
+ return pair_a->overwrite? 1 : -1;
}
return 0;
}
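Concretely (an illustrative example, not part of the patch): given attribute blocks A with score 10 and B with score 20, overwrite mode sorts A before B, so B is unpacked last and its values replace A's; non-overwrite mode sorts B before A, so B's values are set first and A cannot displace them.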
@@ -360,8 +363,7 @@ populate_hash(xmlNode * nvpair_list, GHashTable * hash, gboolean overwrite, xmlN
xmlNode *list = nvpair_list;
xmlNode *an_attr = NULL;
- name = crm_element_name(list->children);
- if (pcmk__str_eq(XML_TAG_ATTRS, name, pcmk__str_casei)) {
+ if (pcmk__xe_is(list->children, XML_TAG_ATTRS)) {
list = list->children;
}
@@ -446,7 +448,7 @@ unpack_attr_set(gpointer data, gpointer user_data)
*/
static GList *
make_pairs(xmlNode *top, const xmlNode *xml_obj, const char *set_name,
- const char *always_first)
+ const char *always_first, gboolean overwrite)
{
GList *unsorted = NULL;
@@ -471,6 +473,7 @@ make_pairs(xmlNode *top, const xmlNode *xml_obj, const char *set_name,
pair->name = ID(expanded_attr_set);
pair->special_name = always_first;
pair->attr_set = expanded_attr_set;
+ pair->overwrite = overwrite;
score = crm_element_value(expanded_attr_set, XML_RULE_ATTR_SCORE);
pair->score = char2score(score);
@@ -499,7 +502,7 @@ pe_eval_nvpairs(xmlNode *top, const xmlNode *xml_obj, const char *set_name,
const char *always_first, gboolean overwrite,
crm_time_t *next_change)
{
- GList *pairs = make_pairs(top, xml_obj, set_name, always_first);
+ GList *pairs = make_pairs(top, xml_obj, set_name, always_first, overwrite);
if (pairs) {
unpack_data_t data = {
@@ -536,7 +539,7 @@ pe_unpack_nvpairs(xmlNode *top, const xmlNode *xml_obj, const char *set_name,
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
- .role = RSC_ROLE_UNKNOWN,
+ .role = pcmk_role_unknown,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
@@ -1161,7 +1164,7 @@ pe__eval_role_expr(const xmlNode *expr, const pe_rule_eval_data_t *rule_data)
const char *op = NULL;
const char *value = NULL;
- if (rule_data->role == RSC_ROLE_UNKNOWN) {
+ if (rule_data->role == pcmk_role_unknown) {
return accept;
}
@@ -1169,13 +1172,13 @@ pe__eval_role_expr(const xmlNode *expr, const pe_rule_eval_data_t *rule_data)
op = crm_element_value(expr, XML_EXPR_ATTR_OPERATION);
if (pcmk__str_eq(op, "defined", pcmk__str_casei)) {
- if (rule_data->role > RSC_ROLE_STARTED) {
+ if (rule_data->role > pcmk_role_started) {
accept = TRUE;
}
} else if (pcmk__str_eq(op, "not_defined", pcmk__str_casei)) {
- if ((rule_data->role > RSC_ROLE_UNKNOWN)
- && (rule_data->role < RSC_ROLE_UNPROMOTED)) {
+ if ((rule_data->role > pcmk_role_unknown)
+ && (rule_data->role < pcmk_role_unpromoted)) {
accept = TRUE;
}
@@ -1186,8 +1189,8 @@ pe__eval_role_expr(const xmlNode *expr, const pe_rule_eval_data_t *rule_data)
} else if (pcmk__str_eq(op, "ne", pcmk__str_casei)) {
// Test "ne" only with promotable clone roles
- if ((rule_data->role > RSC_ROLE_UNKNOWN)
- && (rule_data->role < RSC_ROLE_UNPROMOTED)) {
+ if ((rule_data->role > pcmk_role_unknown)
+ && (rule_data->role < pcmk_role_unpromoted)) {
accept = FALSE;
} else if (text2role(value) != rule_data->role) {
@@ -1301,7 +1304,7 @@ unpack_instance_attributes(xmlNode *top, xmlNode *xml_obj, const char *set_name,
{
pe_rule_eval_data_t rule_data = {
.node_hash = node_hash,
- .role = RSC_ROLE_UNKNOWN,
+ .role = pcmk_role_unknown,
.now = now,
.match_data = NULL,
.rsc_data = NULL,
diff --git a/lib/pengine/rules_alerts.c b/lib/pengine/rules_alerts.c
index 073b0c1..9eed7ff 100644
--- a/lib/pengine/rules_alerts.c
+++ b/lib/pengine/rules_alerts.c
@@ -123,21 +123,16 @@ unpack_alert_filter(xmlNode *basenode, pcmk__alert_t *entry)
for (event_type = pcmk__xe_first_child(select); event_type != NULL;
event_type = pcmk__xe_next(event_type)) {
- const char *tagname = crm_element_name(event_type);
-
- if (tagname == NULL) {
- continue;
-
- } else if (!strcmp(tagname, XML_CIB_TAG_ALERT_FENCING)) {
+ if (pcmk__xe_is(event_type, XML_CIB_TAG_ALERT_FENCING)) {
flags |= pcmk__alert_fencing;
- } else if (!strcmp(tagname, XML_CIB_TAG_ALERT_NODES)) {
+ } else if (pcmk__xe_is(event_type, XML_CIB_TAG_ALERT_NODES)) {
flags |= pcmk__alert_node;
- } else if (!strcmp(tagname, XML_CIB_TAG_ALERT_RESOURCES)) {
+ } else if (pcmk__xe_is(event_type, XML_CIB_TAG_ALERT_RESOURCES)) {
flags |= pcmk__alert_resource;
- } else if (!strcmp(tagname, XML_CIB_TAG_ALERT_ATTRIBUTES)) {
+ } else if (pcmk__xe_is(event_type, XML_CIB_TAG_ALERT_ATTRIBUTES)) {
xmlNode *attr;
const char *attr_name;
int nattrs = 0;
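The same conversion recurs throughout this patch; a hedged before/after sketch of the pattern (node and the tag constant are placeholders):

/* before: the element name can be NULL and needs an explicit guard */
const char *tag = crm_element_name(node);
if ((tag != NULL) && (strcmp(tag, XML_CIB_TAG_ALERT_NODES) == 0)) {
    /* handle node-event selection */
}

/* after: a single call compares the element name for us */
if (pcmk__xe_is(node, XML_CIB_TAG_ALERT_NODES)) {
    /* handle node-event selection */
}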
diff --git a/lib/pengine/status.c b/lib/pengine/status.c
index b1144eb..e6ec237 100644
--- a/lib/pengine/status.c
+++ b/lib/pengine/status.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -21,38 +21,38 @@
#include <pe_status_private.h>
/*!
- * \brief Create a new working set
+ * \brief Create a new object to hold scheduler data
*
- * \return New, initialized working set on success, else NULL (and set errno)
- * \note Only pe_working_set_t objects created with this function (as opposed
+ * \return New, initialized scheduler data on success, else NULL (and set errno)
+ * \note Only pcmk_scheduler_t objects created with this function (as opposed
* to statically declared or directly allocated) should be used with the
* functions in this library, to allow for future extensions to the
* data type. The caller is responsible for freeing the memory with
* pe_free_working_set() when the instance is no longer needed.
*/
-pe_working_set_t *
+pcmk_scheduler_t *
pe_new_working_set(void)
{
- pe_working_set_t *data_set = calloc(1, sizeof(pe_working_set_t));
+ pcmk_scheduler_t *scheduler = calloc(1, sizeof(pcmk_scheduler_t));
- if (data_set != NULL) {
- set_working_set_defaults(data_set);
+ if (scheduler != NULL) {
+ set_working_set_defaults(scheduler);
}
- return data_set;
+ return scheduler;
}
/*!
- * \brief Free a working set
+ * \brief Free scheduler data
*
- * \param[in,out] data_set Working set to free
+ * \param[in,out] scheduler Scheduler data to free
*/
void
-pe_free_working_set(pe_working_set_t *data_set)
+pe_free_working_set(pcmk_scheduler_t *scheduler)
{
- if (data_set != NULL) {
- pe_reset_working_set(data_set);
- data_set->priv = NULL;
- free(data_set);
+ if (scheduler != NULL) {
+ pe_reset_working_set(scheduler);
+ scheduler->priv = NULL;
+ free(scheduler);
}
}
@@ -68,105 +68,105 @@ pe_free_working_set(pe_working_set_t *data_set)
* - A list of the possible stop/start actions (without dependencies)
*/
gboolean
-cluster_status(pe_working_set_t * data_set)
+cluster_status(pcmk_scheduler_t * scheduler)
{
xmlNode *section = NULL;
- if ((data_set == NULL) || (data_set->input == NULL)) {
+ if ((scheduler == NULL) || (scheduler->input == NULL)) {
return FALSE;
}
crm_trace("Beginning unpack");
- if (data_set->failed != NULL) {
- free_xml(data_set->failed);
+ if (scheduler->failed != NULL) {
+ free_xml(scheduler->failed);
}
- data_set->failed = create_xml_node(NULL, "failed-ops");
+ scheduler->failed = create_xml_node(NULL, "failed-ops");
- if (data_set->now == NULL) {
- data_set->now = crm_time_new(NULL);
+ if (scheduler->now == NULL) {
+ scheduler->now = crm_time_new(NULL);
}
- if (data_set->dc_uuid == NULL) {
- data_set->dc_uuid = crm_element_value_copy(data_set->input,
- XML_ATTR_DC_UUID);
+ if (scheduler->dc_uuid == NULL) {
+ scheduler->dc_uuid = crm_element_value_copy(scheduler->input,
+ XML_ATTR_DC_UUID);
}
- if (pcmk__xe_attr_is_true(data_set->input, XML_ATTR_HAVE_QUORUM)) {
- pe__set_working_set_flags(data_set, pe_flag_have_quorum);
+ if (pcmk__xe_attr_is_true(scheduler->input, XML_ATTR_HAVE_QUORUM)) {
+ pe__set_working_set_flags(scheduler, pcmk_sched_quorate);
} else {
- pe__clear_working_set_flags(data_set, pe_flag_have_quorum);
+ pe__clear_working_set_flags(scheduler, pcmk_sched_quorate);
}
- data_set->op_defaults = get_xpath_object("//" XML_CIB_TAG_OPCONFIG,
- data_set->input, LOG_NEVER);
- data_set->rsc_defaults = get_xpath_object("//" XML_CIB_TAG_RSCCONFIG,
- data_set->input, LOG_NEVER);
+ scheduler->op_defaults = get_xpath_object("//" XML_CIB_TAG_OPCONFIG,
+ scheduler->input, LOG_NEVER);
+ scheduler->rsc_defaults = get_xpath_object("//" XML_CIB_TAG_RSCCONFIG,
+ scheduler->input, LOG_NEVER);
- section = get_xpath_object("//" XML_CIB_TAG_CRMCONFIG, data_set->input,
+ section = get_xpath_object("//" XML_CIB_TAG_CRMCONFIG, scheduler->input,
LOG_TRACE);
- unpack_config(section, data_set);
+ unpack_config(section, scheduler);
- if (!pcmk_any_flags_set(data_set->flags,
- pe_flag_quick_location|pe_flag_have_quorum)
- && (data_set->no_quorum_policy != no_quorum_ignore)) {
+ if (!pcmk_any_flags_set(scheduler->flags,
+ pcmk_sched_location_only|pcmk_sched_quorate)
+ && (scheduler->no_quorum_policy != pcmk_no_quorum_ignore)) {
crm_warn("Fencing and resource management disabled due to lack of quorum");
}
- section = get_xpath_object("//" XML_CIB_TAG_NODES, data_set->input,
+ section = get_xpath_object("//" XML_CIB_TAG_NODES, scheduler->input,
LOG_TRACE);
- unpack_nodes(section, data_set);
+ unpack_nodes(section, scheduler);
- section = get_xpath_object("//" XML_CIB_TAG_RESOURCES, data_set->input,
+ section = get_xpath_object("//" XML_CIB_TAG_RESOURCES, scheduler->input,
LOG_TRACE);
- if (!pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
- unpack_remote_nodes(section, data_set);
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
+ unpack_remote_nodes(section, scheduler);
}
- unpack_resources(section, data_set);
+ unpack_resources(section, scheduler);
- section = get_xpath_object("//" XML_CIB_TAG_TAGS, data_set->input,
+ section = get_xpath_object("//" XML_CIB_TAG_TAGS, scheduler->input,
LOG_NEVER);
- unpack_tags(section, data_set);
+ unpack_tags(section, scheduler);
- if (!pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
- section = get_xpath_object("//"XML_CIB_TAG_STATUS, data_set->input,
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
+ section = get_xpath_object("//"XML_CIB_TAG_STATUS, scheduler->input,
LOG_TRACE);
- unpack_status(section, data_set);
+ unpack_status(section, scheduler);
}
- if (!pcmk_is_set(data_set->flags, pe_flag_no_counts)) {
- for (GList *item = data_set->resources; item != NULL;
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_no_counts)) {
+ for (GList *item = scheduler->resources; item != NULL;
item = item->next) {
- ((pe_resource_t *) (item->data))->fns->count(item->data);
+ ((pcmk_resource_t *) (item->data))->fns->count(item->data);
}
crm_trace("Cluster resource count: %d (%d disabled, %d blocked)",
- data_set->ninstances, data_set->disabled_resources,
- data_set->blocked_resources);
+ scheduler->ninstances, scheduler->disabled_resources,
+ scheduler->blocked_resources);
}
- pe__set_working_set_flags(data_set, pe_flag_have_status);
+ pe__set_working_set_flags(scheduler, pcmk_sched_have_status);
return TRUE;
}
/*!
* \internal
- * \brief Free a list of pe_resource_t
+ * \brief Free a list of pcmk_resource_t
*
* \param[in,out] resources List to free
*
- * \note When a working set's resource list is freed, that includes the original
+ * \note When the scheduler's resource list is freed, that includes the original
* storage for the uname and id of any Pacemaker Remote nodes in the
- * working set's node list, so take care not to use those afterward.
- * \todo Refactor pe_node_t to strdup() the node name.
+ * scheduler's node list, so take care not to use those afterward.
+ * \todo Refactor pcmk_node_t to strdup() the node name.
*/
static void
pe_free_resources(GList *resources)
{
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
GList *iterator = resources;
while (iterator != NULL) {
- rsc = (pe_resource_t *) iterator->data;
+ rsc = (pcmk_resource_t *) iterator->data;
iterator = iterator->next;
rsc->fns->free(rsc);
}
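Taken together, the renamed creation, unpacking, and cleanup entry points above are typically used in the sequence below (a minimal sketch that mirrors the unit-test setup later in this patch; cib_xml stands in for previously parsed CIB XML):

pcmk_scheduler_t *scheduler = pe_new_working_set();

if (scheduler != NULL) {
    scheduler->input = cib_xml;          // freed later along with the scheduler data
    if (cluster_status(scheduler)) {
        /* scheduler->nodes and scheduler->resources are now populated */
    }
    pe_free_working_set(scheduler);      // resets and frees everything unpacked above
}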
@@ -193,7 +193,7 @@ static void
pe_free_nodes(GList *nodes)
{
for (GList *iterator = nodes; iterator != NULL; iterator = iterator->next) {
- pe_node_t *node = (pe_node_t *) iterator->data;
+ pcmk_node_t *node = (pcmk_node_t *) iterator->data;
// Shouldn't be possible, but to be safe ...
if (node == NULL) {
@@ -268,140 +268,140 @@ pe__free_location(GList *constraints)
}
/*!
- * \brief Reset working set to default state without freeing it or constraints
+ * \brief Reset scheduler data to defaults without freeing it or constraints
*
- * \param[in,out] data_set Working set to reset
+ * \param[in,out] scheduler Scheduler data to reset
*
* \deprecated This function is deprecated as part of the API;
* pe_reset_working_set() should be used instead.
*/
void
-cleanup_calculations(pe_working_set_t * data_set)
+cleanup_calculations(pcmk_scheduler_t *scheduler)
{
- if (data_set == NULL) {
+ if (scheduler == NULL) {
return;
}
- pe__clear_working_set_flags(data_set, pe_flag_have_status);
- if (data_set->config_hash != NULL) {
- g_hash_table_destroy(data_set->config_hash);
+ pe__clear_working_set_flags(scheduler, pcmk_sched_have_status);
+ if (scheduler->config_hash != NULL) {
+ g_hash_table_destroy(scheduler->config_hash);
}
- if (data_set->singletons != NULL) {
- g_hash_table_destroy(data_set->singletons);
+ if (scheduler->singletons != NULL) {
+ g_hash_table_destroy(scheduler->singletons);
}
- if (data_set->tickets) {
- g_hash_table_destroy(data_set->tickets);
+ if (scheduler->tickets) {
+ g_hash_table_destroy(scheduler->tickets);
}
- if (data_set->template_rsc_sets) {
- g_hash_table_destroy(data_set->template_rsc_sets);
+ if (scheduler->template_rsc_sets) {
+ g_hash_table_destroy(scheduler->template_rsc_sets);
}
- if (data_set->tags) {
- g_hash_table_destroy(data_set->tags);
+ if (scheduler->tags) {
+ g_hash_table_destroy(scheduler->tags);
}
- free(data_set->dc_uuid);
+ free(scheduler->dc_uuid);
crm_trace("deleting resources");
- pe_free_resources(data_set->resources);
+ pe_free_resources(scheduler->resources);
crm_trace("deleting actions");
- pe_free_actions(data_set->actions);
+ pe_free_actions(scheduler->actions);
crm_trace("deleting nodes");
- pe_free_nodes(data_set->nodes);
+ pe_free_nodes(scheduler->nodes);
- pe__free_param_checks(data_set);
- g_list_free(data_set->stop_needed);
- free_xml(data_set->graph);
- crm_time_free(data_set->now);
- free_xml(data_set->input);
- free_xml(data_set->failed);
+ pe__free_param_checks(scheduler);
+ g_list_free(scheduler->stop_needed);
+ free_xml(scheduler->graph);
+ crm_time_free(scheduler->now);
+ free_xml(scheduler->input);
+ free_xml(scheduler->failed);
- set_working_set_defaults(data_set);
+ set_working_set_defaults(scheduler);
- CRM_CHECK(data_set->ordering_constraints == NULL,;
+ CRM_CHECK(scheduler->ordering_constraints == NULL,;
);
- CRM_CHECK(data_set->placement_constraints == NULL,;
+ CRM_CHECK(scheduler->placement_constraints == NULL,;
);
}
/*!
- * \brief Reset a working set to default state without freeing it
+ * \brief Reset scheduler data to default state without freeing it
*
- * \param[in,out] data_set Working set to reset
+ * \param[in,out] scheduler Scheduler data to reset
*/
void
-pe_reset_working_set(pe_working_set_t *data_set)
+pe_reset_working_set(pcmk_scheduler_t *scheduler)
{
- if (data_set == NULL) {
+ if (scheduler == NULL) {
return;
}
crm_trace("Deleting %d ordering constraints",
- g_list_length(data_set->ordering_constraints));
- pe__free_ordering(data_set->ordering_constraints);
- data_set->ordering_constraints = NULL;
+ g_list_length(scheduler->ordering_constraints));
+ pe__free_ordering(scheduler->ordering_constraints);
+ scheduler->ordering_constraints = NULL;
crm_trace("Deleting %d location constraints",
- g_list_length(data_set->placement_constraints));
- pe__free_location(data_set->placement_constraints);
- data_set->placement_constraints = NULL;
+ g_list_length(scheduler->placement_constraints));
+ pe__free_location(scheduler->placement_constraints);
+ scheduler->placement_constraints = NULL;
crm_trace("Deleting %d colocation constraints",
- g_list_length(data_set->colocation_constraints));
- g_list_free_full(data_set->colocation_constraints, free);
- data_set->colocation_constraints = NULL;
+ g_list_length(scheduler->colocation_constraints));
+ g_list_free_full(scheduler->colocation_constraints, free);
+ scheduler->colocation_constraints = NULL;
crm_trace("Deleting %d ticket constraints",
- g_list_length(data_set->ticket_constraints));
- g_list_free_full(data_set->ticket_constraints, free);
- data_set->ticket_constraints = NULL;
+ g_list_length(scheduler->ticket_constraints));
+ g_list_free_full(scheduler->ticket_constraints, free);
+ scheduler->ticket_constraints = NULL;
- cleanup_calculations(data_set);
+ cleanup_calculations(scheduler);
}
void
-set_working_set_defaults(pe_working_set_t * data_set)
+set_working_set_defaults(pcmk_scheduler_t *scheduler)
{
- void *priv = data_set->priv;
+ void *priv = scheduler->priv;
- memset(data_set, 0, sizeof(pe_working_set_t));
+ memset(scheduler, 0, sizeof(pcmk_scheduler_t));
- data_set->priv = priv;
- data_set->order_id = 1;
- data_set->action_id = 1;
- data_set->no_quorum_policy = no_quorum_stop;
+ scheduler->priv = priv;
+ scheduler->order_id = 1;
+ scheduler->action_id = 1;
+ scheduler->no_quorum_policy = pcmk_no_quorum_stop;
- data_set->flags = 0x0ULL;
+ scheduler->flags = 0x0ULL;
- pe__set_working_set_flags(data_set,
- pe_flag_stop_rsc_orphans
- |pe_flag_symmetric_cluster
- |pe_flag_stop_action_orphans);
+ pe__set_working_set_flags(scheduler,
+ pcmk_sched_symmetric_cluster
+ |pcmk_sched_stop_removed_resources
+ |pcmk_sched_cancel_removed_actions);
if (!strcmp(PCMK__CONCURRENT_FENCING_DEFAULT, "true")) {
- pe__set_working_set_flags(data_set, pe_flag_concurrent_fencing);
+ pe__set_working_set_flags(scheduler, pcmk_sched_concurrent_fencing);
}
}
-pe_resource_t *
+pcmk_resource_t *
pe_find_resource(GList *rsc_list, const char *id)
{
- return pe_find_resource_with_flags(rsc_list, id, pe_find_renamed);
+ return pe_find_resource_with_flags(rsc_list, id, pcmk_rsc_match_history);
}
-pe_resource_t *
+pcmk_resource_t *
pe_find_resource_with_flags(GList *rsc_list, const char *id, enum pe_find flags)
{
GList *rIter = NULL;
for (rIter = rsc_list; id && rIter; rIter = rIter->next) {
- pe_resource_t *parent = rIter->data;
+ pcmk_resource_t *parent = rIter->data;
- pe_resource_t *match =
+ pcmk_resource_t *match =
parent->fns->find_rsc(parent, id, NULL, flags);
if (match != NULL) {
return match;
@@ -414,7 +414,7 @@ pe_find_resource_with_flags(GList *rsc_list, const char *id, enum pe_find flags)
/*!
* \brief Find a node by name or ID in a list of nodes
*
- * \param[in] nodes List of nodes (as pe_node_t*)
+ * \param[in] nodes List of nodes (as pcmk_node_t*)
* \param[in] id If not NULL, ID of node to find
* \param[in] node_name If not NULL, name of node to find
*
@@ -422,10 +422,10 @@ pe_find_resource_with_flags(GList *rsc_list, const char *id, enum pe_find flags)
* otherwise node from \p nodes that matches \p uname if any,
* otherwise NULL
*/
-pe_node_t *
+pcmk_node_t *
pe_find_node_any(const GList *nodes, const char *id, const char *uname)
{
- pe_node_t *match = NULL;
+ pcmk_node_t *match = NULL;
if (id != NULL) {
match = pe_find_node_id(nodes, id);
@@ -439,16 +439,16 @@ pe_find_node_any(const GList *nodes, const char *id, const char *uname)
/*!
* \brief Find a node by ID in a list of nodes
*
- * \param[in] nodes List of nodes (as pe_node_t*)
+ * \param[in] nodes List of nodes (as pcmk_node_t*)
* \param[in] id ID of node to find
*
* \return Node from \p nodes that matches \p id if any, otherwise NULL
*/
-pe_node_t *
+pcmk_node_t *
pe_find_node_id(const GList *nodes, const char *id)
{
for (const GList *iter = nodes; iter != NULL; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) iter->data;
+ pcmk_node_t *node = (pcmk_node_t *) iter->data;
/* @TODO Whether node IDs should be considered case-sensitive should
* probably depend on the node type, so functionizing the comparison
@@ -464,16 +464,16 @@ pe_find_node_id(const GList *nodes, const char *id)
/*!
* \brief Find a node by name in a list of nodes
*
- * \param[in] nodes List of nodes (as pe_node_t*)
+ * \param[in] nodes List of nodes (as pcmk_node_t*)
* \param[in] node_name Name of node to find
*
* \return Node from \p nodes that matches \p node_name if any, otherwise NULL
*/
-pe_node_t *
+pcmk_node_t *
pe_find_node(const GList *nodes, const char *node_name)
{
for (const GList *iter = nodes; iter != NULL; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) iter->data;
+ pcmk_node_t *node = (pcmk_node_t *) iter->data;
if (pcmk__str_eq(node->details->uname, node_name, pcmk__str_casei)) {
return node;
diff --git a/lib/pengine/tags.c b/lib/pengine/tags.c
index 81c27e4..d8d8ac9 100644
--- a/lib/pengine/tags.c
+++ b/lib/pengine/tags.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2020-2021 the Pacemaker project contributors
+ * Copyright 2020-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -13,29 +13,30 @@
#include <stdbool.h>
#include <crm/common/util.h>
+#include <crm/common/scheduler.h>
#include <crm/pengine/internal.h>
-#include <crm/pengine/pe_types.h>
GList *
-pe__rscs_with_tag(pe_working_set_t *data_set, const char *tag_name)
+pe__rscs_with_tag(pcmk_scheduler_t *scheduler, const char *tag_name)
{
gpointer value;
GList *retval = NULL;
- if (data_set->tags == NULL) {
+ if (scheduler->tags == NULL) {
return retval;
}
- value = g_hash_table_lookup(data_set->tags, tag_name);
+ value = g_hash_table_lookup(scheduler->tags, tag_name);
if (value == NULL) {
return retval;
}
- for (GList *refs = ((pe_tag_t *) value)->refs; refs; refs = refs->next) {
+ for (GList *refs = ((pcmk_tag_t *) value)->refs; refs; refs = refs->next) {
const char *id = (const char *) refs->data;
- pe_resource_t *rsc = pe_find_resource_with_flags(data_set->resources, id,
- pe_find_renamed|pe_find_any);
+ const uint32_t flags = pcmk_rsc_match_history|pcmk_rsc_match_basename;
+ pcmk_resource_t *rsc = pe_find_resource_with_flags(scheduler->resources,
+ id, flags);
if (!rsc) {
continue;
@@ -48,26 +49,26 @@ pe__rscs_with_tag(pe_working_set_t *data_set, const char *tag_name)
}
GList *
-pe__unames_with_tag(pe_working_set_t *data_set, const char *tag_name)
+pe__unames_with_tag(pcmk_scheduler_t *scheduler, const char *tag_name)
{
gpointer value;
GList *retval = NULL;
- if (data_set->tags == NULL) {
+ if (scheduler->tags == NULL) {
return retval;
}
- value = g_hash_table_lookup(data_set->tags, tag_name);
+ value = g_hash_table_lookup(scheduler->tags, tag_name);
if (value == NULL) {
return retval;
}
/* Iterate over the list of node IDs. */
- for (GList *refs = ((pe_tag_t *) value)->refs; refs; refs = refs->next) {
+ for (GList *refs = ((pcmk_tag_t *) value)->refs; refs; refs = refs->next) {
/* Find the node that has this ID. */
const char *id = (const char *) refs->data;
- pe_node_t *node = pe_find_node_id(data_set->nodes, id);
+ pcmk_node_t *node = pe_find_node_id(scheduler->nodes, id);
if (!node) {
continue;
@@ -81,9 +82,10 @@ pe__unames_with_tag(pe_working_set_t *data_set, const char *tag_name)
}
bool
-pe__rsc_has_tag(pe_working_set_t *data_set, const char *rsc_name, const char *tag_name)
+pe__rsc_has_tag(pcmk_scheduler_t *scheduler, const char *rsc_name,
+ const char *tag_name)
{
- GList *rscs = pe__rscs_with_tag(data_set, tag_name);
+ GList *rscs = pe__rscs_with_tag(scheduler, tag_name);
bool retval = false;
if (rscs == NULL) {
@@ -96,9 +98,10 @@ pe__rsc_has_tag(pe_working_set_t *data_set, const char *rsc_name, const char *ta
}
bool
-pe__uname_has_tag(pe_working_set_t *data_set, const char *node_name, const char *tag_name)
+pe__uname_has_tag(pcmk_scheduler_t *scheduler, const char *node_name,
+ const char *tag_name)
{
- GList *unames = pe__unames_with_tag(data_set, tag_name);
+ GList *unames = pe__unames_with_tag(scheduler, tag_name);
bool retval = false;
if (unames == NULL) {
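As a short usage sketch of the tag helpers above (illustrative; the tag, resource, and node names are hypothetical):

if (pe__rsc_has_tag(scheduler, "Public-IP", "web-tier")) {
    /* the resource named Public-IP carries the web-tier tag */
}

if (pe__uname_has_tag(scheduler, "cluster01", "critical-nodes")) {
    /* the node cluster01 is referenced by the critical-nodes tag */
}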
diff --git a/lib/pengine/tests/Makefile.am b/lib/pengine/tests/Makefile.am
index 4986ef2..48ec5b4 100644
--- a/lib/pengine/tests/Makefile.am
+++ b/lib/pengine/tests/Makefile.am
@@ -1 +1,14 @@
-SUBDIRS = rules native status unpack utils
+#
+# Copyright 2020-2023 the Pacemaker project contributors
+#
+# The version control history for this file may have further details.
+#
+# This source code is licensed under the GNU General Public License version 2
+# or later (GPLv2+) WITHOUT ANY WARRANTY.
+#
+
+SUBDIRS = rules \
+ native \
+ status \
+ unpack \
+ utils
diff --git a/lib/pengine/tests/native/Makefile.am b/lib/pengine/tests/native/Makefile.am
index 5046ff1..07cc1a1 100644
--- a/lib/pengine/tests/native/Makefile.am
+++ b/lib/pengine/tests/native/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2022 the Pacemaker project contributors
+# Copyright 2022-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -17,6 +17,6 @@ AM_TESTS_ENVIRONMENT += PCMK_CTS_CLI_DIR=$(top_srcdir)/cts/cli
# Add "_test" to the end of all test program names to simplify .gitignore.
check_PROGRAMS = native_find_rsc_test \
- pe_base_name_eq_test
+ pe_base_name_eq_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/pengine/tests/native/native_find_rsc_test.c b/lib/pengine/tests/native/native_find_rsc_test.c
index 22aaf41..b85ca24 100644
--- a/lib/pengine/tests/native/native_find_rsc_test.c
+++ b/lib/pengine/tests/native/native_find_rsc_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -10,21 +10,18 @@
#include <crm_internal.h>
#include <crm/common/unittest_internal.h>
+#include <crm/common/scheduler.h>
#include <crm/common/xml.h>
#include <crm/pengine/internal.h>
#include <crm/pengine/status.h>
-#include <crm/pengine/pe_types.h>
-
-/* Needed to access replicas inside a bundle. */
-#define PE__VARIANT_BUNDLE 1
-#include <lib/pengine/variant.h>
xmlNode *input = NULL;
-pe_working_set_t *data_set = NULL;
+pcmk_scheduler_t *scheduler = NULL;
-pe_node_t *cluster01, *cluster02, *httpd_bundle_0;
-pe_resource_t *exim_group, *inactive_group, *promotable_clone, *inactive_clone;
-pe_resource_t *httpd_bundle, *mysql_clone_group;
+pcmk_node_t *cluster01, *cluster02, *httpd_bundle_0;
+pcmk_resource_t *exim_group, *inactive_group;
+pcmk_resource_t *promotable_clone, *inactive_clone;
+pcmk_resource_t *httpd_bundle, *mysql_clone_group;
static int
setup(void **state) {
@@ -40,25 +37,26 @@ setup(void **state) {
return 1;
}
- data_set = pe_new_working_set();
+ scheduler = pe_new_working_set();
- if (data_set == NULL) {
+ if (scheduler == NULL) {
return 1;
}
- pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
- data_set->input = input;
+ pe__set_working_set_flags(scheduler,
+ pcmk_sched_no_counts|pcmk_sched_no_compat);
+ scheduler->input = input;
- cluster_status(data_set);
+ cluster_status(scheduler);
/* Get references to the cluster nodes so we don't have to find them repeatedly. */
- cluster01 = pe_find_node(data_set->nodes, "cluster01");
- cluster02 = pe_find_node(data_set->nodes, "cluster02");
- httpd_bundle_0 = pe_find_node(data_set->nodes, "httpd-bundle-0");
+ cluster01 = pe_find_node(scheduler->nodes, "cluster01");
+ cluster02 = pe_find_node(scheduler->nodes, "cluster02");
+ httpd_bundle_0 = pe_find_node(scheduler->nodes, "httpd-bundle-0");
/* Get references to several resources we use frequently. */
- for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "exim-group") == 0) {
exim_group = rsc;
@@ -80,14 +78,14 @@ setup(void **state) {
static int
teardown(void **state) {
- pe_free_working_set(data_set);
+ pe_free_working_set(scheduler);
return 0;
}
static void
bad_args(void **state) {
- pe_resource_t *rsc = (pe_resource_t *) g_list_first(data_set->resources)->data;
+ pcmk_resource_t *rsc = g_list_first(scheduler->resources)->data;
char *id = rsc->id;
char *name = NULL;
@@ -117,11 +115,11 @@ bad_args(void **state) {
static void
primitive_rsc(void **state) {
- pe_resource_t *dummy = NULL;
+ pcmk_resource_t *dummy = NULL;
/* Find the "dummy" resource, which is the only one with that ID in the set. */
- for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "dummy") == 0) {
dummy = rsc;
@@ -133,20 +131,27 @@ primitive_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(dummy, native_find_rsc(dummy, "dummy", NULL, 0));
- assert_ptr_equal(dummy, native_find_rsc(dummy, "dummy", NULL, pe_find_current));
+ assert_ptr_equal(dummy,
+ native_find_rsc(dummy, "dummy", NULL,
+ pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
- assert_null(native_find_rsc(dummy, "dummy", NULL, pe_find_clone));
- assert_null(native_find_rsc(dummy, "dummy", cluster02, pe_find_clone));
+ assert_null(native_find_rsc(dummy, "dummy", NULL,
+ pcmk_rsc_match_clone_only));
+ assert_null(native_find_rsc(dummy, "dummy", cluster02,
+ pcmk_rsc_match_clone_only));
/* Fails because dummy is not running on cluster01, even with the right flags. */
- assert_null(native_find_rsc(dummy, "dummy", cluster01, pe_find_current));
+ assert_null(native_find_rsc(dummy, "dummy", cluster01,
+ pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(dummy, "dummy", cluster02, 0));
/* Passes because dummy is running on cluster02. */
- assert_ptr_equal(dummy, native_find_rsc(dummy, "dummy", cluster02, pe_find_current));
+ assert_ptr_equal(dummy,
+ native_find_rsc(dummy, "dummy", cluster02,
+ pcmk_rsc_match_current_node));
}
static void
@@ -155,20 +160,27 @@ group_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(exim_group, native_find_rsc(exim_group, "exim-group", NULL, 0));
- assert_ptr_equal(exim_group, native_find_rsc(exim_group, "exim-group", NULL, pe_find_current));
+ assert_ptr_equal(exim_group,
+ native_find_rsc(exim_group, "exim-group", NULL,
+ pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
- assert_null(native_find_rsc(exim_group, "exim-group", NULL, pe_find_clone));
- assert_null(native_find_rsc(exim_group, "exim-group", cluster01, pe_find_clone));
+ assert_null(native_find_rsc(exim_group, "exim-group", NULL,
+ pcmk_rsc_match_clone_only));
+ assert_null(native_find_rsc(exim_group, "exim-group", cluster01,
+ pcmk_rsc_match_clone_only));
/* Fails because none of exim-group's children are running on cluster01, even with the right flags. */
- assert_null(native_find_rsc(exim_group, "exim-group", cluster01, pe_find_current));
+ assert_null(native_find_rsc(exim_group, "exim-group", cluster01,
+ pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(exim_group, "exim-group", cluster01, 0));
/* Passes because one of exim-group's children is running on cluster02. */
- assert_ptr_equal(exim_group, native_find_rsc(exim_group, "exim-group", cluster02, pe_find_current));
+ assert_ptr_equal(exim_group,
+ native_find_rsc(exim_group, "exim-group", cluster02,
+ pcmk_rsc_match_current_node));
}
static void
@@ -177,30 +189,30 @@ inactive_group_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", NULL, 0));
- assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", NULL, pe_find_current));
- assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", NULL, pe_find_inactive));
+ assert_ptr_equal(inactive_group,
+ native_find_rsc(inactive_group, "inactive-group", NULL,
+ pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
- assert_null(native_find_rsc(inactive_group, "inactive-group", NULL, pe_find_clone));
- assert_null(native_find_rsc(inactive_group, "inactive-group", cluster01, pe_find_clone));
+ assert_null(native_find_rsc(inactive_group, "inactive-group", NULL,
+ pcmk_rsc_match_clone_only));
+ assert_null(native_find_rsc(inactive_group, "inactive-group", cluster01,
+ pcmk_rsc_match_clone_only));
/* Fails because none of inactive-group's children are running. */
- assert_null(native_find_rsc(inactive_group, "inactive-group", cluster01, pe_find_current));
- assert_null(native_find_rsc(inactive_group, "inactive-group", cluster02, pe_find_current));
-
- /* Passes because of flags. */
- assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", cluster01, pe_find_inactive));
- /* Passes because of flags. */
- assert_ptr_equal(inactive_group, native_find_rsc(inactive_group, "inactive-group", cluster02, pe_find_inactive));
+ assert_null(native_find_rsc(inactive_group, "inactive-group", cluster01,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(inactive_group, "inactive-group", cluster02,
+ pcmk_rsc_match_current_node));
}
static void
group_member_rsc(void **state) {
- pe_resource_t *public_ip = NULL;
+ pcmk_resource_t *public_ip = NULL;
/* Find the "Public-IP" resource, a member of "exim-group". */
for (GList *iter = exim_group->children; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "Public-IP") == 0) {
public_ip = rsc;
@@ -212,29 +224,36 @@ group_member_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(public_ip, native_find_rsc(public_ip, "Public-IP", NULL, 0));
- assert_ptr_equal(public_ip, native_find_rsc(public_ip, "Public-IP", NULL, pe_find_current));
+ assert_ptr_equal(public_ip,
+ native_find_rsc(public_ip, "Public-IP", NULL,
+ pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
- assert_null(native_find_rsc(public_ip, "Public-IP", NULL, pe_find_clone));
- assert_null(native_find_rsc(public_ip, "Public-IP", cluster02, pe_find_clone));
+ assert_null(native_find_rsc(public_ip, "Public-IP", NULL,
+ pcmk_rsc_match_clone_only));
+ assert_null(native_find_rsc(public_ip, "Public-IP", cluster02,
+ pcmk_rsc_match_clone_only));
/* Fails because Public-IP is not running on cluster01, even with the right flags. */
- assert_null(native_find_rsc(public_ip, "Public-IP", cluster01, pe_find_current));
+ assert_null(native_find_rsc(public_ip, "Public-IP", cluster01,
+ pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(public_ip, "Public-IP", cluster02, 0));
/* Passes because Public-IP is running on cluster02. */
- assert_ptr_equal(public_ip, native_find_rsc(public_ip, "Public-IP", cluster02, pe_find_current));
+ assert_ptr_equal(public_ip,
+ native_find_rsc(public_ip, "Public-IP", cluster02,
+ pcmk_rsc_match_current_node));
}
static void
inactive_group_member_rsc(void **state) {
- pe_resource_t *inactive_dummy_1 = NULL;
+ pcmk_resource_t *inactive_dummy_1 = NULL;
/* Find the "inactive-dummy-1" resource, a member of "inactive-group". */
for (GList *iter = inactive_group->children; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "inactive-dummy-1") == 0) {
inactive_dummy_1 = rsc;
@@ -246,20 +265,21 @@ inactive_group_member_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(inactive_dummy_1, native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL, 0));
- assert_ptr_equal(inactive_dummy_1, native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL, pe_find_current));
+ assert_ptr_equal(inactive_dummy_1,
+ native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL,
+ pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
- assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL, pe_find_clone));
- assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01, pe_find_clone));
+ assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", NULL,
+ pcmk_rsc_match_clone_only));
+ assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01,
+ pcmk_rsc_match_clone_only));
/* Fails because inactive-dummy-1 is not running. */
- assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01, pe_find_current));
- assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster02, pe_find_current));
-
- /* Passes because of flags. */
- assert_ptr_equal(inactive_dummy_1, native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01, pe_find_inactive));
- /* Passes because of flags. */
- assert_ptr_equal(inactive_dummy_1, native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster02, pe_find_inactive));
+ assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster01,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(inactive_dummy_1, "inactive-dummy-1", cluster02,
+ pcmk_rsc_match_current_node));
}
static void
@@ -268,24 +288,40 @@ clone_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", NULL, 0));
- assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", NULL, pe_find_current));
- assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", NULL, pe_find_clone));
-
- /* Fails because pe_find_current is required if a node is given. */
+ assert_ptr_equal(promotable_clone,
+ native_find_rsc(promotable_clone, "promotable-clone", NULL,
+ pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_clone,
+ native_find_rsc(promotable_clone, "promotable-clone", NULL,
+ pcmk_rsc_match_clone_only));
+
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(promotable_clone, "promotable-clone", cluster01, 0));
/* Passes because one of ping-clone's children is running on cluster01. */
- assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", cluster01, pe_find_current));
+ assert_ptr_equal(promotable_clone,
+ native_find_rsc(promotable_clone, "promotable-clone",
+ cluster01, pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(promotable_clone, "promotable-clone", cluster02, 0));
/* Passes because one of ping_clone's children is running on cluster02. */
- assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", cluster02, pe_find_current));
-
- /* Passes for previous reasons, plus includes pe_find_clone check. */
- assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", cluster01, pe_find_clone|pe_find_current));
- assert_ptr_equal(promotable_clone, native_find_rsc(promotable_clone, "promotable-clone", cluster02, pe_find_clone|pe_find_current));
+ assert_ptr_equal(promotable_clone,
+ native_find_rsc(promotable_clone, "promotable-clone",
+ cluster02, pcmk_rsc_match_current_node));
+
+ // Passes for previous reasons, plus includes pcmk_rsc_match_clone_only
+ assert_ptr_equal(promotable_clone,
+ native_find_rsc(promotable_clone, "promotable-clone",
+ cluster01,
+ pcmk_rsc_match_clone_only
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_clone,
+ native_find_rsc(promotable_clone, "promotable-clone",
+ cluster02,
+ pcmk_rsc_match_clone_only
+ |pcmk_rsc_match_current_node));
}
static void
@@ -294,28 +330,30 @@ inactive_clone_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", NULL, 0));
- assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", NULL, pe_find_current));
- assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", NULL, pe_find_clone));
- assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", NULL, pe_find_inactive));
+ assert_ptr_equal(inactive_clone,
+ native_find_rsc(inactive_clone, "inactive-clone", NULL,
+ pcmk_rsc_match_current_node));
+ assert_ptr_equal(inactive_clone,
+ native_find_rsc(inactive_clone, "inactive-clone", NULL,
+ pcmk_rsc_match_clone_only));
/* Fails because none of inactive-clone's children are running. */
- assert_null(native_find_rsc(inactive_clone, "inactive-clone", cluster01, pe_find_current|pe_find_clone));
- assert_null(native_find_rsc(inactive_clone, "inactive-clone", cluster02, pe_find_current|pe_find_clone));
-
- /* Passes because of flags. */
- assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", cluster01, pe_find_inactive));
- /* Passes because of flags. */
- assert_ptr_equal(inactive_clone, native_find_rsc(inactive_clone, "inactive-clone", cluster02, pe_find_inactive));
+ assert_null(native_find_rsc(inactive_clone, "inactive-clone", cluster01,
+ pcmk_rsc_match_current_node
+ |pcmk_rsc_match_clone_only));
+ assert_null(native_find_rsc(inactive_clone, "inactive-clone", cluster02,
+ pcmk_rsc_match_current_node
+ |pcmk_rsc_match_clone_only));
}
static void
clone_instance_rsc(void **state) {
- pe_resource_t *promotable_0 = NULL;
- pe_resource_t *promotable_1 = NULL;
+ pcmk_resource_t *promotable_0 = NULL;
+ pcmk_resource_t *promotable_1 = NULL;
/* Find the "promotable-rsc:0" and "promotable-rsc:1" resources, members of "promotable-clone". */
for (GList *iter = promotable_clone->children; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "promotable-rsc:0") == 0) {
promotable_0 = rsc;
@@ -329,70 +367,132 @@ clone_instance_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc:0", NULL, 0));
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc:0", NULL, pe_find_current));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc:0", NULL,
+ pcmk_rsc_match_current_node));
assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc:1", NULL, 0));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc:1", NULL, pe_find_current));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_1, "promotable-rsc:1", NULL,
+ pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(promotable_0, "promotable-rsc:0", cluster02, 0));
assert_null(native_find_rsc(promotable_1, "promotable-rsc:1", cluster01, 0));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc:0", cluster02, pe_find_current));
- assert_null(native_find_rsc(promotable_0, "promotable-rsc:0", cluster01, pe_find_current));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc:1", cluster01, pe_find_current));
- assert_null(native_find_rsc(promotable_1, "promotable-rsc:1", cluster02, pe_find_current));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc:0",
+ cluster02, pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(promotable_0, "promotable-rsc:0", cluster01,
+ pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_1, "promotable-rsc:1",
+ cluster01, pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(promotable_1, "promotable-rsc:1", cluster02,
+ pcmk_rsc_match_current_node));
/* Passes because NULL was passed for node and primitive name was given, with correct flags. */
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", NULL, pe_find_clone));
-
- /* Passes because pe_find_any matches any instance's base name. */
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", NULL, pe_find_any));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", NULL, pe_find_any));
-
- /* Passes because pe_find_anon matches. */
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", NULL, pe_find_anon));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", NULL, pe_find_anon));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc", NULL,
+ pcmk_rsc_match_clone_only));
+
+ // Passes because pcmk_rsc_match_basename matches any instance's base name
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc", NULL,
+ pcmk_rsc_match_basename));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_1, "promotable-rsc", NULL,
+ pcmk_rsc_match_basename));
+
+ // Passes because pcmk_rsc_match_anon_basename matches
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc", NULL,
+ pcmk_rsc_match_anon_basename));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_1, "promotable-rsc", NULL,
+ pcmk_rsc_match_anon_basename));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", cluster02, pe_find_any|pe_find_current));
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", cluster02, pe_find_anon|pe_find_current));
- assert_null(native_find_rsc(promotable_0, "promotable-rsc", cluster01, pe_find_any|pe_find_current));
- assert_null(native_find_rsc(promotable_0, "promotable-rsc", cluster01, pe_find_anon|pe_find_current));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", cluster01, pe_find_any|pe_find_current));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", cluster01, pe_find_anon|pe_find_current));
- assert_null(native_find_rsc(promotable_1, "promotable-rsc", cluster02, pe_find_any|pe_find_current));
- assert_null(native_find_rsc(promotable_1, "promotable-rsc", cluster02, pe_find_anon|pe_find_current));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc", cluster02,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc", cluster02,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(promotable_0, "promotable-rsc", cluster01,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(promotable_0, "promotable-rsc", cluster01,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_1, "promotable-rsc", cluster01,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_1, "promotable-rsc", cluster01,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(promotable_1, "promotable-rsc", cluster02,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(promotable_1, "promotable-rsc", cluster02,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
/* Fails because incorrect flags were given along with primitive name. */
- assert_null(native_find_rsc(promotable_0, "promotable-rsc", NULL, pe_find_current));
- assert_null(native_find_rsc(promotable_1, "promotable-rsc", NULL, pe_find_current));
+ assert_null(native_find_rsc(promotable_0, "promotable-rsc", NULL,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(promotable_1, "promotable-rsc", NULL,
+ pcmk_rsc_match_current_node));
/* And then we check failure possibilities again, except passing promotable_clone
* instead of promotable_X as the first argument to native_find_rsc.
*/
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(promotable_clone, "promotable-rsc:0", cluster02, 0));
assert_null(native_find_rsc(promotable_clone, "promotable-rsc:1", cluster01, 0));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_clone, "promotable-rsc:0", cluster02, pe_find_current));
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_clone, "promotable-rsc", cluster02, pe_find_any|pe_find_current));
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_clone, "promotable-rsc", cluster02, pe_find_anon|pe_find_current));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_clone, "promotable-rsc:1", cluster01, pe_find_current));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_clone, "promotable-rsc", cluster01, pe_find_any|pe_find_current));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_clone, "promotable-rsc", cluster01, pe_find_anon|pe_find_current));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_clone, "promotable-rsc:0",
+ cluster02, pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_clone, "promotable-rsc",
+ cluster02,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_clone, "promotable-rsc",
+ cluster02,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_clone, "promotable-rsc:1",
+ cluster01, pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_clone, "promotable-rsc",
+ cluster01,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_clone, "promotable-rsc",
+ cluster01,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
}
static void
renamed_rsc(void **state) {
- pe_resource_t *promotable_0 = NULL;
- pe_resource_t *promotable_1 = NULL;
+ pcmk_resource_t *promotable_0 = NULL;
+ pcmk_resource_t *promotable_1 = NULL;
/* Find the "promotable-rsc:0" and "promotable-rsc:1" resources, members of "promotable-clone". */
for (GList *iter = promotable_clone->children; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "promotable-rsc:0") == 0) {
promotable_0 = rsc;
@@ -404,9 +504,13 @@ renamed_rsc(void **state) {
assert_non_null(promotable_0);
assert_non_null(promotable_1);
- /* Passes because pe_find_renamed means the base name matches clone_name. */
- assert_ptr_equal(promotable_0, native_find_rsc(promotable_0, "promotable-rsc", NULL, pe_find_renamed));
- assert_ptr_equal(promotable_1, native_find_rsc(promotable_1, "promotable-rsc", NULL, pe_find_renamed));
+ // Passes because pcmk_rsc_match_history means base name matches clone_name
+ assert_ptr_equal(promotable_0,
+ native_find_rsc(promotable_0, "promotable-rsc", NULL,
+ pcmk_rsc_match_history));
+ assert_ptr_equal(promotable_1,
+ native_find_rsc(promotable_1, "promotable-rsc", NULL,
+ pcmk_rsc_match_history));
}
static void
@@ -415,36 +519,32 @@ bundle_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(httpd_bundle, native_find_rsc(httpd_bundle, "httpd-bundle", NULL, 0));
- assert_ptr_equal(httpd_bundle, native_find_rsc(httpd_bundle, "httpd-bundle", NULL, pe_find_current));
+ assert_ptr_equal(httpd_bundle,
+ native_find_rsc(httpd_bundle, "httpd-bundle", NULL,
+ pcmk_rsc_match_current_node));
/* Fails because resource is not a clone (nor cloned). */
- assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", NULL, pe_find_clone));
- assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", cluster01, pe_find_clone));
+ assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", NULL,
+ pcmk_rsc_match_clone_only));
+ assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", cluster01,
+ pcmk_rsc_match_clone_only));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(httpd_bundle, "httpd-bundle", cluster01, 0));
/* Passes because one of httpd_bundle's children is running on cluster01. */
- assert_ptr_equal(httpd_bundle, native_find_rsc(httpd_bundle, "httpd-bundle", cluster01, pe_find_current));
+ assert_ptr_equal(httpd_bundle,
+ native_find_rsc(httpd_bundle, "httpd-bundle", cluster01,
+ pcmk_rsc_match_current_node));
}
-static void
-bundle_replica_rsc(void **state) {
- pe__bundle_variant_data_t *bundle_data = NULL;
- pe__bundle_replica_t *replica_0 = NULL;
-
- pe_resource_t *ip_0 = NULL;
- pe_resource_t *child_0 = NULL;
- pe_resource_t *container_0 = NULL;
- pe_resource_t *remote_0 = NULL;
-
- get_bundle_variant_data(bundle_data, httpd_bundle);
- replica_0 = (pe__bundle_replica_t *) bundle_data->replicas->data;
-
- ip_0 = replica_0->ip;
- child_0 = replica_0->child;
- container_0 = replica_0->container;
- remote_0 = replica_0->remote;
+static bool
+bundle_first_replica(pe__bundle_replica_t *replica, void *user_data)
+{
+ pcmk_resource_t *ip_0 = replica->ip;
+ pcmk_resource_t *child_0 = replica->child;
+ pcmk_resource_t *container_0 = replica->container;
+ pcmk_resource_t *remote_0 = replica->remote;
assert_non_null(ip_0);
assert_non_null(child_0);
@@ -457,58 +557,109 @@ bundle_replica_rsc(void **state) {
assert_ptr_equal(container_0, native_find_rsc(container_0, "httpd-bundle-docker-0", NULL, 0));
assert_ptr_equal(remote_0, native_find_rsc(remote_0, "httpd-bundle-0", NULL, 0));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", cluster01, 0));
assert_null(native_find_rsc(child_0, "httpd:0", httpd_bundle_0, 0));
assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0", cluster01, 0));
assert_null(native_find_rsc(remote_0, "httpd-bundle-0", cluster01, 0));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(ip_0, native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", cluster01, pe_find_current));
- assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", cluster02, pe_find_current));
- assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131", httpd_bundle_0, pe_find_current));
- assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd:0", httpd_bundle_0, pe_find_current));
- assert_null(native_find_rsc(child_0, "httpd:0", cluster01, pe_find_current));
- assert_null(native_find_rsc(child_0, "httpd:0", cluster02, pe_find_current));
- assert_ptr_equal(container_0, native_find_rsc(container_0, "httpd-bundle-docker-0", cluster01, pe_find_current));
- assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0", cluster02, pe_find_current));
- assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0", httpd_bundle_0, pe_find_current));
- assert_ptr_equal(remote_0, native_find_rsc(remote_0, "httpd-bundle-0", cluster01, pe_find_current));
- assert_null(native_find_rsc(remote_0, "httpd-bundle-0", cluster02, pe_find_current));
- assert_null(native_find_rsc(remote_0, "httpd-bundle-0", httpd_bundle_0, pe_find_current));
-
- /* Passes because pe_find_any matches any replica's base name. */
- assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd", NULL, pe_find_any));
-
- /* Passes because pe_find_anon matches. */
- assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd", NULL, pe_find_anon));
+ assert_ptr_equal(ip_0,
+ native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131",
+ cluster01, pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131",
+ cluster02, pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(ip_0, "httpd-bundle-ip-192.168.122.131",
+ httpd_bundle_0, pcmk_rsc_match_current_node));
+ assert_ptr_equal(child_0,
+ native_find_rsc(child_0, "httpd:0", httpd_bundle_0,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(child_0, "httpd:0", cluster01,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(child_0, "httpd:0", cluster02,
+ pcmk_rsc_match_current_node));
+ assert_ptr_equal(container_0,
+ native_find_rsc(container_0, "httpd-bundle-docker-0",
+ cluster01, pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0", cluster02,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(container_0, "httpd-bundle-docker-0",
+ httpd_bundle_0, pcmk_rsc_match_current_node));
+ assert_ptr_equal(remote_0,
+ native_find_rsc(remote_0, "httpd-bundle-0", cluster01,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(remote_0, "httpd-bundle-0", cluster02,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(remote_0, "httpd-bundle-0", httpd_bundle_0,
+ pcmk_rsc_match_current_node));
+
+ // Passes because pcmk_rsc_match_basename matches any replica's base name
+ assert_ptr_equal(child_0,
+ native_find_rsc(child_0, "httpd", NULL,
+ pcmk_rsc_match_basename));
+
+ // Passes because pcmk_rsc_match_anon_basename matches
+ assert_ptr_equal(child_0,
+ native_find_rsc(child_0, "httpd", NULL,
+ pcmk_rsc_match_anon_basename));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd", httpd_bundle_0, pe_find_any|pe_find_current));
- assert_ptr_equal(child_0, native_find_rsc(child_0, "httpd", httpd_bundle_0, pe_find_anon|pe_find_current));
- assert_null(native_find_rsc(child_0, "httpd", cluster01, pe_find_any|pe_find_current));
- assert_null(native_find_rsc(child_0, "httpd", cluster01, pe_find_anon|pe_find_current));
- assert_null(native_find_rsc(child_0, "httpd", cluster02, pe_find_any|pe_find_current));
- assert_null(native_find_rsc(child_0, "httpd", cluster02, pe_find_anon|pe_find_current));
+ assert_ptr_equal(child_0,
+ native_find_rsc(child_0, "httpd", httpd_bundle_0,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(child_0,
+ native_find_rsc(child_0, "httpd", httpd_bundle_0,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(child_0, "httpd", cluster01,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(child_0, "httpd", cluster01,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(child_0, "httpd", cluster02,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(child_0, "httpd", cluster02,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
/* Fails because incorrect flags were given along with base name. */
- assert_null(native_find_rsc(child_0, "httpd", NULL, pe_find_current));
+ assert_null(native_find_rsc(child_0, "httpd", NULL,
+ pcmk_rsc_match_current_node));
/* And then we check failure possibilities again, except passing httpd-bundle
* instead of X_0 as the first argument to native_find_rsc.
*/
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(httpd_bundle, "httpd-bundle-ip-192.168.122.131", cluster01, 0));
assert_null(native_find_rsc(httpd_bundle, "httpd:0", httpd_bundle_0, 0));
assert_null(native_find_rsc(httpd_bundle, "httpd-bundle-docker-0", cluster01, 0));
assert_null(native_find_rsc(httpd_bundle, "httpd-bundle-0", cluster01, 0));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(ip_0, native_find_rsc(httpd_bundle, "httpd-bundle-ip-192.168.122.131", cluster01, pe_find_current));
- assert_ptr_equal(child_0, native_find_rsc(httpd_bundle, "httpd:0", httpd_bundle_0, pe_find_current));
- assert_ptr_equal(container_0, native_find_rsc(httpd_bundle, "httpd-bundle-docker-0", cluster01, pe_find_current));
- assert_ptr_equal(remote_0, native_find_rsc(httpd_bundle, "httpd-bundle-0", cluster01, pe_find_current));
+ assert_ptr_equal(ip_0,
+ native_find_rsc(httpd_bundle,
+ "httpd-bundle-ip-192.168.122.131",
+ cluster01, pcmk_rsc_match_current_node));
+ assert_ptr_equal(child_0,
+ native_find_rsc(httpd_bundle, "httpd:0", httpd_bundle_0,
+ pcmk_rsc_match_current_node));
+ assert_ptr_equal(container_0,
+ native_find_rsc(httpd_bundle, "httpd-bundle-docker-0",
+ cluster01, pcmk_rsc_match_current_node));
+ assert_ptr_equal(remote_0,
+ native_find_rsc(httpd_bundle, "httpd-bundle-0", cluster01,
+ pcmk_rsc_match_current_node));
+ return false; // Do not iterate through any further replicas
+}
+
+static void
+bundle_replica_rsc(void **state)
+{
+ pe__foreach_bundle_replica(httpd_bundle, bundle_first_replica, NULL);
}
static void
@@ -517,34 +668,50 @@ clone_group_rsc(void **rsc) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", NULL, 0));
- assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", NULL, pe_find_current));
- assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", NULL, pe_find_clone));
-
- /* Fails because pe_find_current is required if a node is given. */
+ assert_ptr_equal(mysql_clone_group,
+ native_find_rsc(mysql_clone_group, "mysql-clone-group",
+ NULL, pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_clone_group,
+ native_find_rsc(mysql_clone_group, "mysql-clone-group",
+ NULL, pcmk_rsc_match_clone_only));
+
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster01, 0));
/* Passes because one of mysql-clone-group's children is running on cluster01. */
- assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster01, pe_find_current));
+ assert_ptr_equal(mysql_clone_group,
+ native_find_rsc(mysql_clone_group, "mysql-clone-group",
+ cluster01, pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster02, 0));
/* Passes because one of mysql-clone-group's children is running on cluster02. */
- assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster02, pe_find_current));
-
- /* Passes for previous reasons, plus includes pe_find_clone check. */
- assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster01, pe_find_clone|pe_find_current));
- assert_ptr_equal(mysql_clone_group, native_find_rsc(mysql_clone_group, "mysql-clone-group", cluster02, pe_find_clone|pe_find_current));
+ assert_ptr_equal(mysql_clone_group,
+ native_find_rsc(mysql_clone_group, "mysql-clone-group",
+ cluster02, pcmk_rsc_match_current_node));
+
+ // Passes for previous reasons, plus includes pcmk_rsc_match_clone_only
+ assert_ptr_equal(mysql_clone_group,
+ native_find_rsc(mysql_clone_group, "mysql-clone-group",
+ cluster01,
+ pcmk_rsc_match_clone_only
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_clone_group,
+ native_find_rsc(mysql_clone_group, "mysql-clone-group",
+ cluster02,
+ pcmk_rsc_match_clone_only
+ |pcmk_rsc_match_current_node));
}
static void
clone_group_instance_rsc(void **rsc) {
- pe_resource_t *mysql_group_0 = NULL;
- pe_resource_t *mysql_group_1 = NULL;
+ pcmk_resource_t *mysql_group_0 = NULL;
+ pcmk_resource_t *mysql_group_1 = NULL;
/* Find the "mysql-group:0" and "mysql-group:1" resources, members of "mysql-clone-group". */
for (GList *iter = mysql_clone_group->children; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "mysql-group:0") == 0) {
mysql_group_0 = rsc;
@@ -558,73 +725,135 @@ clone_group_instance_rsc(void **rsc) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group:0", NULL, 0));
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group:0", NULL, pe_find_current));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_group_0, "mysql-group:0", NULL,
+ pcmk_rsc_match_current_node));
assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group:1", NULL, 0));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group:1", NULL, pe_find_current));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_group_1, "mysql-group:1", NULL,
+ pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(mysql_group_0, "mysql-group:0", cluster02, 0));
assert_null(native_find_rsc(mysql_group_1, "mysql-group:1", cluster01, 0));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group:0", cluster02, pe_find_current));
- assert_null(native_find_rsc(mysql_group_0, "mysql-group:0", cluster01, pe_find_current));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group:1", cluster01, pe_find_current));
- assert_null(native_find_rsc(mysql_group_1, "mysql-group:1", cluster02, pe_find_current));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_group_0, "mysql-group:0", cluster02,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(mysql_group_0, "mysql-group:0", cluster01,
+ pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_group_1, "mysql-group:1", cluster01,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(mysql_group_1, "mysql-group:1", cluster02,
+ pcmk_rsc_match_current_node));
/* Passes because NULL was passed for node and base name was given, with correct flags. */
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group" , NULL, pe_find_clone));
-
- /* Passes because pe_find_any matches any base name. */
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group" , NULL, pe_find_any));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group" , NULL, pe_find_any));
-
- /* Passes because pe_find_anon matches. */
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group" , NULL, pe_find_anon));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group" , NULL, pe_find_anon));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_group_0, "mysql-group" , NULL,
+ pcmk_rsc_match_clone_only));
+
+ // Passes because pcmk_rsc_match_basename matches any base name
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_group_0, "mysql-group" , NULL,
+ pcmk_rsc_match_basename));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_group_1, "mysql-group" , NULL,
+ pcmk_rsc_match_basename));
+
+ // Passes because pcmk_rsc_match_anon_basename matches
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_group_0, "mysql-group" , NULL,
+ pcmk_rsc_match_anon_basename));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_group_1, "mysql-group" , NULL,
+ pcmk_rsc_match_anon_basename));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group", cluster02, pe_find_any|pe_find_current));
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_group_0, "mysql-group", cluster02, pe_find_anon|pe_find_current));
- assert_null(native_find_rsc(mysql_group_0, "mysql-group", cluster01, pe_find_any|pe_find_current));
- assert_null(native_find_rsc(mysql_group_0, "mysql-group", cluster01, pe_find_anon|pe_find_current));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group", cluster01, pe_find_any|pe_find_current));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_group_1, "mysql-group", cluster01, pe_find_anon|pe_find_current));
- assert_null(native_find_rsc(mysql_group_1, "mysql-group", cluster02, pe_find_any|pe_find_current));
- assert_null(native_find_rsc(mysql_group_1, "mysql-group", cluster02, pe_find_anon|pe_find_current));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_group_0, "mysql-group", cluster02,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_group_0, "mysql-group", cluster02,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(mysql_group_0, "mysql-group", cluster01,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(mysql_group_0, "mysql-group", cluster01,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_group_1, "mysql-group", cluster01,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_group_1, "mysql-group", cluster01,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(mysql_group_1, "mysql-group", cluster02,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(mysql_group_1, "mysql-group", cluster02,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
/* Fails because incorrect flags were given along with base name. */
- assert_null(native_find_rsc(mysql_group_0, "mysql-group", NULL, pe_find_current));
- assert_null(native_find_rsc(mysql_group_1, "mysql-group", NULL, pe_find_current));
+ assert_null(native_find_rsc(mysql_group_0, "mysql-group", NULL,
+ pcmk_rsc_match_current_node));
+ assert_null(native_find_rsc(mysql_group_1, "mysql-group", NULL,
+ pcmk_rsc_match_current_node));
/* And then we check failure possibilities again, except passing mysql_clone_group
* instead of mysql_group_X as the first argument to native_find_rsc.
*/
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(mysql_clone_group, "mysql-group:0", cluster02, 0));
assert_null(native_find_rsc(mysql_clone_group, "mysql-group:1", cluster01, 0));
/* Check that the resource is running on the node we expect. */
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_clone_group, "mysql-group:0", cluster02, pe_find_current));
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_clone_group, "mysql-group", cluster02, pe_find_any|pe_find_current));
- assert_ptr_equal(mysql_group_0, native_find_rsc(mysql_clone_group, "mysql-group", cluster02, pe_find_anon|pe_find_current));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_clone_group, "mysql-group:1", cluster01, pe_find_current));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_clone_group, "mysql-group", cluster01, pe_find_any|pe_find_current));
- assert_ptr_equal(mysql_group_1, native_find_rsc(mysql_clone_group, "mysql-group", cluster01, pe_find_anon|pe_find_current));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_clone_group, "mysql-group:0",
+ cluster02, pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_clone_group, "mysql-group",
+ cluster02,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_0,
+ native_find_rsc(mysql_clone_group, "mysql-group",
+ cluster02,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_clone_group, "mysql-group:1",
+ cluster01, pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_clone_group, "mysql-group",
+ cluster01,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node));
+ assert_ptr_equal(mysql_group_1,
+ native_find_rsc(mysql_clone_group, "mysql-group",
+ cluster01,
+ pcmk_rsc_match_anon_basename
+ |pcmk_rsc_match_current_node));
}
static void
clone_group_member_rsc(void **state) {
- pe_resource_t *mysql_proxy = NULL;
+ pcmk_resource_t *mysql_proxy = NULL;
/* Find the "mysql-proxy" resource, a member of "mysql-group". */
for (GList *iter = mysql_clone_group->children; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "mysql-group:0") == 0) {
for (GList *iter2 = rsc->children; iter2 != NULL; iter2 = iter2->next) {
- pe_resource_t *child = (pe_resource_t *) iter2->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) iter2->data;
if (strcmp(child->id, "mysql-proxy:0") == 0) {
mysql_proxy = child;
@@ -640,24 +869,35 @@ clone_group_member_rsc(void **state) {
/* Passes because NULL was passed for node, regardless of flags. */
assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL, 0));
- assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL, pe_find_current));
+ assert_ptr_equal(mysql_proxy,
+ native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL,
+ pcmk_rsc_match_current_node));
/* Passes because resource's parent is a clone. */
- assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL, pe_find_clone));
- assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02, pe_find_clone|pe_find_current));
+ assert_ptr_equal(mysql_proxy,
+ native_find_rsc(mysql_proxy, "mysql-proxy:0", NULL,
+ pcmk_rsc_match_clone_only));
+ assert_ptr_equal(mysql_proxy,
+ native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02,
+ pcmk_rsc_match_clone_only
+ |pcmk_rsc_match_current_node));
/* Fails because mysql-proxy:0 is not running on cluster01, even with the right flags. */
- assert_null(native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster01, pe_find_current));
+ assert_null(native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster01,
+ pcmk_rsc_match_current_node));
- /* Fails because pe_find_current is required if a node is given. */
+ // Fails because pcmk_rsc_match_current_node is required if a node is given
assert_null(native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02, 0));
/* Passes because mysql-proxy:0 is running on cluster02. */
- assert_ptr_equal(mysql_proxy, native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02, pe_find_current));
+ assert_ptr_equal(mysql_proxy,
+ native_find_rsc(mysql_proxy, "mysql-proxy:0", cluster02,
+ pcmk_rsc_match_current_node));
}
-/* TODO: Add tests for finding on allocated node (passing a node without
- * pe_find_current, after scheduling, for a resource that is starting/stopping/moving.
+/* TODO: Add tests for finding on assigned node (passing a node without
+ * pcmk_rsc_match_current_node, after scheduling, for a resource that is
+ * starting/stopping/moving.
*/
PCMK__UNIT_TEST(setup, teardown,
cmocka_unit_test(bad_args),
diff --git a/lib/pengine/tests/native/pe_base_name_eq_test.c b/lib/pengine/tests/native/pe_base_name_eq_test.c
index 67a62f8..cb3c908 100644
--- a/lib/pengine/tests/native/pe_base_name_eq_test.c
+++ b/lib/pengine/tests/native/pe_base_name_eq_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -12,15 +12,15 @@
#include <crm/common/unittest_internal.h>
#include <crm/common/xml.h>
+#include <crm/common/scheduler.h>
#include <crm/pengine/internal.h>
#include <crm/pengine/status.h>
-#include <crm/pengine/pe_types.h>
xmlNode *input = NULL;
-pe_working_set_t *data_set = NULL;
+pcmk_scheduler_t *scheduler = NULL;
-pe_resource_t *exim_group, *promotable_0, *promotable_1, *dummy;
-pe_resource_t *httpd_bundle, *mysql_group_0, *mysql_group_1;
+pcmk_resource_t *exim_group, *promotable_0, *promotable_1, *dummy;
+pcmk_resource_t *httpd_bundle, *mysql_group_0, *mysql_group_1;
static int
setup(void **state) {
@@ -36,20 +36,21 @@ setup(void **state) {
return 1;
}
- data_set = pe_new_working_set();
+ scheduler = pe_new_working_set();
- if (data_set == NULL) {
+ if (scheduler == NULL) {
return 1;
}
- pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
- data_set->input = input;
+ pe__set_working_set_flags(scheduler,
+ pcmk_sched_no_counts|pcmk_sched_no_compat);
+ scheduler->input = input;
- cluster_status(data_set);
+ cluster_status(scheduler);
/* Get references to several resources we use frequently. */
- for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
- pe_resource_t *rsc = (pe_resource_t *) iter->data;
+ for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
if (strcmp(rsc->id, "dummy") == 0) {
dummy = rsc;
@@ -59,7 +60,7 @@ setup(void **state) {
httpd_bundle = rsc;
} else if (strcmp(rsc->id, "mysql-clone-group") == 0) {
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *child = (pe_resource_t *) iter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) iter->data;
if (strcmp(child->id, "mysql-group:0") == 0) {
mysql_group_0 = child;
@@ -69,7 +70,7 @@ setup(void **state) {
}
} else if (strcmp(rsc->id, "promotable-clone") == 0) {
for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- pe_resource_t *child = (pe_resource_t *) iter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) iter->data;
if (strcmp(child->id, "promotable-rsc:0") == 0) {
promotable_0 = child;
@@ -85,7 +86,7 @@ setup(void **state) {
static int
teardown(void **state) {
- pe_free_working_set(data_set);
+ pe_free_working_set(scheduler);
return 0;
}
diff --git a/lib/pengine/tests/status/Makefile.am b/lib/pengine/tests/status/Makefile.am
index 3f95496..c7ddb70 100644
--- a/lib/pengine/tests/status/Makefile.am
+++ b/lib/pengine/tests/status/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2022 the Pacemaker project contributors
+# Copyright 2022-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -13,10 +13,10 @@ include $(top_srcdir)/mk/unittest.mk
LDADD += $(top_builddir)/lib/pengine/libpe_status_test.la
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = pe_find_node_any_test \
- pe_find_node_id_test \
- pe_find_node_test \
- pe_new_working_set_test \
- set_working_set_defaults_test
+check_PROGRAMS = pe_find_node_any_test \
+ pe_find_node_id_test \
+ pe_find_node_test \
+ pe_new_working_set_test \
+ set_working_set_defaults_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/pengine/tests/status/pe_find_node_any_test.c b/lib/pengine/tests/status/pe_find_node_any_test.c
index b911424..5f5a27e 100644
--- a/lib/pengine/tests/status/pe_find_node_any_test.c
+++ b/lib/pengine/tests/status/pe_find_node_any_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -24,8 +24,8 @@ static void
non_null_list(void **state) {
GList *nodes = NULL;
- pe_node_t *a = calloc(1, sizeof(pe_node_t));
- pe_node_t *b = calloc(1, sizeof(pe_node_t));
+ pcmk_node_t *a = calloc(1, sizeof(pcmk_node_t));
+ pcmk_node_t *b = calloc(1, sizeof(pcmk_node_t));
a->details = calloc(1, sizeof(struct pe_node_shared_s));
a->details->uname = "cluster1";
diff --git a/lib/pengine/tests/status/pe_find_node_id_test.c b/lib/pengine/tests/status/pe_find_node_id_test.c
index 832a40a..c6b8773 100644
--- a/lib/pengine/tests/status/pe_find_node_id_test.c
+++ b/lib/pengine/tests/status/pe_find_node_id_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -22,8 +22,8 @@ static void
non_null_list(void **state) {
GList *nodes = NULL;
- pe_node_t *a = calloc(1, sizeof(pe_node_t));
- pe_node_t *b = calloc(1, sizeof(pe_node_t));
+ pcmk_node_t *a = calloc(1, sizeof(pcmk_node_t));
+ pcmk_node_t *b = calloc(1, sizeof(pcmk_node_t));
a->details = calloc(1, sizeof(struct pe_node_shared_s));
a->details->id = "id1";
diff --git a/lib/pengine/tests/status/pe_find_node_test.c b/lib/pengine/tests/status/pe_find_node_test.c
index 7c7ea30..305ddc9 100644
--- a/lib/pengine/tests/status/pe_find_node_test.c
+++ b/lib/pengine/tests/status/pe_find_node_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -22,8 +22,8 @@ static void
non_null_list(void **state) {
GList *nodes = NULL;
- pe_node_t *a = calloc(1, sizeof(pe_node_t));
- pe_node_t *b = calloc(1, sizeof(pe_node_t));
+ pcmk_node_t *a = calloc(1, sizeof(pcmk_node_t));
+ pcmk_node_t *b = calloc(1, sizeof(pcmk_node_t));
a->details = calloc(1, sizeof(struct pe_node_shared_s));
a->details->uname = "cluster1";
diff --git a/lib/pengine/tests/status/pe_new_working_set_test.c b/lib/pengine/tests/status/pe_new_working_set_test.c
index cf2df4f..b385f9c 100644
--- a/lib/pengine/tests/status/pe_new_working_set_test.c
+++ b/lib/pengine/tests/status/pe_new_working_set_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -19,7 +19,7 @@ calloc_fails(void **state) {
pcmk__mock_calloc = true; // calloc() will return NULL
expect_value(__wrap_calloc, nmemb, 1);
- expect_value(__wrap_calloc, size, sizeof(pe_working_set_t));
+ expect_value(__wrap_calloc, size, sizeof(pcmk_scheduler_t));
assert_null(pe_new_working_set());
pcmk__mock_calloc = false; // Use real calloc()
@@ -27,18 +27,18 @@ calloc_fails(void **state) {
static void
calloc_succeeds(void **state) {
- pe_working_set_t *data_set = pe_new_working_set();
+ pcmk_scheduler_t *scheduler = pe_new_working_set();
/* Nothing else to test about this function, as all it does is call
* set_working_set_defaults which is also a public function and should
* get its own unit test.
*/
- assert_non_null(data_set);
+ assert_non_null(scheduler);
/* Avoid calling pe_free_working_set here so we don't artificially
* inflate the coverage numbers.
*/
- free(data_set);
+ free(scheduler);
}
PCMK__UNIT_TEST(NULL, NULL,
diff --git a/lib/pengine/tests/status/set_working_set_defaults_test.c b/lib/pengine/tests/status/set_working_set_defaults_test.c
index c822278..7045a33 100644
--- a/lib/pengine/tests/status/set_working_set_defaults_test.c
+++ b/lib/pengine/tests/status/set_working_set_defaults_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -10,8 +10,9 @@
#include <crm_internal.h>
#include <crm/common/unittest_internal.h>
+
+#include <crm/common/scheduler.h>
#include <crm/pengine/internal.h>
-#include <crm/pengine/pe_types.h>
#include <crm/pengine/status.h>
#include "mock_private.h"
@@ -19,27 +20,29 @@
static void
check_defaults(void **state) {
uint32_t flags;
- pe_working_set_t *data_set = calloc(1, sizeof(pe_working_set_t));
+ pcmk_scheduler_t *scheduler = calloc(1, sizeof(pcmk_scheduler_t));
- set_working_set_defaults(data_set);
+ set_working_set_defaults(scheduler);
- flags = pe_flag_stop_rsc_orphans|pe_flag_symmetric_cluster|pe_flag_stop_action_orphans;
+ flags = pcmk_sched_symmetric_cluster
+ |pcmk_sched_stop_removed_resources
+ |pcmk_sched_cancel_removed_actions;
if (!strcmp(PCMK__CONCURRENT_FENCING_DEFAULT, "true")) {
- flags |= pe_flag_concurrent_fencing;
+ flags |= pcmk_sched_concurrent_fencing;
}
- assert_null(data_set->priv);
- assert_int_equal(data_set->order_id, 1);
- assert_int_equal(data_set->action_id, 1);
- assert_int_equal(data_set->no_quorum_policy, no_quorum_stop);
- assert_int_equal(data_set->flags, flags);
+ assert_null(scheduler->priv);
+ assert_int_equal(scheduler->order_id, 1);
+ assert_int_equal(scheduler->action_id, 1);
+ assert_int_equal(scheduler->no_quorum_policy, pcmk_no_quorum_stop);
+ assert_int_equal(scheduler->flags, flags);
/* Avoid calling pe_free_working_set here so we don't artificially
* inflate the coverage numbers.
*/
- free(data_set);
+ free(scheduler);
}
PCMK__UNIT_TEST(NULL, NULL,
diff --git a/lib/pengine/tests/utils/Makefile.am b/lib/pengine/tests/utils/Makefile.am
index 4a3e8a2..64421e2 100644
--- a/lib/pengine/tests/utils/Makefile.am
+++ b/lib/pengine/tests/utils/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2022 the Pacemaker project contributors
+# Copyright 2022-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -14,8 +14,7 @@ AM_CPPFLAGS += -I$(top_srcdir)/lib/pengine
LDADD += $(top_builddir)/lib/pengine/libpe_status_test.la
# Add "_test" to the end of all test program names to simplify .gitignore.
-check_PROGRAMS = \
- pe__cmp_node_name_test \
+check_PROGRAMS = pe__cmp_node_name_test \
pe__cmp_rsc_priority_test
TESTS = $(check_PROGRAMS)
diff --git a/lib/pengine/tests/utils/pe__cmp_node_name_test.c b/lib/pengine/tests/utils/pe__cmp_node_name_test.c
index 45d87ee..4d602e4 100644
--- a/lib/pengine/tests/utils/pe__cmp_node_name_test.c
+++ b/lib/pengine/tests/utils/pe__cmp_node_name_test.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2022 the Pacemaker project contributors
+ * Copyright 2022-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -15,8 +15,8 @@
struct pe_node_shared_s node1_details;
struct pe_node_shared_s node2_details;
-pe_node_t node1 = {.details = &node1_details};
-pe_node_t node2 = {.details = &node2_details};
+pcmk_node_t node1 = { .details = &node1_details };
+pcmk_node_t node2 = { .details = &node2_details };
static void
nodes_equal(void **state)
diff --git a/lib/pengine/tests/utils/pe__cmp_rsc_priority_test.c b/lib/pengine/tests/utils/pe__cmp_rsc_priority_test.c
index 669e7a9..24c1731 100644
--- a/lib/pengine/tests/utils/pe__cmp_rsc_priority_test.c
+++ b/lib/pengine/tests/utils/pe__cmp_rsc_priority_test.c
@@ -14,8 +14,8 @@
#include "pe_status_private.h"
-pe_resource_t rsc1;
-pe_resource_t rsc2;
+pcmk_resource_t rsc1;
+pcmk_resource_t rsc2;
static void
rscs_equal(void **state)
diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index 2bd6707..3429d56 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -29,8 +29,8 @@ CRM_TRACE_INIT_DATA(pe_status);
// A (parsed) resource action history entry
struct action_history {
- pe_resource_t *rsc; // Resource that history is for
- pe_node_t *node; // Node that history is for
+ pcmk_resource_t *rsc; // Resource that history is for
+ pcmk_node_t *node; // Node that history is for
xmlNode *xml; // History entry XML
// Parsed from entry XML
@@ -49,43 +49,40 @@ struct action_history {
* use pe__set_working_set_flags()/pe__clear_working_set_flags() so that the
* flag is stringified more readably in log messages.
*/
-#define set_config_flag(data_set, option, flag) do { \
- const char *scf_value = pe_pref((data_set)->config_hash, (option)); \
- if (scf_value != NULL) { \
- if (crm_is_true(scf_value)) { \
- (data_set)->flags = pcmk__set_flags_as(__func__, __LINE__, \
- LOG_TRACE, "Working set", \
- crm_system_name, (data_set)->flags, \
- (flag), #flag); \
- } else { \
- (data_set)->flags = pcmk__clear_flags_as(__func__, __LINE__,\
- LOG_TRACE, "Working set", \
- crm_system_name, (data_set)->flags, \
- (flag), #flag); \
- } \
- } \
+#define set_config_flag(scheduler, option, flag) do { \
+ const char *scf_value = pe_pref((scheduler)->config_hash, (option)); \
+ if (scf_value != NULL) { \
+ if (crm_is_true(scf_value)) { \
+ (scheduler)->flags = pcmk__set_flags_as(__func__, __LINE__, \
+ LOG_TRACE, "Scheduler", \
+ crm_system_name, (scheduler)->flags, \
+ (flag), #flag); \
+ } else { \
+ (scheduler)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
+ LOG_TRACE, "Scheduler", \
+ crm_system_name, (scheduler)->flags, \
+ (flag), #flag); \
+ } \
+ } \
} while(0)
-static void unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
- xmlNode **last_failure,
+static void unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node,
+ xmlNode *xml_op, xmlNode **last_failure,
enum action_fail_response *failed);
-static void determine_remote_online_status(pe_working_set_t *data_set,
- pe_node_t *this_node);
-static void add_node_attrs(const xmlNode *xml_obj, pe_node_t *node,
- bool overwrite, pe_working_set_t *data_set);
+static void determine_remote_online_status(pcmk_scheduler_t *scheduler,
+ pcmk_node_t *this_node);
+static void add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node,
+ bool overwrite, pcmk_scheduler_t *scheduler);
static void determine_online_status(const xmlNode *node_state,
- pe_node_t *this_node,
- pe_working_set_t *data_set);
+ pcmk_node_t *this_node,
+ pcmk_scheduler_t *scheduler);
-static void unpack_node_lrm(pe_node_t *node, const xmlNode *xml,
- pe_working_set_t *data_set);
+static void unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml,
+ pcmk_scheduler_t *scheduler);
-// Bitmask for warnings we only want to print once
-uint32_t pe_wo = 0;
-
static gboolean
-is_dangling_guest_node(pe_node_t *node)
+is_dangling_guest_node(pcmk_node_t *node)
{
/* we are looking for a remote-node that was supposed to be mapped to a
* container resource, but all traces of that container have disappeared
@@ -94,7 +91,7 @@ is_dangling_guest_node(pe_node_t *node)
node->details->remote_rsc &&
node->details->remote_rsc->container == NULL &&
pcmk_is_set(node->details->remote_rsc->flags,
- pe_rsc_orphan_container_filler)) {
+ pcmk_rsc_removed_filler)) {
return TRUE;
}
@@ -104,23 +101,23 @@ is_dangling_guest_node(pe_node_t *node)
/*!
* \brief Schedule a fence action for a node
*
- * \param[in,out] data_set Current working set of cluster
- * \param[in,out] node Node to fence
- * \param[in] reason Text description of why fencing is needed
+ * \param[in,out] scheduler Scheduler data
+ * \param[in,out] node Node to fence
+ * \param[in] reason Text description of why fencing is needed
* \param[in] priority_delay Whether to consider `priority-fencing-delay`
*/
void
-pe_fence_node(pe_working_set_t * data_set, pe_node_t * node,
+pe_fence_node(pcmk_scheduler_t *scheduler, pcmk_node_t *node,
const char *reason, bool priority_delay)
{
CRM_CHECK(node, return);
/* A guest node is fenced by marking its container as failed */
if (pe__is_guest_node(node)) {
- pe_resource_t *rsc = node->details->remote_rsc->container;
+ pcmk_resource_t *rsc = node->details->remote_rsc->container;
- if (!pcmk_is_set(rsc->flags, pe_rsc_failed)) {
- if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
crm_notice("Not fencing guest node %s "
"(otherwise would because %s): "
"its guest resource %s is unmanaged",
@@ -135,7 +132,8 @@ pe_fence_node(pe_working_set_t * data_set, pe_node_t * node,
* in this transition if the recovery succeeds.
*/
node->details->remote_requires_reset = TRUE;
- pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
+ pe__set_resource_flags(rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
}
}
@@ -145,12 +143,12 @@ pe_fence_node(pe_working_set_t * data_set, pe_node_t * node,
"and guest resource no longer exists",
pe__node_name(node), reason);
pe__set_resource_flags(node->details->remote_rsc,
- pe_rsc_failed|pe_rsc_stop);
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
} else if (pe__is_remote_node(node)) {
- pe_resource_t *rsc = node->details->remote_rsc;
+ pcmk_resource_t *rsc = node->details->remote_rsc;
- if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
crm_notice("Not fencing remote node %s "
"(otherwise would because %s): connection is unmanaged",
pe__node_name(node), reason);
@@ -158,26 +156,26 @@ pe_fence_node(pe_working_set_t * data_set, pe_node_t * node,
node->details->remote_requires_reset = TRUE;
crm_warn("Remote node %s %s: %s",
pe__node_name(node),
- pe_can_fence(data_set, node)? "will be fenced" : "is unclean",
+ pe_can_fence(scheduler, node)? "will be fenced" : "is unclean",
reason);
}
node->details->unclean = TRUE;
// No need to apply `priority-fencing-delay` for remote nodes
- pe_fence_op(node, NULL, TRUE, reason, FALSE, data_set);
+ pe_fence_op(node, NULL, TRUE, reason, FALSE, scheduler);
} else if (node->details->unclean) {
crm_trace("Cluster node %s %s because %s",
pe__node_name(node),
- pe_can_fence(data_set, node)? "would also be fenced" : "also is unclean",
+ pe_can_fence(scheduler, node)? "would also be fenced" : "also is unclean",
reason);
} else {
crm_warn("Cluster node %s %s: %s",
pe__node_name(node),
- pe_can_fence(data_set, node)? "will be fenced" : "is unclean",
+ pe_can_fence(scheduler, node)? "will be fenced" : "is unclean",
reason);
node->details->unclean = TRUE;
- pe_fence_op(node, NULL, TRUE, reason, priority_delay, data_set);
+ pe_fence_op(node, NULL, TRUE, reason, priority_delay, scheduler);
}
}
@@ -197,215 +195,258 @@ pe_fence_node(pe_working_set_t * data_set, pe_node_t * node,
"/" XML_TAG_META_SETS "/" XPATH_UNFENCING_NVPAIR
static void
-set_if_xpath(uint64_t flag, const char *xpath, pe_working_set_t *data_set)
+set_if_xpath(uint64_t flag, const char *xpath, pcmk_scheduler_t *scheduler)
{
xmlXPathObjectPtr result = NULL;
- if (!pcmk_is_set(data_set->flags, flag)) {
- result = xpath_search(data_set->input, xpath);
+ if (!pcmk_is_set(scheduler->flags, flag)) {
+ result = xpath_search(scheduler->input, xpath);
if (result && (numXpathResults(result) > 0)) {
- pe__set_working_set_flags(data_set, flag);
+ pe__set_working_set_flags(scheduler, flag);
}
freeXpathObject(result);
}
}
gboolean
-unpack_config(xmlNode * config, pe_working_set_t * data_set)
+unpack_config(xmlNode *config, pcmk_scheduler_t *scheduler)
{
const char *value = NULL;
GHashTable *config_hash = pcmk__strkey_table(free, free);
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
+ .role = pcmk_role_unknown,
+ .now = scheduler->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
};
- data_set->config_hash = config_hash;
+ scheduler->config_hash = config_hash;
pe__unpack_dataset_nvpairs(config, XML_CIB_TAG_PROPSET, &rule_data, config_hash,
- CIB_OPTIONS_FIRST, FALSE, data_set);
+ CIB_OPTIONS_FIRST, FALSE, scheduler);
- verify_pe_options(data_set->config_hash);
+ verify_pe_options(scheduler->config_hash);
- set_config_flag(data_set, "enable-startup-probes", pe_flag_startup_probes);
- if (!pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
+ set_config_flag(scheduler, "enable-startup-probes",
+ pcmk_sched_probe_resources);
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_probe_resources)) {
crm_info("Startup probes: disabled (dangerous)");
}
- value = pe_pref(data_set->config_hash, XML_ATTR_HAVE_WATCHDOG);
+ value = pe_pref(scheduler->config_hash, XML_ATTR_HAVE_WATCHDOG);
if (value && crm_is_true(value)) {
crm_info("Watchdog-based self-fencing will be performed via SBD if "
"fencing is required and stonith-watchdog-timeout is nonzero");
- pe__set_working_set_flags(data_set, pe_flag_have_stonith_resource);
+ pe__set_working_set_flags(scheduler, pcmk_sched_have_fencing);
}
/* Set certain flags via xpath here, so they can be used before the relevant
* configuration sections are unpacked.
*/
- set_if_xpath(pe_flag_enable_unfencing, XPATH_ENABLE_UNFENCING, data_set);
+ set_if_xpath(pcmk_sched_enable_unfencing, XPATH_ENABLE_UNFENCING,
+ scheduler);
- value = pe_pref(data_set->config_hash, "stonith-timeout");
- data_set->stonith_timeout = (int) crm_parse_interval_spec(value);
- crm_debug("STONITH timeout: %d", data_set->stonith_timeout);
+ value = pe_pref(scheduler->config_hash, "stonith-timeout");
+ scheduler->stonith_timeout = (int) crm_parse_interval_spec(value);
+ crm_debug("STONITH timeout: %d", scheduler->stonith_timeout);
- set_config_flag(data_set, "stonith-enabled", pe_flag_stonith_enabled);
- crm_debug("STONITH of failed nodes is %s",
- pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)? "enabled" : "disabled");
+ set_config_flag(scheduler, "stonith-enabled", pcmk_sched_fencing_enabled);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
+ crm_debug("STONITH of failed nodes is enabled");
+ } else {
+ crm_debug("STONITH of failed nodes is disabled");
+ }
- data_set->stonith_action = pe_pref(data_set->config_hash, "stonith-action");
- if (!strcmp(data_set->stonith_action, "poweroff")) {
- pe_warn_once(pe_wo_poweroff,
+ scheduler->stonith_action = pe_pref(scheduler->config_hash,
+ "stonith-action");
+ if (!strcmp(scheduler->stonith_action, "poweroff")) {
+ pe_warn_once(pcmk__wo_poweroff,
"Support for stonith-action of 'poweroff' is deprecated "
"and will be removed in a future release (use 'off' instead)");
- data_set->stonith_action = "off";
+ scheduler->stonith_action = PCMK_ACTION_OFF;
}
- crm_trace("STONITH will %s nodes", data_set->stonith_action);
+ crm_trace("STONITH will %s nodes", scheduler->stonith_action);
- set_config_flag(data_set, "concurrent-fencing", pe_flag_concurrent_fencing);
- crm_debug("Concurrent fencing is %s",
- pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)? "enabled" : "disabled");
+ set_config_flag(scheduler, "concurrent-fencing",
+ pcmk_sched_concurrent_fencing);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_concurrent_fencing)) {
+ crm_debug("Concurrent fencing is enabled");
+ } else {
+ crm_debug("Concurrent fencing is disabled");
+ }
- value = pe_pref(data_set->config_hash,
+ value = pe_pref(scheduler->config_hash,
XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY);
if (value) {
- data_set->priority_fencing_delay = crm_parse_interval_spec(value) / 1000;
- crm_trace("Priority fencing delay is %ds", data_set->priority_fencing_delay);
+ scheduler->priority_fencing_delay = crm_parse_interval_spec(value)
+ / 1000;
+ crm_trace("Priority fencing delay is %ds",
+ scheduler->priority_fencing_delay);
}
- set_config_flag(data_set, "stop-all-resources", pe_flag_stop_everything);
+ set_config_flag(scheduler, "stop-all-resources", pcmk_sched_stop_all);
crm_debug("Stop all active resources: %s",
- pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stop_everything)));
+ pcmk__btoa(pcmk_is_set(scheduler->flags, pcmk_sched_stop_all)));
- set_config_flag(data_set, "symmetric-cluster", pe_flag_symmetric_cluster);
- if (pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)) {
+ set_config_flag(scheduler, "symmetric-cluster",
+ pcmk_sched_symmetric_cluster);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)) {
crm_debug("Cluster is symmetric" " - resources can run anywhere by default");
}
- value = pe_pref(data_set->config_hash, "no-quorum-policy");
+ value = pe_pref(scheduler->config_hash, "no-quorum-policy");
if (pcmk__str_eq(value, "ignore", pcmk__str_casei)) {
- data_set->no_quorum_policy = no_quorum_ignore;
+ scheduler->no_quorum_policy = pcmk_no_quorum_ignore;
} else if (pcmk__str_eq(value, "freeze", pcmk__str_casei)) {
- data_set->no_quorum_policy = no_quorum_freeze;
+ scheduler->no_quorum_policy = pcmk_no_quorum_freeze;
} else if (pcmk__str_eq(value, "demote", pcmk__str_casei)) {
- data_set->no_quorum_policy = no_quorum_demote;
+ scheduler->no_quorum_policy = pcmk_no_quorum_demote;
} else if (pcmk__str_eq(value, "suicide", pcmk__str_casei)) {
- if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
int do_panic = 0;
- crm_element_value_int(data_set->input, XML_ATTR_QUORUM_PANIC,
+ crm_element_value_int(scheduler->input, XML_ATTR_QUORUM_PANIC,
&do_panic);
- if (do_panic || pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
- data_set->no_quorum_policy = no_quorum_suicide;
+ if (do_panic || pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
+ scheduler->no_quorum_policy = pcmk_no_quorum_fence;
} else {
crm_notice("Resetting no-quorum-policy to 'stop': cluster has never had quorum");
- data_set->no_quorum_policy = no_quorum_stop;
+ scheduler->no_quorum_policy = pcmk_no_quorum_stop;
}
} else {
pcmk__config_err("Resetting no-quorum-policy to 'stop' because "
"fencing is disabled");
- data_set->no_quorum_policy = no_quorum_stop;
+ scheduler->no_quorum_policy = pcmk_no_quorum_stop;
}
} else {
- data_set->no_quorum_policy = no_quorum_stop;
+ scheduler->no_quorum_policy = pcmk_no_quorum_stop;
}
- switch (data_set->no_quorum_policy) {
- case no_quorum_freeze:
+ switch (scheduler->no_quorum_policy) {
+ case pcmk_no_quorum_freeze:
crm_debug("On loss of quorum: Freeze resources");
break;
- case no_quorum_stop:
+ case pcmk_no_quorum_stop:
crm_debug("On loss of quorum: Stop ALL resources");
break;
- case no_quorum_demote:
+ case pcmk_no_quorum_demote:
crm_debug("On loss of quorum: "
"Demote promotable resources and stop other resources");
break;
- case no_quorum_suicide:
+ case pcmk_no_quorum_fence:
crm_notice("On loss of quorum: Fence all remaining nodes");
break;
- case no_quorum_ignore:
+ case pcmk_no_quorum_ignore:
crm_notice("On loss of quorum: Ignore");
break;
}
- set_config_flag(data_set, "stop-orphan-resources", pe_flag_stop_rsc_orphans);
- crm_trace("Orphan resources are %s",
- pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)? "stopped" : "ignored");
+ set_config_flag(scheduler, "stop-orphan-resources",
+ pcmk_sched_stop_removed_resources);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) {
+ crm_trace("Orphan resources are stopped");
+ } else {
+ crm_trace("Orphan resources are ignored");
+ }
- set_config_flag(data_set, "stop-orphan-actions", pe_flag_stop_action_orphans);
- crm_trace("Orphan resource actions are %s",
- pcmk_is_set(data_set->flags, pe_flag_stop_action_orphans)? "stopped" : "ignored");
+ set_config_flag(scheduler, "stop-orphan-actions",
+ pcmk_sched_cancel_removed_actions);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_cancel_removed_actions)) {
+ crm_trace("Orphan resource actions are stopped");
+ } else {
+ crm_trace("Orphan resource actions are ignored");
+ }
- value = pe_pref(data_set->config_hash, "remove-after-stop");
+ value = pe_pref(scheduler->config_hash, "remove-after-stop");
if (value != NULL) {
if (crm_is_true(value)) {
- pe__set_working_set_flags(data_set, pe_flag_remove_after_stop);
+ pe__set_working_set_flags(scheduler, pcmk_sched_remove_after_stop);
#ifndef PCMK__COMPAT_2_0
- pe_warn_once(pe_wo_remove_after,
+ pe_warn_once(pcmk__wo_remove_after,
"Support for the remove-after-stop cluster property is"
" deprecated and will be removed in a future release");
#endif
} else {
- pe__clear_working_set_flags(data_set, pe_flag_remove_after_stop);
+ pe__clear_working_set_flags(scheduler,
+ pcmk_sched_remove_after_stop);
}
}
- set_config_flag(data_set, "maintenance-mode", pe_flag_maintenance_mode);
+ set_config_flag(scheduler, "maintenance-mode", pcmk_sched_in_maintenance);
crm_trace("Maintenance mode: %s",
- pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)));
+ pcmk__btoa(pcmk_is_set(scheduler->flags,
+ pcmk_sched_in_maintenance)));
- set_config_flag(data_set, "start-failure-is-fatal", pe_flag_start_failure_fatal);
- crm_trace("Start failures are %s",
- pcmk_is_set(data_set->flags, pe_flag_start_failure_fatal)? "always fatal" : "handled by failcount");
+ set_config_flag(scheduler, "start-failure-is-fatal",
+ pcmk_sched_start_failure_fatal);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_start_failure_fatal)) {
+ crm_trace("Start failures are always fatal");
+ } else {
+ crm_trace("Start failures are handled by failcount");
+ }
- if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
- set_config_flag(data_set, "startup-fencing", pe_flag_startup_fencing);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
+ set_config_flag(scheduler, "startup-fencing",
+ pcmk_sched_startup_fencing);
}
- if (pcmk_is_set(data_set->flags, pe_flag_startup_fencing)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_startup_fencing)) {
crm_trace("Unseen nodes will be fenced");
} else {
- pe_warn_once(pe_wo_blind, "Blind faith: not fencing unseen nodes");
+ pe_warn_once(pcmk__wo_blind, "Blind faith: not fencing unseen nodes");
}
- pe__unpack_node_health_scores(data_set);
+ pe__unpack_node_health_scores(scheduler);
- data_set->placement_strategy = pe_pref(data_set->config_hash, "placement-strategy");
- crm_trace("Placement strategy: %s", data_set->placement_strategy);
+ scheduler->placement_strategy = pe_pref(scheduler->config_hash,
+ "placement-strategy");
+ crm_trace("Placement strategy: %s", scheduler->placement_strategy);
- set_config_flag(data_set, "shutdown-lock", pe_flag_shutdown_lock);
- crm_trace("Resources will%s be locked to cleanly shut down nodes",
- (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)? "" : " not"));
- if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
- value = pe_pref(data_set->config_hash,
+ set_config_flag(scheduler, "shutdown-lock", pcmk_sched_shutdown_lock);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
+ value = pe_pref(scheduler->config_hash,
XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT);
- data_set->shutdown_lock = crm_parse_interval_spec(value) / 1000;
- crm_trace("Shutdown locks expire after %us", data_set->shutdown_lock);
+ scheduler->shutdown_lock = crm_parse_interval_spec(value) / 1000;
+ crm_trace("Resources will be locked to nodes that were cleanly "
+ "shut down (locks expire after %s)",
+ pcmk__readable_interval(scheduler->shutdown_lock));
+ } else {
+ crm_trace("Resources will not be locked to nodes that were cleanly "
+ "shut down");
+ }
+
+ value = pe_pref(scheduler->config_hash,
+ XML_CONFIG_ATTR_NODE_PENDING_TIMEOUT);
+ scheduler->node_pending_timeout = crm_parse_interval_spec(value) / 1000;
+ if (scheduler->node_pending_timeout == 0) {
+ crm_trace("Do not fence pending nodes");
+ } else {
+ crm_trace("Fence pending nodes after %s",
+ pcmk__readable_interval(scheduler->node_pending_timeout
+ * 1000));
}
return TRUE;
}
-pe_node_t *
+pcmk_node_t *
pe_create_node(const char *id, const char *uname, const char *type,
- const char *score, pe_working_set_t * data_set)
+ const char *score, pcmk_scheduler_t *scheduler)
{
- pe_node_t *new_node = NULL;
+ pcmk_node_t *new_node = NULL;
- if (pe_find_node(data_set->nodes, uname) != NULL) {
+ if (pe_find_node(scheduler->nodes, uname) != NULL) {
pcmk__config_warn("More than one node entry has name '%s'", uname);
}
- new_node = calloc(1, sizeof(pe_node_t));
+ new_node = calloc(1, sizeof(pcmk_node_t));
if (new_node == NULL) {
return NULL;
}
@@ -425,14 +466,14 @@ pe_create_node(const char *id, const char *uname, const char *type,
new_node->details->shutdown = FALSE;
new_node->details->rsc_discovery_enabled = TRUE;
new_node->details->running_rsc = NULL;
- new_node->details->data_set = data_set;
+ new_node->details->data_set = scheduler;
if (pcmk__str_eq(type, "member", pcmk__str_null_matches | pcmk__str_casei)) {
- new_node->details->type = node_member;
+ new_node->details->type = pcmk_node_variant_cluster;
} else if (pcmk__str_eq(type, "remote", pcmk__str_casei)) {
- new_node->details->type = node_remote;
- pe__set_working_set_flags(data_set, pe_flag_have_remote_nodes);
+ new_node->details->type = pcmk_node_variant_remote;
+ pe__set_working_set_flags(scheduler, pcmk_sched_have_remote_nodes);
} else {
/* @COMPAT 'ping' is the default for backward compatibility, but it
@@ -443,7 +484,7 @@ pe_create_node(const char *id, const char *uname, const char *type,
"assuming 'ping'", pcmk__s(uname, "without name"),
type);
}
- pe_warn_once(pe_wo_ping_node,
+ pe_warn_once(pcmk__wo_ping_node,
"Support for nodes of type 'ping' (such as %s) is "
"deprecated and will be removed in a future release",
pcmk__s(uname, "unnamed node"));
@@ -464,13 +505,13 @@ pe_create_node(const char *id, const char *uname, const char *type,
new_node->details->digest_cache = pcmk__strkey_table(free,
pe__free_digests);
- data_set->nodes = g_list_insert_sorted(data_set->nodes, new_node,
- pe__cmp_node_name);
+ scheduler->nodes = g_list_insert_sorted(scheduler->nodes, new_node,
+ pe__cmp_node_name);
return new_node;
}
static const char *
-expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pe_working_set_t *data)
+expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pcmk_scheduler_t *data)
{
xmlNode *attr_set = NULL;
xmlNode *attr = NULL;
@@ -527,9 +568,10 @@ expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pe_working_set_t *data
}
static void
-handle_startup_fencing(pe_working_set_t *data_set, pe_node_t *new_node)
+handle_startup_fencing(pcmk_scheduler_t *scheduler, pcmk_node_t *new_node)
{
- if ((new_node->details->type == node_remote) && (new_node->details->remote_rsc == NULL)) {
+ if ((new_node->details->type == pcmk_node_variant_remote)
+ && (new_node->details->remote_rsc == NULL)) {
/* Ignore fencing for remote nodes that don't have a connection resource
* associated with them. This happens when remote node entries get left
* in the nodes section after the connection resource is removed.
@@ -537,7 +579,7 @@ handle_startup_fencing(pe_working_set_t *data_set, pe_node_t *new_node)
return;
}
- if (pcmk_is_set(data_set->flags, pe_flag_startup_fencing)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_startup_fencing)) {
// All nodes are unclean until we've seen their status entry
new_node->details->unclean = TRUE;
@@ -552,10 +594,10 @@ handle_startup_fencing(pe_working_set_t *data_set, pe_node_t *new_node)
}
gboolean
-unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set)
+unpack_nodes(xmlNode *xml_nodes, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
- pe_node_t *new_node = NULL;
+ pcmk_node_t *new_node = NULL;
const char *id = NULL;
const char *uname = NULL;
const char *type = NULL;
@@ -578,46 +620,48 @@ unpack_nodes(xmlNode * xml_nodes, pe_working_set_t * data_set)
"> entry in configuration without id");
continue;
}
- new_node = pe_create_node(id, uname, type, score, data_set);
+ new_node = pe_create_node(id, uname, type, score, scheduler);
if (new_node == NULL) {
return FALSE;
}
- handle_startup_fencing(data_set, new_node);
+ handle_startup_fencing(scheduler, new_node);
- add_node_attrs(xml_obj, new_node, FALSE, data_set);
+ add_node_attrs(xml_obj, new_node, FALSE, scheduler);
crm_trace("Done with node %s", crm_element_value(xml_obj, XML_ATTR_UNAME));
}
}
- if (data_set->localhost && pe_find_node(data_set->nodes, data_set->localhost) == NULL) {
+ if (scheduler->localhost
+ && (pe_find_node(scheduler->nodes, scheduler->localhost) == NULL)) {
crm_info("Creating a fake local node");
- pe_create_node(data_set->localhost, data_set->localhost, NULL, 0,
- data_set);
+ pe_create_node(scheduler->localhost, scheduler->localhost, NULL, 0,
+ scheduler);
}
return TRUE;
}
static void
-setup_container(pe_resource_t * rsc, pe_working_set_t * data_set)
+setup_container(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
{
const char *container_id = NULL;
if (rsc->children) {
- g_list_foreach(rsc->children, (GFunc) setup_container, data_set);
+ g_list_foreach(rsc->children, (GFunc) setup_container, scheduler);
return;
}
container_id = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_CONTAINER);
if (container_id && !pcmk__str_eq(container_id, rsc->id, pcmk__str_casei)) {
- pe_resource_t *container = pe_find_resource(data_set->resources, container_id);
+ pcmk_resource_t *container = pe_find_resource(scheduler->resources,
+ container_id);
if (container) {
rsc->container = container;
- pe__set_resource_flags(container, pe_rsc_is_container);
+ pe__set_resource_flags(container, pcmk_rsc_has_filler);
container->fillers = g_list_append(container->fillers, rsc);
pe_rsc_trace(rsc, "Resource %s's container is %s", rsc->id, container_id);
} else {
@@ -627,7 +671,7 @@ setup_container(pe_resource_t * rsc, pe_working_set_t * data_set)
}
gboolean
-unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set)
+unpack_remote_nodes(xmlNode *xml_resources, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
@@ -646,11 +690,12 @@ unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set)
new_node_id = ID(xml_obj);
/* The "pe_find_node" check is here to make sure we don't iterate over
* an expanded node that has already been added to the node list. */
- if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
+ if (new_node_id
+ && (pe_find_node(scheduler->nodes, new_node_id) == NULL)) {
crm_trace("Found remote node %s defined by resource %s",
new_node_id, ID(xml_obj));
pe_create_node(new_node_id, new_node_id, "remote", NULL,
- data_set);
+ scheduler);
}
continue;
}
@@ -663,12 +708,14 @@ unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set)
* configuration for the guest node's connection, to be unpacked
* later.
*/
- new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources, data_set);
- if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
+ new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources,
+ scheduler);
+ if (new_node_id
+ && (pe_find_node(scheduler->nodes, new_node_id) == NULL)) {
crm_trace("Found guest node %s in resource %s",
new_node_id, ID(xml_obj));
pe_create_node(new_node_id, new_node_id, "remote", NULL,
- data_set);
+ scheduler);
}
continue;
}
@@ -681,13 +728,15 @@ unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set)
for (xml_obj2 = pcmk__xe_first_child(xml_obj); xml_obj2 != NULL;
xml_obj2 = pcmk__xe_next(xml_obj2)) {
- new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources, data_set);
+ new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources,
+ scheduler);
- if (new_node_id && pe_find_node(data_set->nodes, new_node_id) == NULL) {
+ if (new_node_id
+ && (pe_find_node(scheduler->nodes, new_node_id) == NULL)) {
crm_trace("Found guest node %s in resource %s inside group %s",
new_node_id, ID(xml_obj2), ID(xml_obj));
pe_create_node(new_node_id, new_node_id, "remote", NULL,
- data_set);
+ scheduler);
}
}
}
@@ -704,20 +753,20 @@ unpack_remote_nodes(xmlNode * xml_resources, pe_working_set_t * data_set)
* easy access to the connection resource during the scheduler calculations.
*/
static void
-link_rsc2remotenode(pe_working_set_t *data_set, pe_resource_t *new_rsc)
+link_rsc2remotenode(pcmk_scheduler_t *scheduler, pcmk_resource_t *new_rsc)
{
- pe_node_t *remote_node = NULL;
+ pcmk_node_t *remote_node = NULL;
if (new_rsc->is_remote_node == FALSE) {
return;
}
- if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
/* remote_nodes and remote_resources are not linked in quick location calculations */
return;
}
- remote_node = pe_find_node(data_set->nodes, new_rsc->id);
+ remote_node = pe_find_node(scheduler->nodes, new_rsc->id);
CRM_CHECK(remote_node != NULL, return);
pe_rsc_trace(new_rsc, "Linking remote connection resource %s to %s",
@@ -728,7 +777,7 @@ link_rsc2remotenode(pe_working_set_t *data_set, pe_resource_t *new_rsc)
/* Handle start-up fencing for remote nodes (as opposed to guest nodes)
* the same as is done for cluster nodes.
*/
- handle_startup_fencing(data_set, remote_node);
+ handle_startup_fencing(scheduler, remote_node);
} else {
/* pe_create_node() marks the new node as "remote" or "cluster"; now
@@ -742,7 +791,7 @@ link_rsc2remotenode(pe_working_set_t *data_set, pe_resource_t *new_rsc)
static void
destroy_tag(gpointer data)
{
- pe_tag_t *tag = data;
+ pcmk_tag_t *tag = data;
if (tag) {
free(tag->id);
@@ -756,7 +805,7 @@ destroy_tag(gpointer data)
* \brief Parse configuration XML for resource information
*
* \param[in] xml_resources Top of resource configuration XML
- * \param[in,out] data_set Where to put resource information
+ * \param[in,out] scheduler Scheduler data
*
* \return TRUE
*
@@ -764,63 +813,64 @@ destroy_tag(gpointer data)
* be used when pe__unpack_resource() calls resource_location()
*/
gboolean
-unpack_resources(const xmlNode *xml_resources, pe_working_set_t * data_set)
+unpack_resources(const xmlNode *xml_resources, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
GList *gIter = NULL;
- data_set->template_rsc_sets = pcmk__strkey_table(free, destroy_tag);
+ scheduler->template_rsc_sets = pcmk__strkey_table(free, destroy_tag);
for (xml_obj = pcmk__xe_first_child(xml_resources); xml_obj != NULL;
xml_obj = pcmk__xe_next(xml_obj)) {
- pe_resource_t *new_rsc = NULL;
+ pcmk_resource_t *new_rsc = NULL;
const char *id = ID(xml_obj);
if (pcmk__str_empty(id)) {
pcmk__config_err("Ignoring <%s> resource without ID",
- crm_element_name(xml_obj));
+ xml_obj->name);
continue;
}
if (pcmk__str_eq((const char *) xml_obj->name, XML_CIB_TAG_RSC_TEMPLATE,
pcmk__str_none)) {
- if (g_hash_table_lookup_extended(data_set->template_rsc_sets, id,
+ if (g_hash_table_lookup_extended(scheduler->template_rsc_sets, id,
NULL, NULL) == FALSE) {
/* Record the template's ID so we at least know that it exists. */
- g_hash_table_insert(data_set->template_rsc_sets, strdup(id), NULL);
+ g_hash_table_insert(scheduler->template_rsc_sets, strdup(id),
+ NULL);
}
continue;
}
crm_trace("Unpacking <%s " XML_ATTR_ID "='%s'>",
- crm_element_name(xml_obj), id);
+ xml_obj->name, id);
if (pe__unpack_resource(xml_obj, &new_rsc, NULL,
- data_set) == pcmk_rc_ok) {
- data_set->resources = g_list_append(data_set->resources, new_rsc);
+ scheduler) == pcmk_rc_ok) {
+ scheduler->resources = g_list_append(scheduler->resources, new_rsc);
pe_rsc_trace(new_rsc, "Added resource %s", new_rsc->id);
} else {
pcmk__config_err("Ignoring <%s> resource '%s' "
"because configuration is invalid",
- crm_element_name(xml_obj), id);
+ xml_obj->name, id);
}
}
- for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *rsc = (pe_resource_t *) gIter->data;
+ for (gIter = scheduler->resources; gIter != NULL; gIter = gIter->next) {
+ pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data;
- setup_container(rsc, data_set);
- link_rsc2remotenode(data_set, rsc);
+ setup_container(rsc, scheduler);
+ link_rsc2remotenode(scheduler, rsc);
}
- data_set->resources = g_list_sort(data_set->resources,
+ scheduler->resources = g_list_sort(scheduler->resources,
pe__cmp_rsc_priority);
- if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
/* Ignore */
- } else if (pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)
- && !pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) {
+ } else if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)
+ && !pcmk_is_set(scheduler->flags, pcmk_sched_have_fencing)) {
pcmk__config_err("Resource start-up disabled since no STONITH resources have been defined");
pcmk__config_err("Either configure some or disable STONITH with the stonith-enabled option");
@@ -831,11 +881,11 @@ unpack_resources(const xmlNode *xml_resources, pe_working_set_t * data_set)
}
gboolean
-unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set)
+unpack_tags(xmlNode *xml_tags, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_tag = NULL;
- data_set->tags = pcmk__strkey_table(free, destroy_tag);
+ scheduler->tags = pcmk__strkey_table(free, destroy_tag);
for (xml_tag = pcmk__xe_first_child(xml_tags); xml_tag != NULL;
xml_tag = pcmk__xe_next(xml_tag)) {
@@ -849,7 +899,7 @@ unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set)
if (tag_id == NULL) {
pcmk__config_err("Ignoring <%s> without " XML_ATTR_ID,
- crm_element_name(xml_tag));
+ (const char *) xml_tag->name);
continue;
}
@@ -864,11 +914,11 @@ unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set)
if (obj_ref == NULL) {
pcmk__config_err("Ignoring <%s> for tag '%s' without " XML_ATTR_ID,
- crm_element_name(xml_obj_ref), tag_id);
+ xml_obj_ref->name, tag_id);
continue;
}
- if (add_tag_ref(data_set->tags, tag_id, obj_ref) == FALSE) {
+ if (add_tag_ref(scheduler->tags, tag_id, obj_ref) == FALSE) {
return FALSE;
}
}
@@ -880,7 +930,7 @@ unpack_tags(xmlNode * xml_tags, pe_working_set_t * data_set)
/* The ticket state section:
* "/cib/status/tickets/ticket_state" */
static gboolean
-unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set)
+unpack_ticket_state(xmlNode *xml_ticket, pcmk_scheduler_t *scheduler)
{
const char *ticket_id = NULL;
const char *granted = NULL;
@@ -888,7 +938,7 @@ unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set)
const char *standby = NULL;
xmlAttrPtr xIter = NULL;
- pe_ticket_t *ticket = NULL;
+ pcmk_ticket_t *ticket = NULL;
ticket_id = ID(xml_ticket);
if (pcmk__str_empty(ticket_id)) {
@@ -897,9 +947,9 @@ unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set)
crm_trace("Processing ticket state for %s", ticket_id);
- ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
+ ticket = g_hash_table_lookup(scheduler->tickets, ticket_id);
if (ticket == NULL) {
- ticket = ticket_new(ticket_id, data_set);
+ ticket = ticket_new(ticket_id, scheduler);
if (ticket == NULL) {
return FALSE;
}
@@ -907,7 +957,7 @@ unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set)
for (xIter = xml_ticket->properties; xIter; xIter = xIter->next) {
const char *prop_name = (const char *)xIter->name;
- const char *prop_value = crm_element_value(xml_ticket, prop_name);
+ const char *prop_value = pcmk__xml_attr_value(xIter);
if (pcmk__str_eq(prop_name, XML_ATTR_ID, pcmk__str_none)) {
continue;
@@ -948,7 +998,7 @@ unpack_ticket_state(xmlNode * xml_ticket, pe_working_set_t * data_set)
}
static gboolean
-unpack_tickets_state(xmlNode * xml_tickets, pe_working_set_t * data_set)
+unpack_tickets_state(xmlNode *xml_tickets, pcmk_scheduler_t *scheduler)
{
xmlNode *xml_obj = NULL;
@@ -958,19 +1008,19 @@ unpack_tickets_state(xmlNode * xml_tickets, pe_working_set_t * data_set)
if (!pcmk__str_eq((const char *)xml_obj->name, XML_CIB_TAG_TICKET_STATE, pcmk__str_none)) {
continue;
}
- unpack_ticket_state(xml_obj, data_set);
+ unpack_ticket_state(xml_obj, scheduler);
}
return TRUE;
}
static void
-unpack_handle_remote_attrs(pe_node_t *this_node, const xmlNode *state,
- pe_working_set_t *data_set)
+unpack_handle_remote_attrs(pcmk_node_t *this_node, const xmlNode *state,
+ pcmk_scheduler_t *scheduler)
{
const char *resource_discovery_enabled = NULL;
const xmlNode *attrs = NULL;
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
if (!pcmk__str_eq((const char *)state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
return;
@@ -990,7 +1040,7 @@ unpack_handle_remote_attrs(pe_node_t *this_node, const xmlNode *state,
this_node->details->unseen = FALSE;
}
attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS, FALSE);
- add_node_attrs(attrs, this_node, TRUE, data_set);
+ add_node_attrs(attrs, this_node, TRUE, scheduler);
if (pe__shutdown_requested(this_node)) {
crm_info("%s is shutting down", pe__node_name(this_node));
@@ -1003,7 +1053,7 @@ unpack_handle_remote_attrs(pe_node_t *this_node, const xmlNode *state,
}
if (crm_is_true(pe_node_attribute_raw(this_node, "maintenance")) ||
- ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_managed))) {
+ ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_managed))) {
crm_info("%s is in maintenance mode", pe__node_name(this_node));
this_node->details->maintenance = TRUE;
}
@@ -1011,7 +1061,7 @@ unpack_handle_remote_attrs(pe_node_t *this_node, const xmlNode *state,
resource_discovery_enabled = pe_node_attribute_raw(this_node, XML_NODE_ATTR_RSC_DISCOVERY);
if (resource_discovery_enabled && !crm_is_true(resource_discovery_enabled)) {
if (pe__is_remote_node(this_node)
- && !pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ && !pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
crm_warn("Ignoring " XML_NODE_ATTR_RSC_DISCOVERY
" attribute on Pacemaker Remote node %s"
" because fencing is disabled",
@@ -1033,19 +1083,19 @@ unpack_handle_remote_attrs(pe_node_t *this_node, const xmlNode *state,
* \internal
* \brief Unpack a cluster node's transient attributes
*
- * \param[in] state CIB node state XML
- * \param[in,out] node Cluster node whose attributes are being unpacked
- * \param[in,out] data_set Cluster working set
+ * \param[in] state CIB node state XML
+ * \param[in,out] node Cluster node whose attributes are being unpacked
+ * \param[in,out] scheduler Scheduler data
*/
static void
-unpack_transient_attributes(const xmlNode *state, pe_node_t *node,
- pe_working_set_t *data_set)
+unpack_transient_attributes(const xmlNode *state, pcmk_node_t *node,
+ pcmk_scheduler_t *scheduler)
{
const char *discovery = NULL;
const xmlNode *attrs = find_xml_node(state, XML_TAG_TRANSIENT_NODEATTRS,
FALSE);
- add_node_attrs(attrs, node, TRUE, data_set);
+ add_node_attrs(attrs, node, TRUE, scheduler);
if (crm_is_true(pe_node_attribute_raw(node, "standby"))) {
crm_info("%s is in standby mode", pe__node_name(node));
@@ -1074,15 +1124,15 @@ unpack_transient_attributes(const xmlNode *state, pe_node_t *node,
* resource history inside it. Multiple passes through the status are needed to
* fully unpack everything.
*
- * \param[in] state CIB node state XML
- * \param[in,out] data_set Cluster working set
+ * \param[in] state CIB node state XML
+ * \param[in,out] scheduler Scheduler data
*/
static void
-unpack_node_state(const xmlNode *state, pe_working_set_t *data_set)
+unpack_node_state(const xmlNode *state, pcmk_scheduler_t *scheduler)
{
const char *id = NULL;
const char *uname = NULL;
- pe_node_t *this_node = NULL;
+ pcmk_node_t *this_node = NULL;
id = crm_element_value(state, XML_ATTR_ID);
if (id == NULL) {
@@ -1093,15 +1143,21 @@ unpack_node_state(const xmlNode *state, pe_working_set_t *data_set)
uname = crm_element_value(state, XML_ATTR_UNAME);
if (uname == NULL) {
- crm_warn("Ignoring malformed " XML_CIB_TAG_STATE " entry without "
- XML_ATTR_UNAME);
- return;
+ /* If a joining peer causes the cluster to acquire quorum from corosync
+ * before it has joined the CPG membership of pacemaker-controld, the
+ * node_state entry created for it may not have a uname yet. Recognize
+ * the node as `pending` and wait for it to join the CPG.
+ */
+ crm_trace("Handling " XML_CIB_TAG_STATE " entry with id=\"%s\" without "
+ XML_ATTR_UNAME, id);
}
- this_node = pe_find_node_any(data_set->nodes, id, uname);
+ this_node = pe_find_node_any(scheduler->nodes, id, uname);
if (this_node == NULL) {
- pcmk__config_warn("Ignoring recorded node state for '%s' because "
- "it is no longer in the configuration", uname);
+ pcmk__config_warn("Ignoring recorded node state for id=\"%s\" (%s) "
+ "because it is no longer in the configuration",
+ id, pcmk__s(uname, "uname unknown"));
return;
}
@@ -1116,7 +1172,7 @@ unpack_node_state(const xmlNode *state, pe_working_set_t *data_set)
return;
}
- unpack_transient_attributes(state, this_node, data_set);
+ unpack_transient_attributes(state, this_node, scheduler);
/* Provisionally mark this cluster node as clean. We have at least seen it
* in the current cluster's lifetime.
@@ -1126,16 +1182,16 @@ unpack_node_state(const xmlNode *state, pe_working_set_t *data_set)
crm_trace("Determining online status of cluster node %s (id %s)",
pe__node_name(this_node), id);
- determine_online_status(state, this_node, data_set);
+ determine_online_status(state, this_node, scheduler);
- if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_quorate)
&& this_node->details->online
- && (data_set->no_quorum_policy == no_quorum_suicide)) {
+ && (scheduler->no_quorum_policy == pcmk_no_quorum_fence)) {
/* Everything else should flow from this automatically
* (at least until the scheduler becomes able to migrate off
* healthy resources)
*/
- pe_fence_node(data_set, this_node, "cluster does not have quorum",
+ pe_fence_node(scheduler, this_node, "cluster does not have quorum",
FALSE);
}
}
@@ -1150,16 +1206,16 @@ unpack_node_state(const xmlNode *state, pe_working_set_t *data_set)
* in another node's history, so it might take multiple passes to unpack
* everything.
*
- * \param[in] status CIB XML status section
- * \param[in] fence If true, treat any not-yet-unpacked nodes as unseen
- * \param[in,out] data_set Cluster working set
+ * \param[in] status CIB XML status section
+ * \param[in] fence If true, treat any not-yet-unpacked nodes as unseen
+ * \param[in,out] scheduler Scheduler data
*
* \return Standard Pacemaker return code (specifically pcmk_rc_ok if done,
* or EAGAIN if more unpacking remains to be done)
*/
static int
unpack_node_history(const xmlNode *status, bool fence,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
int rc = pcmk_rc_ok;
@@ -1169,7 +1225,7 @@ unpack_node_history(const xmlNode *status, bool fence,
const char *id = ID(state);
const char *uname = crm_element_value(state, XML_ATTR_UNAME);
- pe_node_t *this_node = NULL;
+ pcmk_node_t *this_node = NULL;
if ((id == NULL) || (uname == NULL)) {
// Warning already logged in first pass through status section
@@ -1178,7 +1234,7 @@ unpack_node_history(const xmlNode *status, bool fence,
continue;
}
- this_node = pe_find_node_any(data_set->nodes, id, uname);
+ this_node = pe_find_node_any(scheduler->nodes, id, uname);
if (this_node == NULL) {
// Warning already logged in first pass through status section
crm_trace("Not unpacking resource history for node %s because "
@@ -1200,10 +1256,10 @@ unpack_node_history(const xmlNode *status, bool fence,
* other resource history to the point that we know that the node's
* connection and containing resource are both up.
*/
- pe_resource_t *rsc = this_node->details->remote_rsc;
+ pcmk_resource_t *rsc = this_node->details->remote_rsc;
- if ((rsc == NULL) || (rsc->role != RSC_ROLE_STARTED)
- || (rsc->container->role != RSC_ROLE_STARTED)) {
+ if ((rsc == NULL) || (rsc->role != pcmk_role_started)
+ || (rsc->container->role != pcmk_role_started)) {
crm_trace("Not unpacking resource history for guest node %s "
"because container and connection are not known to "
"be up", id);
@@ -1216,11 +1272,11 @@ unpack_node_history(const xmlNode *status, bool fence,
* connection is up, with the exception of when shutdown locks are
* in use.
*/
- pe_resource_t *rsc = this_node->details->remote_rsc;
+ pcmk_resource_t *rsc = this_node->details->remote_rsc;
if ((rsc == NULL)
- || (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)
- && (rsc->role != RSC_ROLE_STARTED))) {
+ || (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)
+ && (rsc->role != pcmk_role_started))) {
crm_trace("Not unpacking resource history for remote node %s "
"because connection is not known to be up", id);
continue;
@@ -1231,8 +1287,9 @@ unpack_node_history(const xmlNode *status, bool fence,
* nodes have been unpacked. This allows us to number active clone
* instances first.
*/
- } else if (!pcmk_any_flags_set(data_set->flags, pe_flag_stonith_enabled
- |pe_flag_shutdown_lock)
+ } else if (!pcmk_any_flags_set(scheduler->flags,
+ pcmk_sched_fencing_enabled
+ |pcmk_sched_shutdown_lock)
&& !this_node->details->online) {
crm_trace("Not unpacking resource history for offline "
"cluster node %s", id);
@@ -1240,15 +1297,15 @@ unpack_node_history(const xmlNode *status, bool fence,
}
if (pe__is_guest_or_remote_node(this_node)) {
- determine_remote_online_status(data_set, this_node);
- unpack_handle_remote_attrs(this_node, state, data_set);
+ determine_remote_online_status(scheduler, this_node);
+ unpack_handle_remote_attrs(this_node, state, scheduler);
}
crm_trace("Unpacking resource history for %snode %s",
(fence? "unseen " : ""), id);
this_node->details->unpacked = TRUE;
- unpack_node_lrm(this_node, state, data_set);
+ unpack_node_lrm(this_node, state, scheduler);
rc = EAGAIN; // Other node histories might depend on this one
}
@@ -1259,172 +1316,324 @@ unpack_node_history(const xmlNode *status, bool fence,
/* create positive rsc_to_node constraints between resources and the nodes they are running on */
/* anything else? */
gboolean
-unpack_status(xmlNode * status, pe_working_set_t * data_set)
+unpack_status(xmlNode *status, pcmk_scheduler_t *scheduler)
{
xmlNode *state = NULL;
crm_trace("Beginning unpack");
- if (data_set->tickets == NULL) {
- data_set->tickets = pcmk__strkey_table(free, destroy_ticket);
+ if (scheduler->tickets == NULL) {
+ scheduler->tickets = pcmk__strkey_table(free, destroy_ticket);
}
for (state = pcmk__xe_first_child(status); state != NULL;
state = pcmk__xe_next(state)) {
if (pcmk__str_eq((const char *)state->name, XML_CIB_TAG_TICKETS, pcmk__str_none)) {
- unpack_tickets_state((xmlNode *) state, data_set);
+ unpack_tickets_state((xmlNode *) state, scheduler);
} else if (pcmk__str_eq((const char *)state->name, XML_CIB_TAG_STATE, pcmk__str_none)) {
- unpack_node_state(state, data_set);
+ unpack_node_state(state, scheduler);
}
}
- while (unpack_node_history(status, FALSE, data_set) == EAGAIN) {
+ while (unpack_node_history(status, FALSE, scheduler) == EAGAIN) {
crm_trace("Another pass through node resource histories is needed");
}
// Now catch any nodes we didn't see
unpack_node_history(status,
- pcmk_is_set(data_set->flags, pe_flag_stonith_enabled),
- data_set);
+ pcmk_is_set(scheduler->flags,
+ pcmk_sched_fencing_enabled),
+ scheduler);
/* Now that we know where resources are, we can schedule stops of containers
* with failed bundle connections
*/
- if (data_set->stop_needed != NULL) {
- for (GList *item = data_set->stop_needed; item; item = item->next) {
- pe_resource_t *container = item->data;
- pe_node_t *node = pe__current_node(container);
+ if (scheduler->stop_needed != NULL) {
+ for (GList *item = scheduler->stop_needed; item; item = item->next) {
+ pcmk_resource_t *container = item->data;
+ pcmk_node_t *node = pe__current_node(container);
if (node) {
stop_action(container, node, FALSE);
}
}
- g_list_free(data_set->stop_needed);
- data_set->stop_needed = NULL;
+ g_list_free(scheduler->stop_needed);
+ scheduler->stop_needed = NULL;
}
/* Now that we know status of all Pacemaker Remote connections and nodes,
* we can stop connections for node shutdowns, and check the online status
* of remote/guest nodes that didn't have any node history to unpack.
*/
- for (GList *gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
- pe_node_t *this_node = gIter->data;
+ for (GList *gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) {
+ pcmk_node_t *this_node = gIter->data;
if (!pe__is_guest_or_remote_node(this_node)) {
continue;
}
if (this_node->details->shutdown
&& (this_node->details->remote_rsc != NULL)) {
- pe__set_next_role(this_node->details->remote_rsc, RSC_ROLE_STOPPED,
+ pe__set_next_role(this_node->details->remote_rsc, pcmk_role_stopped,
"remote shutdown");
}
if (!this_node->details->unpacked) {
- determine_remote_online_status(data_set, this_node);
+ determine_remote_online_status(scheduler, this_node);
}
}
return TRUE;
}
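
unpack_status() above keeps calling unpack_node_history() until it stops returning EAGAIN, because one node's resource history may only become unpackable after another node's connection or container is known to be up. A standalone sketch of that iterate-until-no-progress pattern, using made-up data rather than the Pacemaker API:

#include <stdbool.h>
#include <stdio.h>

#define MADE_PROGRESS 1  /* stand-in for EAGAIN: another pass is needed */
#define DONE          0

/* One pass: unpack whatever has become unpackable since the last pass.
 * Here node i can only be unpacked after node i + 1 has been. */
static int unpack_pass(bool *unpacked, int n) {
    int rc = DONE;

    for (int i = 0; i < n; i++) {
        if (!unpacked[i] && ((i == n - 1) || unpacked[i + 1])) {
            unpacked[i] = true;
            rc = MADE_PROGRESS;  /* other histories might depend on this one */
        }
    }
    return rc;
}

int main(void) {
    bool unpacked[4] = { false, false, false, false };
    int passes = 0;

    while (unpack_pass(unpacked, 4) == MADE_PROGRESS) {
        passes++;
    }
    printf("all histories unpacked after %d passes\n", passes);
    return 0;
}
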
+/*!
+ * \internal
+ * \brief Unpack node's time when it became a member at the cluster layer
+ *
+ * \param[in] node_state Node's node_state entry
+ * \param[in,out] scheduler Scheduler data
+ *
+ * \return Epoch time when node became a cluster member
+ * (or scheduler effective time for legacy entries) if a member,
+ * 0 if not a member, or -1 if no valid information available
+ */
+static long long
+unpack_node_member(const xmlNode *node_state, pcmk_scheduler_t *scheduler)
+{
+ const char *member_time = crm_element_value(node_state, PCMK__XA_IN_CCM);
+ int member = 0;
+
+ if (member_time == NULL) {
+ return -1LL;
+
+ } else if (crm_str_to_boolean(member_time, &member) == 1) {
+ /* If in_ccm=0, we'll return 0 here. If in_ccm=1, either the entry was
+ * recorded as a boolean by a DC older than 2.1.7, or the node is pending
+ * shutdown and has left the CPG, in which case it was set to 1 to avoid
+ * fencing for exceeding node-pending-timeout.
+ *
+ * We return the effective time for in_ccm=1 because, to avoid fencing,
+ * what matters is that the effective time minus this value stays below
+ * the pending node timeout.
+ */
+ return member? (long long) get_effective_time(scheduler) : 0LL;
+
+ } else {
+ long long when_member = 0LL;
+
+ if ((pcmk__scan_ll(member_time, &when_member,
+ 0LL) != pcmk_rc_ok) || (when_member < 0LL)) {
+ crm_warn("Unrecognized value '%s' for " PCMK__XA_IN_CCM
+ " in " XML_CIB_TAG_STATE " entry", member_time);
+ return -1LL;
+ }
+ return when_member;
+ }
+}
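
The new unpack_node_member() accepts both the legacy boolean form of in_ccm (recorded by DCs before 2.1.7) and the newer epoch-timestamp form. A standalone sketch of that dual interpretation using only the standard library; the accepted strings are simplified here and do not reflect everything crm_str_to_boolean() allows:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return the member-since time: -1 if unknown, 0 if not a member,
 * "now" for a legacy boolean true, or the recorded epoch timestamp. */
static long long member_since(const char *in_ccm, long long now) {
    char *end = NULL;
    long long when;

    if (in_ccm == NULL) {
        return -1LL;                       /* no information available */
    }
    if (strcmp(in_ccm, "true") == 0) {
        return now;                        /* legacy boolean entry */
    }
    if (strcmp(in_ccm, "false") == 0) {
        return 0LL;                        /* not a cluster member */
    }
    errno = 0;
    when = strtoll(in_ccm, &end, 10);
    if ((errno != 0) || (end == in_ccm) || (*end != '\0') || (when < 0)) {
        return -1LL;                       /* unrecognized value */
    }
    return when;                           /* epoch timestamp */
}

int main(void) {
    printf("%lld %lld %lld\n",
           member_since("true", 1700000000LL),
           member_since("0", 1700000000LL),
           member_since("1699999000", 1700000000LL));
    return 0;
}
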
+
+/*!
+ * \internal
+ * \brief Unpack node's time when it became online in process group
+ *
+ * \param[in] node_state Node's node_state entry
+ *
+ * \return Epoch time when node became online in process group (or 0 if not
+ * online, or 1 for legacy online entries)
+ */
+static long long
+unpack_node_online(const xmlNode *node_state)
+{
+ const char *peer_time = crm_element_value(node_state, PCMK__XA_CRMD);
+
+ // @COMPAT Entries recorded for DCs < 2.1.7 have "online" or "offline"
+ if (pcmk__str_eq(peer_time, OFFLINESTATUS,
+ pcmk__str_casei|pcmk__str_null_matches)) {
+ return 0LL;
+
+ } else if (pcmk__str_eq(peer_time, ONLINESTATUS, pcmk__str_casei)) {
+ return 1LL;
+
+ } else {
+ long long when_online = 0LL;
+
+ if ((pcmk__scan_ll(peer_time, &when_online, 0LL) != pcmk_rc_ok)
+ || (when_online < 0)) {
+ crm_warn("Unrecognized value '%s' for " PCMK__XA_CRMD " in "
+ XML_CIB_TAG_STATE " entry, assuming offline", peer_time);
+ return 0LL;
+ }
+ return when_online;
+ }
+}
+
+/*!
+ * \internal
+ * \brief Unpack node attribute for user-requested fencing
+ *
+ * \param[in] node Node to check
+ * \param[in] node_state Node's node_state entry in CIB status
+ *
+ * \return \c true if fencing has been requested for \p node, otherwise \c false
+ */
+static bool
+unpack_node_terminate(const pcmk_node_t *node, const xmlNode *node_state)
+{
+ long long value = 0LL;
+ int value_i = 0;
+ const char *value_s = pe_node_attribute_raw(node, PCMK_NODE_ATTR_TERMINATE);
+
+ // Value may be boolean or an epoch time
+ if (crm_str_to_boolean(value_s, &value_i) == 1) {
+ return (value_i != 0);
+ }
+ if (pcmk__scan_ll(value_s, &value, 0LL) == pcmk_rc_ok) {
+ return (value > 0);
+ }
+ crm_warn("Ignoring unrecognized value '%s' for " PCMK_NODE_ATTR_TERMINATE
+ "node attribute for %s", value_s, pe__node_name(node));
+ return false;
+}
+
static gboolean
-determine_online_status_no_fencing(pe_working_set_t *data_set,
+determine_online_status_no_fencing(pcmk_scheduler_t *scheduler,
const xmlNode *node_state,
- pe_node_t *this_node)
+ pcmk_node_t *this_node)
{
gboolean online = FALSE;
- const char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE);
- const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER);
- const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER);
- const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
+ const char *join = crm_element_value(node_state, PCMK__XA_JOIN);
+ const char *exp_state = crm_element_value(node_state, PCMK__XA_EXPECTED);
+ long long when_member = unpack_node_member(node_state, scheduler);
+ long long when_online = unpack_node_online(node_state);
- if (!crm_is_true(in_cluster)) {
- crm_trace("Node is down: in_cluster=%s",
- pcmk__s(in_cluster, "<null>"));
+ if (when_member <= 0) {
+ crm_trace("Node %s is %sdown", pe__node_name(this_node),
+ ((when_member < 0)? "presumed " : ""));
- } else if (pcmk__str_eq(is_peer, ONLINESTATUS, pcmk__str_casei)) {
+ } else if (when_online > 0) {
if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
online = TRUE;
} else {
- crm_debug("Node is not ready to run resources: %s", join);
+ crm_debug("Node %s is not ready to run resources: %s",
+ pe__node_name(this_node), join);
}
} else if (this_node->details->expected_up == FALSE) {
- crm_trace("Controller is down: "
- "in_cluster=%s is_peer=%s join=%s expected=%s",
- pcmk__s(in_cluster, "<null>"), pcmk__s(is_peer, "<null>"),
+ crm_trace("Node %s controller is down: "
+ "member@%lld online@%lld join=%s expected=%s",
+ pe__node_name(this_node), when_member, when_online,
pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"));
} else {
/* mark it unclean */
- pe_fence_node(data_set, this_node, "peer is unexpectedly down", FALSE);
- crm_info("in_cluster=%s is_peer=%s join=%s expected=%s",
- pcmk__s(in_cluster, "<null>"), pcmk__s(is_peer, "<null>"),
+ pe_fence_node(scheduler, this_node, "peer is unexpectedly down", FALSE);
+ crm_info("Node %s member@%lld online@%lld join=%s expected=%s",
+ pe__node_name(this_node), when_member, when_online,
pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"));
}
return online;
}
-static gboolean
-determine_online_status_fencing(pe_working_set_t *data_set,
- const xmlNode *node_state, pe_node_t *this_node)
+/*!
+ * \internal
+ * \brief Check whether a node has taken too long to join controller group
+ *
+ * \param[in,out] scheduler Scheduler data
+ * \param[in] node Node to check
+ * \param[in] when_member Epoch time when node became a cluster member
+ * \param[in] when_online Epoch time when node joined controller group
+ *
+ * \return true if node has been pending (on the way up) longer than
+ * node-pending-timeout, otherwise false
+ * \note This will also update the cluster's recheck time if appropriate.
+ */
+static inline bool
+pending_too_long(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
+ long long when_member, long long when_online)
{
- gboolean online = FALSE;
- gboolean do_terminate = FALSE;
- bool crmd_online = FALSE;
- const char *join = crm_element_value(node_state, XML_NODE_JOIN_STATE);
- const char *is_peer = crm_element_value(node_state, XML_NODE_IS_PEER);
- const char *in_cluster = crm_element_value(node_state, XML_NODE_IN_CLUSTER);
- const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
- const char *terminate = pe_node_attribute_raw(this_node, "terminate");
-
-/*
- - XML_NODE_IN_CLUSTER ::= true|false
- - XML_NODE_IS_PEER ::= online|offline
- - XML_NODE_JOIN_STATE ::= member|down|pending|banned
- - XML_NODE_EXPECTED ::= member|down
-*/
+ if ((scheduler->node_pending_timeout > 0)
+ && (when_member > 0) && (when_online <= 0)) {
+ // There is a timeout on pending nodes, and node is pending
- if (crm_is_true(terminate)) {
- do_terminate = TRUE;
+ time_t timeout = when_member + scheduler->node_pending_timeout;
- } else if (terminate != NULL && strlen(terminate) > 0) {
- /* could be a time() value */
- char t = terminate[0];
-
- if (t != '0' && isdigit(t)) {
- do_terminate = TRUE;
+ if (get_effective_time(node->details->data_set) >= timeout) {
+ return true; // Node has timed out
}
+
+ // Node is pending, but still has time
+ pe__update_recheck_time(timeout, scheduler, "pending node timeout");
}
+ return false;
+}
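
pending_too_long() above reduces to simple arithmetic: a node that became a cluster member at when_member but has not yet joined the controller group times out once the effective time reaches when_member plus node-pending-timeout. A standalone sketch of that check with a hypothetical helper, omitting the recheck-time update:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Returns true if a pending node (member of the cluster layer but not yet in
 * the controller group) has exceeded the configured pending timeout. */
static bool pending_timed_out(time_t now, long long when_member,
                              long long when_online,
                              unsigned int pending_timeout_s) {
    if ((pending_timeout_s == 0) || (when_member <= 0) || (when_online > 0)) {
        return false;  /* no timeout configured, or node is not pending */
    }
    return now >= (time_t) (when_member + pending_timeout_s);
}

int main(void) {
    time_t now = 1700000300;

    /* member since t=1700000000, never online, 120s timeout -> timed out */
    printf("%d\n", pending_timed_out(now, 1700000000LL, 0LL, 120));
    /* same node, 600s timeout -> still within the allowed pending window */
    printf("%d\n", pending_timed_out(now, 1700000000LL, 0LL, 600));
    return 0;
}
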
+
+static bool
+determine_online_status_fencing(pcmk_scheduler_t *scheduler,
+ const xmlNode *node_state,
+ pcmk_node_t *this_node)
+{
+ bool termination_requested = unpack_node_terminate(this_node, node_state);
+ const char *join = crm_element_value(node_state, PCMK__XA_JOIN);
+ const char *exp_state = crm_element_value(node_state, PCMK__XA_EXPECTED);
+ long long when_member = unpack_node_member(node_state, scheduler);
+ long long when_online = unpack_node_online(node_state);
+
+/*
+ - PCMK__XA_JOIN ::= member|down|pending|banned
+ - PCMK__XA_EXPECTED ::= member|down
- crm_trace("%s: in_cluster=%s is_peer=%s join=%s expected=%s term=%d",
- pe__node_name(this_node), pcmk__s(in_cluster, "<null>"),
- pcmk__s(is_peer, "<null>"), pcmk__s(join, "<null>"),
- pcmk__s(exp_state, "<null>"), do_terminate);
+ @COMPAT with entries recorded for DCs < 2.1.7
+ - PCMK__XA_IN_CCM ::= true|false
+ - PCMK__XA_CRMD ::= online|offline
- online = crm_is_true(in_cluster);
- crmd_online = pcmk__str_eq(is_peer, ONLINESTATUS, pcmk__str_casei);
- if (exp_state == NULL) {
- exp_state = CRMD_JOINSTATE_DOWN;
- }
+ Since crm_feature_set 3.18.0 (pacemaker-2.1.7):
+ - PCMK__XA_IN_CCM ::= <timestamp>|0
+ The timestamp is when the node became a cluster member. A value of 0 means
+ the node is not a cluster member.
+
+ - PCMK__XA_CRMD ::= <timestamp>|0
+ The timestamp is when the peer came online in the CPG. A value of 0 means
+ the peer is offline in the CPG.
+*/
+
+ crm_trace("Node %s member@%lld online@%lld join=%s expected=%s%s",
+ pe__node_name(this_node), when_member, when_online,
+ pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"),
+ (termination_requested? " (termination requested)" : ""));
if (this_node->details->shutdown) {
crm_debug("%s is shutting down", pe__node_name(this_node));
/* Slightly different criteria since we can't shut down a dead peer */
- online = crmd_online;
+ return (when_online > 0);
+ }
- } else if (in_cluster == NULL) {
- pe_fence_node(data_set, this_node, "peer has not been seen by the cluster", FALSE);
+ if (when_member < 0) {
+ pe_fence_node(scheduler, this_node,
+ "peer has not been seen by the cluster", FALSE);
+ return false;
+ }
- } else if (pcmk__str_eq(join, CRMD_JOINSTATE_NACK, pcmk__str_casei)) {
- pe_fence_node(data_set, this_node,
+ if (pcmk__str_eq(join, CRMD_JOINSTATE_NACK, pcmk__str_none)) {
+ pe_fence_node(scheduler, this_node,
"peer failed Pacemaker membership criteria", FALSE);
- } else if (do_terminate == FALSE && pcmk__str_eq(exp_state, CRMD_JOINSTATE_DOWN, pcmk__str_casei)) {
+ } else if (termination_requested) {
+ if ((when_member <= 0) && (when_online <= 0)
+ && pcmk__str_eq(join, CRMD_JOINSTATE_DOWN, pcmk__str_none)) {
+ crm_info("%s was fenced as requested", pe__node_name(this_node));
+ return false;
+ }
+ pe_fence_node(scheduler, this_node, "fencing was requested", false);
+
+ } else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_DOWN,
+ pcmk__str_null_matches)) {
- if (crm_is_true(in_cluster) || crmd_online) {
+ if (pending_too_long(scheduler, this_node, when_member, when_online)) {
+ pe_fence_node(scheduler, this_node,
+ "peer pending timed out on joining the process group",
+ FALSE);
+
+ } else if ((when_member > 0) || (when_online > 0)) {
crm_info("- %s is not ready to run resources",
pe__node_name(this_node));
this_node->details->standby = TRUE;
@@ -1435,48 +1644,41 @@ determine_online_status_fencing(pe_working_set_t *data_set,
pe__node_name(this_node));
}
- } else if (do_terminate && pcmk__str_eq(join, CRMD_JOINSTATE_DOWN, pcmk__str_casei)
- && crm_is_true(in_cluster) == FALSE && !crmd_online) {
- crm_info("%s was just shot", pe__node_name(this_node));
- online = FALSE;
-
- } else if (crm_is_true(in_cluster) == FALSE) {
+ } else if (when_member <= 0) {
// Consider `priority-fencing-delay` for lost nodes
- pe_fence_node(data_set, this_node, "peer is no longer part of the cluster", TRUE);
+ pe_fence_node(scheduler, this_node,
+ "peer is no longer part of the cluster", TRUE);
- } else if (!crmd_online) {
- pe_fence_node(data_set, this_node, "peer process is no longer available", FALSE);
+ } else if (when_online <= 0) {
+ pe_fence_node(scheduler, this_node,
+ "peer process is no longer available", FALSE);
/* Everything is running at this point, now check join state */
- } else if (do_terminate) {
- pe_fence_node(data_set, this_node, "termination was requested", FALSE);
- } else if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
+ } else if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_none)) {
crm_info("%s is active", pe__node_name(this_node));
- } else if (pcmk__strcase_any_of(join, CRMD_JOINSTATE_PENDING, CRMD_JOINSTATE_DOWN, NULL)) {
+ } else if (pcmk__str_any_of(join, CRMD_JOINSTATE_PENDING,
+ CRMD_JOINSTATE_DOWN, NULL)) {
crm_info("%s is not ready to run resources", pe__node_name(this_node));
this_node->details->standby = TRUE;
this_node->details->pending = TRUE;
} else {
- pe_fence_node(data_set, this_node, "peer was in an unknown state", FALSE);
- crm_warn("%s: in-cluster=%s is-peer=%s join=%s expected=%s term=%d shutdown=%d",
- pe__node_name(this_node), pcmk__s(in_cluster, "<null>"),
- pcmk__s(is_peer, "<null>"), pcmk__s(join, "<null>"),
- pcmk__s(exp_state, "<null>"), do_terminate,
- this_node->details->shutdown);
+ pe_fence_node(scheduler, this_node, "peer was in an unknown state",
+ FALSE);
}
- return online;
+ return (when_member > 0);
}
static void
-determine_remote_online_status(pe_working_set_t * data_set, pe_node_t * this_node)
+determine_remote_online_status(pcmk_scheduler_t *scheduler,
+ pcmk_node_t *this_node)
{
- pe_resource_t *rsc = this_node->details->remote_rsc;
- pe_resource_t *container = NULL;
- pe_node_t *host = NULL;
+ pcmk_resource_t *rsc = this_node->details->remote_rsc;
+ pcmk_resource_t *container = NULL;
+ pcmk_node_t *host = NULL;
/* If there is a node state entry for a (former) Pacemaker Remote node
* but no resource creating that node, the node's connection resource will
@@ -1494,33 +1696,36 @@ determine_remote_online_status(pe_working_set_t * data_set, pe_node_t * this_nod
}
/* If the resource is currently started, mark it online. */
- if (rsc->role == RSC_ROLE_STARTED) {
+ if (rsc->role == pcmk_role_started) {
crm_trace("%s node %s presumed ONLINE because connection resource is started",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->online = TRUE;
}
/* consider this node shutting down if transitioning start->stop */
- if (rsc->role == RSC_ROLE_STARTED && rsc->next_role == RSC_ROLE_STOPPED) {
+ if ((rsc->role == pcmk_role_started)
+ && (rsc->next_role == pcmk_role_stopped)) {
+
crm_trace("%s node %s shutting down because connection resource is stopping",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->shutdown = TRUE;
}
/* Now check all the failure conditions. */
- if(container && pcmk_is_set(container->flags, pe_rsc_failed)) {
+ if(container && pcmk_is_set(container->flags, pcmk_rsc_failed)) {
crm_trace("Guest node %s UNCLEAN because guest resource failed",
this_node->details->id);
this_node->details->online = FALSE;
this_node->details->remote_requires_reset = TRUE;
- } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_failed)) {
crm_trace("%s node %s OFFLINE because connection resource failed",
(container? "Guest" : "Remote"), this_node->details->id);
this_node->details->online = FALSE;
- } else if (rsc->role == RSC_ROLE_STOPPED
- || (container && container->role == RSC_ROLE_STOPPED)) {
+ } else if ((rsc->role == pcmk_role_stopped)
+ || ((container != NULL)
+ && (container->role == pcmk_role_stopped))) {
crm_trace("%s node %s OFFLINE because its resource is stopped",
(container? "Guest" : "Remote"), this_node->details->id);
@@ -1541,11 +1746,11 @@ remote_online_done:
}
static void
-determine_online_status(const xmlNode *node_state, pe_node_t *this_node,
- pe_working_set_t *data_set)
+determine_online_status(const xmlNode *node_state, pcmk_node_t *this_node,
+ pcmk_scheduler_t *scheduler)
{
gboolean online = FALSE;
- const char *exp_state = crm_element_value(node_state, XML_NODE_EXPECTED);
+ const char *exp_state = crm_element_value(node_state, PCMK__XA_EXPECTED);
CRM_CHECK(this_node != NULL, return);
@@ -1566,11 +1771,13 @@ determine_online_status(const xmlNode *node_state, pe_node_t *this_node,
* Anyone caught abusing this logic will be shot
*/
- } else if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
- online = determine_online_status_no_fencing(data_set, node_state, this_node);
+ } else if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
+ online = determine_online_status_no_fencing(scheduler, node_state,
+ this_node);
} else {
- online = determine_online_status_fencing(data_set, node_state, this_node);
+ online = determine_online_status_fencing(scheduler, node_state,
+ this_node);
}
if (online) {
@@ -1692,30 +1899,30 @@ clone_zero(const char *last_rsc_id)
return zero;
}
-static pe_resource_t *
+static pcmk_resource_t *
create_fake_resource(const char *rsc_id, const xmlNode *rsc_entry,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
xmlNode *xml_rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);
copy_in_properties(xml_rsc, rsc_entry);
crm_xml_add(xml_rsc, XML_ATTR_ID, rsc_id);
crm_log_xml_debug(xml_rsc, "Orphan resource");
- if (pe__unpack_resource(xml_rsc, &rsc, NULL, data_set) != pcmk_rc_ok) {
+ if (pe__unpack_resource(xml_rsc, &rsc, NULL, scheduler) != pcmk_rc_ok) {
return NULL;
}
if (xml_contains_remote_node(xml_rsc)) {
- pe_node_t *node;
+ pcmk_node_t *node;
crm_debug("Detected orphaned remote node %s", rsc_id);
- node = pe_find_node(data_set->nodes, rsc_id);
+ node = pe_find_node(scheduler->nodes, rsc_id);
if (node == NULL) {
- node = pe_create_node(rsc_id, rsc_id, "remote", NULL, data_set);
+ node = pe_create_node(rsc_id, rsc_id, "remote", NULL, scheduler);
}
- link_rsc2remotenode(data_set, rsc);
+ link_rsc2remotenode(scheduler, rsc);
if (node) {
crm_trace("Setting node %s as shutting down due to orphaned connection resource", rsc_id);
@@ -1726,10 +1933,10 @@ create_fake_resource(const char *rsc_id, const xmlNode *rsc_entry,
if (crm_element_value(rsc_entry, XML_RSC_ATTR_CONTAINER)) {
/* This orphaned rsc needs to be mapped to a container. */
crm_trace("Detected orphaned container filler %s", rsc_id);
- pe__set_resource_flags(rsc, pe_rsc_orphan_container_filler);
+ pe__set_resource_flags(rsc, pcmk_rsc_removed_filler);
}
- pe__set_resource_flags(rsc, pe_rsc_orphan);
- data_set->resources = g_list_append(data_set->resources, rsc);
+ pe__set_resource_flags(rsc, pcmk_rsc_removed);
+ scheduler->resources = g_list_append(scheduler->resources, rsc);
return rsc;
}
@@ -1737,21 +1944,22 @@ create_fake_resource(const char *rsc_id, const xmlNode *rsc_entry,
* \internal
* \brief Create orphan instance for anonymous clone resource history
*
- * \param[in,out] parent Clone resource that orphan will be added to
- * \param[in] rsc_id Orphan's resource ID
- * \param[in] node Where orphan is active (for logging only)
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] parent Clone resource that orphan will be added to
+ * \param[in] rsc_id Orphan's resource ID
+ * \param[in] node Where orphan is active (for logging only)
+ * \param[in,out] scheduler Scheduler data
*
* \return Newly added orphaned instance of \p parent
*/
-static pe_resource_t *
-create_anonymous_orphan(pe_resource_t *parent, const char *rsc_id,
- const pe_node_t *node, pe_working_set_t *data_set)
+static pcmk_resource_t *
+create_anonymous_orphan(pcmk_resource_t *parent, const char *rsc_id,
+ const pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
- pe_resource_t *top = pe__create_clone_child(parent, data_set);
+ pcmk_resource_t *top = pe__create_clone_child(parent, scheduler);
// find_rsc() because we might be a cloned group
- pe_resource_t *orphan = top->fns->find_rsc(top, rsc_id, NULL, pe_find_clone);
+ pcmk_resource_t *orphan = top->fns->find_rsc(top, rsc_id, NULL,
+ pcmk_rsc_match_clone_only);
pe_rsc_debug(parent, "Created orphan %s for %s: %s on %s",
top->id, parent->id, rsc_id, pe__node_name(node));
@@ -1767,30 +1975,30 @@ create_anonymous_orphan(pe_resource_t *parent, const char *rsc_id,
* (2) an inactive instance (i.e. within the total of clone-max instances);
* (3) a newly created orphan (i.e. clone-max instances are already active).
*
- * \param[in,out] data_set Cluster information
- * \param[in] node Node on which to check for instance
- * \param[in,out] parent Clone to check
- * \param[in] rsc_id Name of cloned resource in history (without instance)
+ * \param[in,out] scheduler Scheduler data
+ * \param[in] node Node on which to check for instance
+ * \param[in,out] parent Clone to check
+ * \param[in] rsc_id Name of cloned resource in history (no instance)
*/
-static pe_resource_t *
-find_anonymous_clone(pe_working_set_t *data_set, const pe_node_t *node,
- pe_resource_t *parent, const char *rsc_id)
+static pcmk_resource_t *
+find_anonymous_clone(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
+ pcmk_resource_t *parent, const char *rsc_id)
{
GList *rIter = NULL;
- pe_resource_t *rsc = NULL;
- pe_resource_t *inactive_instance = NULL;
+ pcmk_resource_t *rsc = NULL;
+ pcmk_resource_t *inactive_instance = NULL;
gboolean skip_inactive = FALSE;
CRM_ASSERT(parent != NULL);
CRM_ASSERT(pe_rsc_is_clone(parent));
- CRM_ASSERT(!pcmk_is_set(parent->flags, pe_rsc_unique));
+ CRM_ASSERT(!pcmk_is_set(parent->flags, pcmk_rsc_unique));
// Check for active (or partially active, for cloned groups) instance
pe_rsc_trace(parent, "Looking for %s on %s in %s",
rsc_id, pe__node_name(node), parent->id);
for (rIter = parent->children; rsc == NULL && rIter; rIter = rIter->next) {
GList *locations = NULL;
- pe_resource_t *child = rIter->data;
+ pcmk_resource_t *child = rIter->data;
/* Check whether this instance is already known to be active or pending
* anywhere, at this stage of unpacking. Because this function is called
@@ -1804,8 +2012,8 @@ find_anonymous_clone(pe_working_set_t *data_set, const pe_node_t *node,
* (2) when we've already unpacked the history of another numbered
* instance on the same node (which can happen if globally-unique
* was flipped from true to false); and
- * (3) when we re-run calculations on the same data set as part of a
- * simulation.
+ * (3) when we re-run calculations on the same scheduler data as part of
+ * a simulation.
*/
child->fns->location(child, &locations, 2);
if (locations) {
@@ -1815,7 +2023,7 @@ find_anonymous_clone(pe_working_set_t *data_set, const pe_node_t *node,
*/
CRM_LOG_ASSERT(locations->next == NULL);
- if (((pe_node_t *)locations->data)->details == node->details) {
+ if (((pcmk_node_t *) locations->data)->details == node->details) {
/* This child instance is active on the requested node, so check
* for a corresponding configured resource. We use find_rsc()
* instead of child because child may be a cloned group, and we
@@ -1823,7 +2031,8 @@ find_anonymous_clone(pe_working_set_t *data_set, const pe_node_t *node,
*
* If the history entry is orphaned, rsc will be NULL.
*/
- rsc = parent->fns->find_rsc(child, rsc_id, NULL, pe_find_clone);
+ rsc = parent->fns->find_rsc(child, rsc_id, NULL,
+ pcmk_rsc_match_clone_only);
if (rsc) {
/* If there are multiple instance history entries for an
* anonymous clone in a single node's history (which can
@@ -1848,10 +2057,10 @@ find_anonymous_clone(pe_working_set_t *data_set, const pe_node_t *node,
} else {
pe_rsc_trace(parent, "Resource %s, skip inactive", child->id);
if (!skip_inactive && !inactive_instance
- && !pcmk_is_set(child->flags, pe_rsc_block)) {
+ && !pcmk_is_set(child->flags, pcmk_rsc_blocked)) {
// Remember one inactive instance in case we don't find active
inactive_instance = parent->fns->find_rsc(child, rsc_id, NULL,
- pe_find_clone);
+ pcmk_rsc_match_clone_only);
/* ... but don't use it if it was already associated with a
* pending action on another node
@@ -1881,30 +2090,30 @@ find_anonymous_clone(pe_working_set_t *data_set, const pe_node_t *node,
* @TODO Ideally, we'd use an inactive instance number if it is not needed
* for any clean instances. However, we don't know that at this point.
*/
- if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)
+ if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk_rsc_needs_fencing)
&& (!node->details->online || node->details->unclean)
&& !pe__is_guest_node(node)
- && !pe__is_universal_clone(parent, data_set)) {
+ && !pe__is_universal_clone(parent, scheduler)) {
rsc = NULL;
}
if (rsc == NULL) {
- rsc = create_anonymous_orphan(parent, rsc_id, node, data_set);
+ rsc = create_anonymous_orphan(parent, rsc_id, node, scheduler);
pe_rsc_trace(parent, "Resource %s, orphan", rsc->id);
}
return rsc;
}
-static pe_resource_t *
-unpack_find_resource(pe_working_set_t *data_set, const pe_node_t *node,
+static pcmk_resource_t *
+unpack_find_resource(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
const char *rsc_id)
{
- pe_resource_t *rsc = NULL;
- pe_resource_t *parent = NULL;
+ pcmk_resource_t *rsc = NULL;
+ pcmk_resource_t *parent = NULL;
crm_trace("looking for %s", rsc_id);
- rsc = pe_find_resource(data_set->resources, rsc_id);
+ rsc = pe_find_resource(scheduler->resources, rsc_id);
if (rsc == NULL) {
/* If we didn't find the resource by its name in the operation history,
@@ -1912,9 +2121,10 @@ unpack_find_resource(pe_working_set_t *data_set, const pe_node_t *node,
* a single :0 orphan to match against here.
*/
char *clone0_id = clone_zero(rsc_id);
- pe_resource_t *clone0 = pe_find_resource(data_set->resources, clone0_id);
+ pcmk_resource_t *clone0 = pe_find_resource(scheduler->resources,
+ clone0_id);
- if (clone0 && !pcmk_is_set(clone0->flags, pe_rsc_unique)) {
+ if (clone0 && !pcmk_is_set(clone0->flags, pcmk_rsc_unique)) {
rsc = clone0;
parent = uber_parent(clone0);
crm_trace("%s found as %s (%s)", rsc_id, clone0_id, parent->id);
@@ -1924,7 +2134,7 @@ unpack_find_resource(pe_working_set_t *data_set, const pe_node_t *node,
}
free(clone0_id);
- } else if (rsc->variant > pe_native) {
+ } else if (rsc->variant > pcmk_rsc_variant_primitive) {
crm_trace("Resource history for %s is orphaned because it is no longer primitive",
rsc_id);
return NULL;
@@ -1940,7 +2150,7 @@ unpack_find_resource(pe_working_set_t *data_set, const pe_node_t *node,
} else {
char *base = clone_strip(rsc_id);
- rsc = find_anonymous_clone(data_set, node, parent, base);
+ rsc = find_anonymous_clone(scheduler, node, parent, base);
free(base);
CRM_ASSERT(rsc != NULL);
}
@@ -1952,42 +2162,43 @@ unpack_find_resource(pe_working_set_t *data_set, const pe_node_t *node,
pcmk__str_update(&rsc->clone_name, rsc_id);
pe_rsc_debug(rsc, "Internally renamed %s on %s to %s%s",
rsc_id, pe__node_name(node), rsc->id,
- (pcmk_is_set(rsc->flags, pe_rsc_orphan)? " (ORPHAN)" : ""));
+ (pcmk_is_set(rsc->flags, pcmk_rsc_removed)? " (ORPHAN)" : ""));
}
return rsc;
}
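
unpack_find_resource() matches history IDs such as "rsc:2" back to an anonymous clone by stripping the instance suffix (clone_strip()) or rewriting it to ":0" (clone_zero()). A standalone sketch of the suffix stripping, assuming the helpers behave as described rather than copying their real implementation:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return a copy of rsc_id with any trailing ":<number>" instance suffix
 * removed; the caller must free the result. */
static char *strip_clone_suffix(const char *rsc_id) {
    const char *colon = strrchr(rsc_id, ':');
    size_t len = strlen(rsc_id);
    char *copy = NULL;

    if ((colon != NULL) && (colon[1] != '\0')) {
        const char *p = colon + 1;

        while (isdigit((unsigned char) *p)) {
            p++;
        }
        if (*p == '\0') {              /* everything after ':' was digits */
            len = (size_t) (colon - rsc_id);
        }
    }
    copy = malloc(len + 1);
    if (copy != NULL) {
        memcpy(copy, rsc_id, len);
        copy[len] = '\0';
    }
    return copy;
}

int main(void) {
    char *base = strip_clone_suffix("galera:2");

    printf("%s\n", base);              /* prints "galera" */
    free(base);
    return 0;
}
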
-static pe_resource_t *
-process_orphan_resource(const xmlNode *rsc_entry, const pe_node_t *node,
- pe_working_set_t *data_set)
+static pcmk_resource_t *
+process_orphan_resource(const xmlNode *rsc_entry, const pcmk_node_t *node,
+ pcmk_scheduler_t *scheduler)
{
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
crm_debug("Detected orphan resource %s on %s", rsc_id, pe__node_name(node));
- rsc = create_fake_resource(rsc_id, rsc_entry, data_set);
+ rsc = create_fake_resource(rsc_id, rsc_entry, scheduler);
if (rsc == NULL) {
return NULL;
}
- if (!pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
- pe__clear_resource_flags(rsc, pe_rsc_managed);
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) {
+ pe__clear_resource_flags(rsc, pcmk_rsc_managed);
} else {
CRM_CHECK(rsc != NULL, return NULL);
pe_rsc_trace(rsc, "Added orphan %s", rsc->id);
- resource_location(rsc, NULL, -INFINITY, "__orphan_do_not_run__", data_set);
+ resource_location(rsc, NULL, -INFINITY, "__orphan_do_not_run__",
+ scheduler);
}
return rsc;
}
static void
-process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
+process_rsc_state(pcmk_resource_t *rsc, pcmk_node_t *node,
enum action_fail_response on_fail)
{
- pe_node_t *tmpnode = NULL;
+ pcmk_node_t *tmpnode = NULL;
char *reason = NULL;
- enum action_fail_response save_on_fail = action_fail_ignore;
+ enum action_fail_response save_on_fail = pcmk_on_fail_ignore;
CRM_ASSERT(rsc);
pe_rsc_trace(rsc, "Resource %s is %s on %s: on_fail=%s",
@@ -1995,12 +2206,12 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
fail2text(on_fail));
/* process current state */
- if (rsc->role != RSC_ROLE_UNKNOWN) {
- pe_resource_t *iter = rsc;
+ if (rsc->role != pcmk_role_unknown) {
+ pcmk_resource_t *iter = rsc;
while (iter) {
if (g_hash_table_lookup(iter->known_on, node->details->id) == NULL) {
- pe_node_t *n = pe__copy_node(node);
+ pcmk_node_t *n = pe__copy_node(node);
pe_rsc_trace(rsc, "%s%s%s known on %s",
rsc->id,
@@ -2009,7 +2220,7 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
pe__node_name(n));
g_hash_table_insert(iter->known_on, (gpointer) n->details->id, n);
}
- if (pcmk_is_set(iter->flags, pe_rsc_unique)) {
+ if (pcmk_is_set(iter->flags, pcmk_rsc_unique)) {
break;
}
iter = iter->parent;
@@ -2017,10 +2228,10 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
}
/* If a managed resource is believed to be running, but node is down ... */
- if (rsc->role > RSC_ROLE_STOPPED
+ if ((rsc->role > pcmk_role_stopped)
&& node->details->online == FALSE
&& node->details->maintenance == FALSE
- && pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ && pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
gboolean should_fence = FALSE;
@@ -2032,12 +2243,15 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
* resource to run again once we are sure we know its state.
*/
if (pe__is_guest_node(node)) {
- pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
+ pe__set_resource_flags(rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
should_fence = TRUE;
- } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
+ } else if (pcmk_is_set(rsc->cluster->flags,
+ pcmk_sched_fencing_enabled)) {
if (pe__is_remote_node(node) && node->details->remote_rsc
- && !pcmk_is_set(node->details->remote_rsc->flags, pe_rsc_failed)) {
+ && !pcmk_is_set(node->details->remote_rsc->flags,
+ pcmk_rsc_failed)) {
/* Setting unseen means that fencing of the remote node will
* occur only if the connection resource is not going to start
@@ -2070,20 +2284,20 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
/* No extra processing needed
* Also allows resources to be started again after a node is shot
*/
- on_fail = action_fail_ignore;
+ on_fail = pcmk_on_fail_ignore;
}
switch (on_fail) {
- case action_fail_ignore:
+ case pcmk_on_fail_ignore:
/* nothing to do */
break;
- case action_fail_demote:
- pe__set_resource_flags(rsc, pe_rsc_failed);
+ case pcmk_on_fail_demote:
+ pe__set_resource_flags(rsc, pcmk_rsc_failed);
demote_action(rsc, node, FALSE);
break;
- case action_fail_fence:
+ case pcmk_on_fail_fence_node:
/* treat it as if it is still running
* but also mark the node as unclean
*/
@@ -2092,20 +2306,20 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
free(reason);
break;
- case action_fail_standby:
+ case pcmk_on_fail_standby_node:
node->details->standby = TRUE;
node->details->standby_onfail = TRUE;
break;
- case action_fail_block:
+ case pcmk_on_fail_block:
/* is_managed == FALSE will prevent any
* actions being sent for the resource
*/
- pe__clear_resource_flags(rsc, pe_rsc_managed);
- pe__set_resource_flags(rsc, pe_rsc_block);
+ pe__clear_resource_flags(rsc, pcmk_rsc_managed);
+ pe__set_resource_flags(rsc, pcmk_rsc_blocked);
break;
- case action_fail_migrate:
+ case pcmk_on_fail_ban:
/* make sure it comes up somewhere else
* or not at all
*/
@@ -2113,19 +2327,22 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
rsc->cluster);
break;
- case action_fail_stop:
- pe__set_next_role(rsc, RSC_ROLE_STOPPED, "on-fail=stop");
+ case pcmk_on_fail_stop:
+ pe__set_next_role(rsc, pcmk_role_stopped, "on-fail=stop");
break;
- case action_fail_recover:
- if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
- pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
+ case pcmk_on_fail_restart:
+ if ((rsc->role != pcmk_role_stopped)
+ && (rsc->role != pcmk_role_unknown)) {
+ pe__set_resource_flags(rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
stop_action(rsc, node, FALSE);
}
break;
- case action_fail_restart_container:
- pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
+ case pcmk_on_fail_restart_container:
+ pe__set_resource_flags(rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
if (rsc->container && pe_rsc_is_bundled(rsc)) {
/* A bundle's remote connection can run on a different node than
* the bundle's container. We don't necessarily know where the
@@ -2136,14 +2353,16 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
g_list_prepend(rsc->cluster->stop_needed, rsc->container);
} else if (rsc->container) {
stop_action(rsc->container, node, FALSE);
- } else if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
+ } else if ((rsc->role != pcmk_role_stopped)
+ && (rsc->role != pcmk_role_unknown)) {
stop_action(rsc, node, FALSE);
}
break;
- case action_fail_reset_remote:
- pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
- if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
+ case pcmk_on_fail_reset_remote:
+ pe__set_resource_flags(rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
+ if (pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)) {
tmpnode = NULL;
if (rsc->is_remote_node) {
tmpnode = pe_find_node(rsc->cluster->nodes, rsc->id);
@@ -2161,14 +2380,14 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
}
/* require the stop action regardless of whether fencing is occurring */
- if (rsc->role > RSC_ROLE_STOPPED) {
+ if (rsc->role > pcmk_role_stopped) {
stop_action(rsc, node, FALSE);
}
/* if reconnect delay is in use, prevent the connection from exiting the
* "STOPPED" role until the failure is cleared by the delay timeout. */
if (rsc->remote_reconnect_ms) {
- pe__set_next_role(rsc, RSC_ROLE_STOPPED, "remote reset");
+ pe__set_next_role(rsc, pcmk_role_stopped, "remote reset");
}
break;
}
@@ -2177,16 +2396,17 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
* to be fenced. By setting unseen = FALSE, the remote-node failure will
* result in a fencing operation regardless of whether we're going to
* attempt to reconnect to the remote-node in this transition. */
- if (pcmk_is_set(rsc->flags, pe_rsc_failed) && rsc->is_remote_node) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_failed) && rsc->is_remote_node) {
tmpnode = pe_find_node(rsc->cluster->nodes, rsc->id);
if (tmpnode && tmpnode->details->unclean) {
tmpnode->details->unseen = FALSE;
}
}
- if (rsc->role != RSC_ROLE_STOPPED && rsc->role != RSC_ROLE_UNKNOWN) {
- if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
- if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
+ if ((rsc->role != pcmk_role_stopped)
+ && (rsc->role != pcmk_role_unknown)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
pcmk__config_warn("Detected active orphan %s running on %s",
rsc->id, pe__node_name(node));
} else {
@@ -2198,16 +2418,17 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
}
native_add_running(rsc, node, rsc->cluster,
- (save_on_fail != action_fail_ignore));
+ (save_on_fail != pcmk_on_fail_ignore));
switch (on_fail) {
- case action_fail_ignore:
+ case pcmk_on_fail_ignore:
break;
- case action_fail_demote:
- case action_fail_block:
- pe__set_resource_flags(rsc, pe_rsc_failed);
+ case pcmk_on_fail_demote:
+ case pcmk_on_fail_block:
+ pe__set_resource_flags(rsc, pcmk_rsc_failed);
break;
default:
- pe__set_resource_flags(rsc, pe_rsc_failed|pe_rsc_stop);
+ pe__set_resource_flags(rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
break;
}
@@ -2220,14 +2441,14 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
rsc->clone_name = NULL;
} else {
- GList *possible_matches = pe__resource_actions(rsc, node, RSC_STOP,
- FALSE);
+ GList *possible_matches = pe__resource_actions(rsc, node,
+ PCMK_ACTION_STOP, FALSE);
GList *gIter = possible_matches;
for (; gIter != NULL; gIter = gIter->next) {
- pe_action_t *stop = (pe_action_t *) gIter->data;
+ pcmk_action_t *stop = (pcmk_action_t *) gIter->data;
- pe__set_action_flags(stop, pe_action_optional);
+ pe__set_action_flags(stop, pcmk_action_optional);
}
g_list_free(possible_matches);
@@ -2236,21 +2457,21 @@ process_rsc_state(pe_resource_t * rsc, pe_node_t * node,
/* A successful stop after migrate_to on the migration source doesn't make
* the partially migrated resource stopped on the migration target.
*/
- if (rsc->role == RSC_ROLE_STOPPED
+ if ((rsc->role == pcmk_role_stopped)
&& rsc->partial_migration_source
&& rsc->partial_migration_source->details == node->details
&& rsc->partial_migration_target
&& rsc->running_on) {
- rsc->role = RSC_ROLE_STARTED;
+ rsc->role = pcmk_role_started;
}
}
/* create active recurring operations as optional */
static void
-process_recurring(pe_node_t * node, pe_resource_t * rsc,
+process_recurring(pcmk_node_t *node, pcmk_resource_t *rsc,
int start_index, int stop_index,
- GList *sorted_op_list, pe_working_set_t * data_set)
+ GList *sorted_op_list, pcmk_scheduler_t *scheduler)
{
int counter = -1;
const char *task = NULL;
@@ -2303,7 +2524,7 @@ process_recurring(pe_node_t * node, pe_resource_t * rsc,
/* create the action */
key = pcmk__op_key(rsc->id, task, interval_ms);
pe_rsc_trace(rsc, "Creating %s on %s", key, pe__node_name(node));
- custom_action(rsc, key, task, node, TRUE, TRUE, data_set);
+ custom_action(rsc, key, task, node, TRUE, scheduler);
}
}
@@ -2328,20 +2549,24 @@ calculate_active_ops(const GList *sorted_op_list, int *start_index,
task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
status = crm_element_value(rsc_op, XML_LRM_ATTR_OPSTATUS);
- if (pcmk__str_eq(task, CRMD_ACTION_STOP, pcmk__str_casei)
+ if (pcmk__str_eq(task, PCMK_ACTION_STOP, pcmk__str_casei)
&& pcmk__str_eq(status, "0", pcmk__str_casei)) {
*stop_index = counter;
- } else if (pcmk__strcase_any_of(task, CRMD_ACTION_START, CRMD_ACTION_MIGRATED, NULL)) {
+ } else if (pcmk__strcase_any_of(task, PCMK_ACTION_START,
+ PCMK_ACTION_MIGRATE_FROM, NULL)) {
*start_index = counter;
- } else if ((implied_monitor_start <= *stop_index) && pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
+ } else if ((implied_monitor_start <= *stop_index)
+ && pcmk__str_eq(task, PCMK_ACTION_MONITOR,
+ pcmk__str_casei)) {
const char *rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC);
if (pcmk__strcase_any_of(rc, "0", "8", NULL)) {
implied_monitor_start = counter;
}
- } else if (pcmk__strcase_any_of(task, CRMD_ACTION_PROMOTE, CRMD_ACTION_DEMOTE, NULL)) {
+ } else if (pcmk__strcase_any_of(task, PCMK_ACTION_PROMOTE,
+ PCMK_ACTION_DEMOTE, NULL)) {
implied_clone_start = counter;
}
}
@@ -2357,26 +2582,26 @@ calculate_active_ops(const GList *sorted_op_list, int *start_index,
// If resource history entry has shutdown lock, remember lock node and time
static void
-unpack_shutdown_lock(const xmlNode *rsc_entry, pe_resource_t *rsc,
- const pe_node_t *node, pe_working_set_t *data_set)
+unpack_shutdown_lock(const xmlNode *rsc_entry, pcmk_resource_t *rsc,
+ const pcmk_node_t *node, pcmk_scheduler_t *scheduler)
{
time_t lock_time = 0; // When lock started (i.e. node shutdown time)
if ((crm_element_value_epoch(rsc_entry, XML_CONFIG_ATTR_SHUTDOWN_LOCK,
&lock_time) == pcmk_ok) && (lock_time != 0)) {
- if ((data_set->shutdown_lock > 0)
- && (get_effective_time(data_set)
- > (lock_time + data_set->shutdown_lock))) {
+ if ((scheduler->shutdown_lock > 0)
+ && (get_effective_time(scheduler)
+ > (lock_time + scheduler->shutdown_lock))) {
pe_rsc_info(rsc, "Shutdown lock for %s on %s expired",
rsc->id, pe__node_name(node));
- pe__clear_resource_history(rsc, node, data_set);
+ pe__clear_resource_history(rsc, node);
} else {
/* @COMPAT I don't like breaking const signatures, but
* rsc->lock_node should really be const -- we just can't change it
* until the next API compatibility break.
*/
- rsc->lock_node = (pe_node_t *) node;
+ rsc->lock_node = (pcmk_node_t *) node;
rsc->lock_time = lock_time;
}
}
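/* Editor's illustrative note (not part of this patch): with the shutdown-lock
 * limit held in scheduler->shutdown_lock (seconds), a resource locked when its
 * node shut down at lock_time = 1000 under a 600-second limit stays locked
 * while get_effective_time(scheduler) is at or below 1600; once the effective
 * time passes 1600, the lock is treated as expired, so
 * pe__clear_resource_history() is called instead of recording rsc->lock_node
 * and rsc->lock_time.
 */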
@@ -2388,30 +2613,30 @@ unpack_shutdown_lock(const xmlNode *rsc_entry, pe_resource_t *rsc,
*
* \param[in,out] node Node whose status is being unpacked
* \param[in] rsc_entry lrm_resource XML being unpacked
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \return Resource corresponding to the entry, or NULL if no operation history
*/
-static pe_resource_t *
-unpack_lrm_resource(pe_node_t *node, const xmlNode *lrm_resource,
- pe_working_set_t *data_set)
+static pcmk_resource_t *
+unpack_lrm_resource(pcmk_node_t *node, const xmlNode *lrm_resource,
+ pcmk_scheduler_t *scheduler)
{
GList *gIter = NULL;
int stop_index = -1;
int start_index = -1;
- enum rsc_role_e req_role = RSC_ROLE_UNKNOWN;
+ enum rsc_role_e req_role = pcmk_role_unknown;
const char *rsc_id = ID(lrm_resource);
- pe_resource_t *rsc = NULL;
+ pcmk_resource_t *rsc = NULL;
GList *op_list = NULL;
GList *sorted_op_list = NULL;
xmlNode *rsc_op = NULL;
xmlNode *last_failure = NULL;
- enum action_fail_response on_fail = action_fail_ignore;
- enum rsc_role_e saved_role = RSC_ROLE_UNKNOWN;
+ enum action_fail_response on_fail = pcmk_on_fail_ignore;
+ enum rsc_role_e saved_role = pcmk_role_unknown;
if (rsc_id == NULL) {
crm_warn("Ignoring malformed " XML_LRM_TAG_RESOURCE
@@ -2428,7 +2653,7 @@ unpack_lrm_resource(pe_node_t *node, const xmlNode *lrm_resource,
op_list = g_list_prepend(op_list, rsc_op);
}
- if (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
if (op_list == NULL) {
// If there are no operations, there is nothing to do
return NULL;
@@ -2436,25 +2661,25 @@ unpack_lrm_resource(pe_node_t *node, const xmlNode *lrm_resource,
}
/* find the resource */
- rsc = unpack_find_resource(data_set, node, rsc_id);
+ rsc = unpack_find_resource(scheduler, node, rsc_id);
if (rsc == NULL) {
if (op_list == NULL) {
// If there are no operations, there is nothing to do
return NULL;
} else {
- rsc = process_orphan_resource(lrm_resource, node, data_set);
+ rsc = process_orphan_resource(lrm_resource, node, scheduler);
}
}
CRM_ASSERT(rsc != NULL);
// Check whether the resource is "shutdown-locked" to this node
- if (pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
- unpack_shutdown_lock(lrm_resource, rsc, node, data_set);
+ if (pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
+ unpack_shutdown_lock(lrm_resource, rsc, node, scheduler);
}
/* process operations */
saved_role = rsc->role;
- rsc->role = RSC_ROLE_UNKNOWN;
+ rsc->role = pcmk_role_unknown;
sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
@@ -2465,7 +2690,8 @@ unpack_lrm_resource(pe_node_t *node, const xmlNode *lrm_resource,
/* create active recurring operations as optional */
calculate_active_ops(sorted_op_list, &start_index, &stop_index);
- process_recurring(node, rsc, start_index, stop_index, sorted_op_list, data_set);
+ process_recurring(node, rsc, start_index, stop_index, sorted_op_list,
+ scheduler);
/* no need to free the contents */
g_list_free(sorted_op_list);
@@ -2473,7 +2699,9 @@ unpack_lrm_resource(pe_node_t *node, const xmlNode *lrm_resource,
process_rsc_state(rsc, node, on_fail);
if (get_target_role(rsc, &req_role)) {
- if (rsc->next_role == RSC_ROLE_UNKNOWN || req_role < rsc->next_role) {
+ if ((rsc->next_role == pcmk_role_unknown)
+ || (req_role < rsc->next_role)) {
+
pe__set_next_role(rsc, req_role, XML_RSC_ATTR_TARGET_ROLE);
} else if (req_role > rsc->next_role) {
@@ -2492,13 +2720,13 @@ unpack_lrm_resource(pe_node_t *node, const xmlNode *lrm_resource,
static void
handle_orphaned_container_fillers(const xmlNode *lrm_rsc_list,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
for (const xmlNode *rsc_entry = pcmk__xe_first_child(lrm_rsc_list);
rsc_entry != NULL; rsc_entry = pcmk__xe_next(rsc_entry)) {
- pe_resource_t *rsc;
- pe_resource_t *container;
+ pcmk_resource_t *rsc;
+ pcmk_resource_t *container;
const char *rsc_id;
const char *container_id;
@@ -2512,15 +2740,14 @@ handle_orphaned_container_fillers(const xmlNode *lrm_rsc_list,
continue;
}
- container = pe_find_resource(data_set->resources, container_id);
+ container = pe_find_resource(scheduler->resources, container_id);
if (container == NULL) {
continue;
}
- rsc = pe_find_resource(data_set->resources, rsc_id);
- if (rsc == NULL ||
- !pcmk_is_set(rsc->flags, pe_rsc_orphan_container_filler) ||
- rsc->container != NULL) {
+ rsc = pe_find_resource(scheduler->resources, rsc_id);
+ if ((rsc == NULL) || (rsc->container != NULL)
+ || !pcmk_is_set(rsc->flags, pcmk_rsc_removed_filler)) {
continue;
}
@@ -2535,12 +2762,13 @@ handle_orphaned_container_fillers(const xmlNode *lrm_rsc_list,
* \internal
* \brief Unpack one node's lrm status section
*
- * \param[in,out] node Node whose status is being unpacked
- * \param[in] xml CIB node state XML
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] node Node whose status is being unpacked
+ * \param[in] xml CIB node state XML
+ * \param[in,out] scheduler Scheduler data
*/
static void
-unpack_node_lrm(pe_node_t *node, const xmlNode *xml, pe_working_set_t *data_set)
+unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml,
+ pcmk_scheduler_t *scheduler)
{
bool found_orphaned_container_filler = false;
@@ -2558,10 +2786,10 @@ unpack_node_lrm(pe_node_t *node, const xmlNode *xml, pe_working_set_t *data_set)
for (const xmlNode *rsc_entry = first_named_child(xml, XML_LRM_TAG_RESOURCE);
rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) {
- pe_resource_t *rsc = unpack_lrm_resource(node, rsc_entry, data_set);
+ pcmk_resource_t *rsc = unpack_lrm_resource(node, rsc_entry, scheduler);
if ((rsc != NULL)
- && pcmk_is_set(rsc->flags, pe_rsc_orphan_container_filler)) {
+ && pcmk_is_set(rsc->flags, pcmk_rsc_removed_filler)) {
found_orphaned_container_filler = true;
}
}
@@ -2570,26 +2798,26 @@ unpack_node_lrm(pe_node_t *node, const xmlNode *xml, pe_working_set_t *data_set)
* orphaned container fillers to their container resource.
*/
if (found_orphaned_container_filler) {
- handle_orphaned_container_fillers(xml, data_set);
+ handle_orphaned_container_fillers(xml, scheduler);
}
}
static void
-set_active(pe_resource_t * rsc)
+set_active(pcmk_resource_t *rsc)
{
- const pe_resource_t *top = pe__const_top_resource(rsc, false);
+ const pcmk_resource_t *top = pe__const_top_resource(rsc, false);
- if (top && pcmk_is_set(top->flags, pe_rsc_promotable)) {
- rsc->role = RSC_ROLE_UNPROMOTED;
+ if (top && pcmk_is_set(top->flags, pcmk_rsc_promotable)) {
+ rsc->role = pcmk_role_unpromoted;
} else {
- rsc->role = RSC_ROLE_STARTED;
+ rsc->role = pcmk_role_started;
}
}
static void
set_node_score(gpointer key, gpointer value, gpointer user_data)
{
- pe_node_t *node = value;
+ pcmk_node_t *node = value;
int *score = user_data;
node->weight = *score;
@@ -2604,7 +2832,7 @@ set_node_score(gpointer key, gpointer value, gpointer user_data)
static xmlNode *
find_lrm_op(const char *resource, const char *op, const char *node, const char *source,
- int target_rc, pe_working_set_t *data_set)
+ int target_rc, pcmk_scheduler_t *scheduler)
{
GString *xpath = NULL;
xmlNode *xml = NULL;
@@ -2620,12 +2848,13 @@ find_lrm_op(const char *resource, const char *op, const char *node, const char *
NULL);
/* Need to check against transition_magic too? */
- if ((source != NULL) && (strcmp(op, CRMD_ACTION_MIGRATE) == 0)) {
+ if ((source != NULL) && (strcmp(op, PCMK_ACTION_MIGRATE_TO) == 0)) {
pcmk__g_strcat(xpath,
" and @" XML_LRM_ATTR_MIGRATE_TARGET "='", source, "']",
NULL);
- } else if ((source != NULL) && (strcmp(op, CRMD_ACTION_MIGRATED) == 0)) {
+ } else if ((source != NULL)
+ && (strcmp(op, PCMK_ACTION_MIGRATE_FROM) == 0)) {
pcmk__g_strcat(xpath,
" and @" XML_LRM_ATTR_MIGRATE_SOURCE "='", source, "']",
NULL);
@@ -2633,7 +2862,7 @@ find_lrm_op(const char *resource, const char *op, const char *node, const char *
g_string_append_c(xpath, ']');
}
- xml = get_xpath_object((const char *) xpath->str, data_set->input,
+ xml = get_xpath_object((const char *) xpath->str, scheduler->input,
LOG_DEBUG);
g_string_free(xpath, TRUE);
@@ -2652,7 +2881,7 @@ find_lrm_op(const char *resource, const char *op, const char *node, const char *
static xmlNode *
find_lrm_resource(const char *rsc_id, const char *node_name,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
GString *xpath = NULL;
xmlNode *xml = NULL;
@@ -2665,7 +2894,7 @@ find_lrm_resource(const char *rsc_id, const char *node_name,
SUB_XPATH_LRM_RESOURCE "[@" XML_ATTR_ID "='", rsc_id, "']",
NULL);
- xml = get_xpath_object((const char *) xpath->str, data_set->input,
+ xml = get_xpath_object((const char *) xpath->str, scheduler->input,
LOG_DEBUG);
g_string_free(xpath, TRUE);
@@ -2682,7 +2911,7 @@ find_lrm_resource(const char *rsc_id, const char *node_name,
* \return true if \p rsc_id is unknown on \p node_name, otherwise false
*/
static bool
-unknown_on_node(pe_resource_t *rsc, const char *node_name)
+unknown_on_node(pcmk_resource_t *rsc, const char *node_name)
{
bool result = false;
xmlXPathObjectPtr search;
@@ -2708,20 +2937,20 @@ unknown_on_node(pe_resource_t *rsc, const char *node_name)
* \param[in] node_name Node being checked
* \param[in] xml_op Event that monitor is being compared to
* \param[in] same_node Whether the operations are on the same node
- * \param[in,out] data_set Cluster working set
+ * \param[in,out] scheduler Scheduler data
*
* \return true if such a monitor happened after event, false otherwise
*/
static bool
monitor_not_running_after(const char *rsc_id, const char *node_name,
const xmlNode *xml_op, bool same_node,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
/* Any probe/monitor operation on the node indicating it was not running
* there
*/
- xmlNode *monitor = find_lrm_op(rsc_id, CRMD_ACTION_STATUS, node_name,
- NULL, PCMK_OCF_NOT_RUNNING, data_set);
+ xmlNode *monitor = find_lrm_op(rsc_id, PCMK_ACTION_MONITOR, node_name,
+ NULL, PCMK_OCF_NOT_RUNNING, scheduler);
return (monitor && pe__is_newer_op(monitor, xml_op, same_node) > 0);
}
@@ -2730,22 +2959,22 @@ monitor_not_running_after(const char *rsc_id, const char *node_name,
* \brief Check whether any non-monitor operation on a node happened after some
* event
*
- * \param[in] rsc_id Resource being checked
- * \param[in] node_name Node being checked
- * \param[in] xml_op Event that non-monitor is being compared to
- * \param[in] same_node Whether the operations are on the same node
- * \param[in,out] data_set Cluster working set
+ * \param[in] rsc_id Resource being checked
+ * \param[in] node_name Node being checked
+ * \param[in] xml_op Event that non-monitor is being compared to
+ * \param[in] same_node Whether the operations are on the same node
+ * \param[in,out] scheduler Scheduler data
*
* \return true if such an operation happened after event, false otherwise
*/
static bool
non_monitor_after(const char *rsc_id, const char *node_name,
const xmlNode *xml_op, bool same_node,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
xmlNode *lrm_resource = NULL;
- lrm_resource = find_lrm_resource(rsc_id, node_name, data_set);
+ lrm_resource = find_lrm_resource(rsc_id, node_name, scheduler);
if (lrm_resource == NULL) {
return false;
}
@@ -2760,8 +2989,9 @@ non_monitor_after(const char *rsc_id, const char *node_name,
task = crm_element_value(op, XML_LRM_ATTR_TASK);
- if (pcmk__str_any_of(task, CRMD_ACTION_START, CRMD_ACTION_STOP,
- CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED, NULL)
+ if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_STOP,
+ PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM,
+ NULL)
&& pe__is_newer_op(op, xml_op, same_node) > 0) {
return true;
}
@@ -2774,11 +3004,11 @@ non_monitor_after(const char *rsc_id, const char *node_name,
* \brief Check whether the resource has newer state on a node after a migration
* attempt
*
- * \param[in] rsc_id Resource being checked
- * \param[in] node_name Node being checked
- * \param[in] migrate_to Any migrate_to event that is being compared to
- * \param[in] migrate_from Any migrate_from event that is being compared to
- * \param[in,out] data_set Cluster working set
+ * \param[in] rsc_id Resource being checked
+ * \param[in] node_name Node being checked
+ * \param[in] migrate_to Any migrate_to event that is being compared to
+ * \param[in] migrate_from Any migrate_from event that is being compared to
+ * \param[in,out] scheduler Scheduler data
*
* \return true if such an operation happened after event, false otherwise
*/
@@ -2786,7 +3016,7 @@ static bool
newer_state_after_migrate(const char *rsc_id, const char *node_name,
const xmlNode *migrate_to,
const xmlNode *migrate_from,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
const xmlNode *xml_op = migrate_to;
const char *source = NULL;
@@ -2826,9 +3056,9 @@ newer_state_after_migrate(const char *rsc_id, const char *node_name,
* probe/monitor operation on the node indicating it was not running there,
* the migration events potentially no longer matter for the node.
*/
- return non_monitor_after(rsc_id, node_name, xml_op, same_node, data_set)
+ return non_monitor_after(rsc_id, node_name, xml_op, same_node, scheduler)
|| monitor_not_running_after(rsc_id, node_name, xml_op, same_node,
- data_set);
+ scheduler);
}
/*!
@@ -2844,8 +3074,8 @@ newer_state_after_migrate(const char *rsc_id, const char *node_name,
* \return Standard Pacemaker return code
*/
static int
-get_migration_node_names(const xmlNode *entry, const pe_node_t *source_node,
- const pe_node_t *target_node,
+get_migration_node_names(const xmlNode *entry, const pcmk_node_t *source_node,
+ const pcmk_node_t *target_node,
const char **source_name, const char **target_name)
{
*source_name = crm_element_value(entry, XML_LRM_ATTR_MIGRATE_SOURCE);
@@ -2891,11 +3121,11 @@ get_migration_node_names(const xmlNode *entry, const pe_node_t *source_node,
* \param[in] node Migration source
*/
static void
-add_dangling_migration(pe_resource_t *rsc, const pe_node_t *node)
+add_dangling_migration(pcmk_resource_t *rsc, const pcmk_node_t *node)
{
pe_rsc_trace(rsc, "Dangling migration of %s requires stop on %s",
rsc->id, pe__node_name(node));
- rsc->role = RSC_ROLE_STOPPED;
+ rsc->role = pcmk_role_stopped;
rsc->dangling_migrations = g_list_prepend(rsc->dangling_migrations,
(gpointer) node);
}
@@ -2942,7 +3172,7 @@ unpack_migrate_to_success(struct action_history *history)
*/
int from_rc = PCMK_OCF_OK;
int from_status = PCMK_EXEC_PENDING;
- pe_node_t *target_node = NULL;
+ pcmk_node_t *target_node = NULL;
xmlNode *migrate_from = NULL;
const char *source = NULL;
const char *target = NULL;
@@ -2961,8 +3191,8 @@ unpack_migrate_to_success(struct action_history *history)
true, history->rsc->cluster);
// Check for a migrate_from action from this source on the target
- migrate_from = find_lrm_op(history->rsc->id, CRMD_ACTION_MIGRATED, target,
- source, -1, history->rsc->cluster);
+ migrate_from = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_FROM,
+ target, source, -1, history->rsc->cluster);
if (migrate_from != NULL) {
if (source_newer_op) {
/* There's a newer non-monitor operation on the source and a
@@ -2998,7 +3228,7 @@ unpack_migrate_to_success(struct action_history *history)
/* Without newer state, this migrate_to implies the resource is active.
* (Clones are not allowed to migrate, so role can't be promoted.)
*/
- history->rsc->role = RSC_ROLE_STARTED;
+ history->rsc->role = pcmk_role_started;
target_node = pe_find_node(history->rsc->cluster->nodes, target);
active_on_target = !target_newer_state && (target_node != NULL)
@@ -3010,8 +3240,9 @@ unpack_migrate_to_success(struct action_history *history)
TRUE);
} else {
// Mark resource as failed, require recovery, and prevent migration
- pe__set_resource_flags(history->rsc, pe_rsc_failed|pe_rsc_stop);
- pe__clear_resource_flags(history->rsc, pe_rsc_allow_migrate);
+ pe__set_resource_flags(history->rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
+ pe__clear_resource_flags(history->rsc, pcmk_rsc_migratable);
}
return;
}
@@ -3028,8 +3259,8 @@ unpack_migrate_to_success(struct action_history *history)
}
if (active_on_target) {
- pe_node_t *source_node = pe_find_node(history->rsc->cluster->nodes,
- source);
+ pcmk_node_t *source_node = pe_find_node(history->rsc->cluster->nodes,
+ source);
native_add_running(history->rsc, target_node, history->rsc->cluster,
FALSE);
@@ -3046,8 +3277,9 @@ unpack_migrate_to_success(struct action_history *history)
} else if (!source_newer_op) {
// Mark resource as failed, require recovery, and prevent migration
- pe__set_resource_flags(history->rsc, pe_rsc_failed|pe_rsc_stop);
- pe__clear_resource_flags(history->rsc, pe_rsc_allow_migrate);
+ pe__set_resource_flags(history->rsc,
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
+ pe__clear_resource_flags(history->rsc, pcmk_rsc_migratable);
}
}
@@ -3073,12 +3305,12 @@ unpack_migrate_to_failure(struct action_history *history)
/* If a migration failed, we have to assume the resource is active. Clones
* are not allowed to migrate, so role can't be promoted.
*/
- history->rsc->role = RSC_ROLE_STARTED;
+ history->rsc->role = pcmk_role_started;
// Check for migrate_from on the target
- target_migrate_from = find_lrm_op(history->rsc->id, CRMD_ACTION_MIGRATED,
- target, source, PCMK_OCF_OK,
- history->rsc->cluster);
+ target_migrate_from = find_lrm_op(history->rsc->id,
+ PCMK_ACTION_MIGRATE_FROM, target, source,
+ PCMK_OCF_OK, history->rsc->cluster);
if (/* If the resource state is unknown on the target, it will likely be
* probed there.
@@ -3096,8 +3328,8 @@ unpack_migrate_to_failure(struct action_history *history)
* active there.
* (if it is up).
*/
- pe_node_t *target_node = pe_find_node(history->rsc->cluster->nodes,
- target);
+ pcmk_node_t *target_node = pe_find_node(history->rsc->cluster->nodes,
+ target);
if (target_node && target_node->details->online) {
native_add_running(history->rsc, target_node, history->rsc->cluster,
@@ -3140,10 +3372,10 @@ unpack_migrate_from_failure(struct action_history *history)
/* If a migration failed, we have to assume the resource is active. Clones
* are not allowed to migrate, so role can't be promoted.
*/
- history->rsc->role = RSC_ROLE_STARTED;
+ history->rsc->role = pcmk_role_started;
// Check for a migrate_to on the source
- source_migrate_to = find_lrm_op(history->rsc->id, CRMD_ACTION_MIGRATE,
+ source_migrate_to = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_TO,
source, target, PCMK_OCF_OK,
history->rsc->cluster);
@@ -3162,8 +3394,8 @@ unpack_migrate_from_failure(struct action_history *history)
/* The resource has no newer state on the source, so assume it's still
* active there (if it is up).
*/
- pe_node_t *source_node = pe_find_node(history->rsc->cluster->nodes,
- source);
+ pcmk_node_t *source_node = pe_find_node(history->rsc->cluster->nodes,
+ source);
if (source_node && source_node->details->online) {
native_add_running(history->rsc, source_node, history->rsc->cluster,
@@ -3250,38 +3482,38 @@ static int
cmp_on_fail(enum action_fail_response first, enum action_fail_response second)
{
switch (first) {
- case action_fail_demote:
+ case pcmk_on_fail_demote:
switch (second) {
- case action_fail_ignore:
+ case pcmk_on_fail_ignore:
return 1;
- case action_fail_demote:
+ case pcmk_on_fail_demote:
return 0;
default:
return -1;
}
break;
- case action_fail_reset_remote:
+ case pcmk_on_fail_reset_remote:
switch (second) {
- case action_fail_ignore:
- case action_fail_demote:
- case action_fail_recover:
+ case pcmk_on_fail_ignore:
+ case pcmk_on_fail_demote:
+ case pcmk_on_fail_restart:
return 1;
- case action_fail_reset_remote:
+ case pcmk_on_fail_reset_remote:
return 0;
default:
return -1;
}
break;
- case action_fail_restart_container:
+ case pcmk_on_fail_restart_container:
switch (second) {
- case action_fail_ignore:
- case action_fail_demote:
- case action_fail_recover:
- case action_fail_reset_remote:
+ case pcmk_on_fail_ignore:
+ case pcmk_on_fail_demote:
+ case pcmk_on_fail_restart:
+ case pcmk_on_fail_reset_remote:
return 1;
- case action_fail_restart_container:
+ case pcmk_on_fail_restart_container:
return 0;
default:
return -1;
@@ -3292,26 +3524,26 @@ cmp_on_fail(enum action_fail_response first, enum action_fail_response second)
break;
}
switch (second) {
- case action_fail_demote:
- return (first == action_fail_ignore)? -1 : 1;
+ case pcmk_on_fail_demote:
+ return (first == pcmk_on_fail_ignore)? -1 : 1;
- case action_fail_reset_remote:
+ case pcmk_on_fail_reset_remote:
switch (first) {
- case action_fail_ignore:
- case action_fail_demote:
- case action_fail_recover:
+ case pcmk_on_fail_ignore:
+ case pcmk_on_fail_demote:
+ case pcmk_on_fail_restart:
return -1;
default:
return 1;
}
break;
- case action_fail_restart_container:
+ case pcmk_on_fail_restart_container:
switch (first) {
- case action_fail_ignore:
- case action_fail_demote:
- case action_fail_recover:
- case action_fail_reset_remote:
+ case pcmk_on_fail_ignore:
+ case pcmk_on_fail_demote:
+ case pcmk_on_fail_restart:
+ case pcmk_on_fail_reset_remote:
return -1;
default:
return 1;
@@ -3331,13 +3563,13 @@ cmp_on_fail(enum action_fail_response first, enum action_fail_response second)
* \param[in,out] rsc Resource to ban
*/
static void
-ban_from_all_nodes(pe_resource_t *rsc)
+ban_from_all_nodes(pcmk_resource_t *rsc)
{
int score = -INFINITY;
- pe_resource_t *fail_rsc = rsc;
+ pcmk_resource_t *fail_rsc = rsc;
if (fail_rsc->parent != NULL) {
- pe_resource_t *parent = uber_parent(fail_rsc);
+ pcmk_resource_t *parent = uber_parent(fail_rsc);
if (pe_rsc_is_anon_clone(parent)) {
/* For anonymous clones, if an operation with on-fail=stop fails for
@@ -3358,18 +3590,50 @@ ban_from_all_nodes(pe_resource_t *rsc)
/*!
* \internal
+ * \brief Get configured failure handling and role after failure for an action
+ *
+ * \param[in,out] history Unpacked action history entry
+ * \param[out] on_fail Where to set configured failure handling
+ * \param[out] fail_role Where to set role after failure
+ */
+static void
+unpack_failure_handling(struct action_history *history,
+ enum action_fail_response *on_fail,
+ enum rsc_role_e *fail_role)
+{
+ xmlNode *config = pcmk__find_action_config(history->rsc, history->task,
+ history->interval_ms, true);
+
+ GHashTable *meta = pcmk__unpack_action_meta(history->rsc, history->node,
+ history->task,
+ history->interval_ms, config);
+
+ const char *on_fail_str = g_hash_table_lookup(meta, XML_OP_ATTR_ON_FAIL);
+
+ *on_fail = pcmk__parse_on_fail(history->rsc, history->task,
+ history->interval_ms, on_fail_str);
+ *fail_role = pcmk__role_after_failure(history->rsc, history->task, *on_fail,
+ meta);
+ g_hash_table_destroy(meta);
+}
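/* Editor's illustrative sketch (assumed usage, not part of this patch):
 * callers that previously built a throwaway action via custom_action() just
 * to read its on_fail and fail_role can now query the configured handling
 * directly, roughly like:
 *
 *     enum action_fail_response failure_strategy = pcmk_on_fail_restart;
 *     enum rsc_role_e fail_role = pcmk_role_unknown;
 *
 *     unpack_failure_handling(&history, &failure_strategy, &fail_role);
 *     unpack_rsc_op_failure(&history, failure_strategy, fail_role,
 *                           last_failure, on_fail);
 *
 * This mirrors how unpack_rsc_op() later in this patch consumes the helper.
 */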
+
+/*!
+ * \internal
* \brief Update resource role, failure handling, etc., after a failed action
*
- * \param[in,out] history Parsed action result history
- * \param[out] last_failure Set this to action XML
- * \param[in,out] on_fail What should be done about the result
+ * \param[in,out] history Parsed action result history
+ * \param[in] config_on_fail Action failure handling from configuration
+ * \param[in] fail_role Resource's role after failure of this action
+ * \param[out] last_failure This will be set to the history XML
+ * \param[in,out] on_fail Actual handling of action result
*/
static void
-unpack_rsc_op_failure(struct action_history *history, xmlNode **last_failure,
+unpack_rsc_op_failure(struct action_history *history,
+ enum action_fail_response config_on_fail,
+ enum rsc_role_e fail_role, xmlNode **last_failure,
enum action_fail_response *on_fail)
{
bool is_probe = false;
- pe_action_t *action = NULL;
char *last_change_s = NULL;
*last_failure = history->xml;
@@ -3377,7 +3641,7 @@ unpack_rsc_op_failure(struct action_history *history, xmlNode **last_failure,
is_probe = pcmk_xe_is_probe(history->xml);
last_change_s = last_change_str(history->xml);
- if (!pcmk_is_set(history->rsc->cluster->flags, pe_flag_symmetric_cluster)
+ if (!pcmk_is_set(history->rsc->cluster->flags, pcmk_sched_symmetric_cluster)
&& (history->exit_status == PCMK_OCF_NOT_INSTALLED)) {
crm_trace("Unexpected result (%s%s%s) was recorded for "
"%s of %s on %s at %s " CRM_XS " exit-status=%d id=%s",
@@ -3414,36 +3678,34 @@ unpack_rsc_op_failure(struct action_history *history, xmlNode **last_failure,
free(last_change_s);
- action = custom_action(history->rsc, strdup(history->key), history->task,
- NULL, TRUE, FALSE, history->rsc->cluster);
- if (cmp_on_fail(*on_fail, action->on_fail) < 0) {
- pe_rsc_trace(history->rsc, "on-fail %s -> %s for %s (%s)",
- fail2text(*on_fail), fail2text(action->on_fail),
- action->uuid, history->key);
- *on_fail = action->on_fail;
+ if (cmp_on_fail(*on_fail, config_on_fail) < 0) {
+ pe_rsc_trace(history->rsc, "on-fail %s -> %s for %s",
+ fail2text(*on_fail), fail2text(config_on_fail),
+ history->key);
+ *on_fail = config_on_fail;
}
- if (strcmp(history->task, CRMD_ACTION_STOP) == 0) {
+ if (strcmp(history->task, PCMK_ACTION_STOP) == 0) {
resource_location(history->rsc, history->node, -INFINITY,
"__stop_fail__", history->rsc->cluster);
- } else if (strcmp(history->task, CRMD_ACTION_MIGRATE) == 0) {
+ } else if (strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0) {
unpack_migrate_to_failure(history);
- } else if (strcmp(history->task, CRMD_ACTION_MIGRATED) == 0) {
+ } else if (strcmp(history->task, PCMK_ACTION_MIGRATE_FROM) == 0) {
unpack_migrate_from_failure(history);
- } else if (strcmp(history->task, CRMD_ACTION_PROMOTE) == 0) {
- history->rsc->role = RSC_ROLE_PROMOTED;
+ } else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) {
+ history->rsc->role = pcmk_role_promoted;
- } else if (strcmp(history->task, CRMD_ACTION_DEMOTE) == 0) {
- if (action->on_fail == action_fail_block) {
- history->rsc->role = RSC_ROLE_PROMOTED;
- pe__set_next_role(history->rsc, RSC_ROLE_STOPPED,
+ } else if (strcmp(history->task, PCMK_ACTION_DEMOTE) == 0) {
+ if (config_on_fail == pcmk_on_fail_block) {
+ history->rsc->role = pcmk_role_promoted;
+ pe__set_next_role(history->rsc, pcmk_role_stopped,
"demote with on-fail=block");
} else if (history->exit_status == PCMK_OCF_NOT_RUNNING) {
- history->rsc->role = RSC_ROLE_STOPPED;
+ history->rsc->role = pcmk_role_stopped;
} else {
/* Staying in the promoted role would put the scheduler and
@@ -3451,16 +3713,16 @@ unpack_rsc_op_failure(struct action_history *history, xmlNode **last_failure,
* dangerous because the resource will be stopped as part of
* recovery, and any promotion will be ordered after that stop.
*/
- history->rsc->role = RSC_ROLE_UNPROMOTED;
+ history->rsc->role = pcmk_role_unpromoted;
}
}
if (is_probe && (history->exit_status == PCMK_OCF_NOT_INSTALLED)) {
/* leave stopped */
pe_rsc_trace(history->rsc, "Leaving %s stopped", history->rsc->id);
- history->rsc->role = RSC_ROLE_STOPPED;
+ history->rsc->role = pcmk_role_stopped;
- } else if (history->rsc->role < RSC_ROLE_STARTED) {
+ } else if (history->rsc->role < pcmk_role_started) {
pe_rsc_trace(history->rsc, "Setting %s active", history->rsc->id);
set_active(history->rsc);
}
@@ -3469,18 +3731,16 @@ unpack_rsc_op_failure(struct action_history *history, xmlNode **last_failure,
"Resource %s: role=%s, unclean=%s, on_fail=%s, fail_role=%s",
history->rsc->id, role2text(history->rsc->role),
pcmk__btoa(history->node->details->unclean),
- fail2text(action->on_fail), role2text(action->fail_role));
+ fail2text(config_on_fail), role2text(fail_role));
- if ((action->fail_role != RSC_ROLE_STARTED)
- && (history->rsc->next_role < action->fail_role)) {
- pe__set_next_role(history->rsc, action->fail_role, "failure");
+ if ((fail_role != pcmk_role_started)
+ && (history->rsc->next_role < fail_role)) {
+ pe__set_next_role(history->rsc, fail_role, "failure");
}
- if (action->fail_role == RSC_ROLE_STOPPED) {
+ if (fail_role == pcmk_role_stopped) {
ban_from_all_nodes(history->rsc);
}
-
- pe_free_action(action);
}
/*!
@@ -3497,7 +3757,7 @@ block_if_unrecoverable(struct action_history *history)
{
char *last_change_s = NULL;
- if (strcmp(history->task, CRMD_ACTION_STOP) != 0) {
+ if (strcmp(history->task, PCMK_ACTION_STOP) != 0) {
return; // All actions besides stop are always recoverable
}
if (pe_can_fence(history->node->details->data_set, history->node)) {
@@ -3516,8 +3776,8 @@ block_if_unrecoverable(struct action_history *history)
free(last_change_s);
- pe__clear_resource_flags(history->rsc, pe_rsc_managed);
- pe__set_resource_flags(history->rsc, pe_rsc_block);
+ pe__clear_resource_flags(history->rsc, pcmk_rsc_managed);
+ pe__set_resource_flags(history->rsc, pcmk_rsc_blocked);
}
/*!
@@ -3556,8 +3816,8 @@ remap_because(struct action_history *history, const char **why, int value,
* \param[in] expired Whether result is expired
*
* \note If the result is remapped and the node is not shutting down or failed,
- * the operation will be recorded in the data set's list of failed operations
- * to highlight it for the user.
+ * the operation will be recorded in the scheduler data's list of failed
+ * operations to highlight it for the user.
*
* \note This may update the resource's current and next role.
*/
@@ -3664,16 +3924,16 @@ remap_operation(struct action_history *history,
case PCMK_OCF_NOT_RUNNING:
if (is_probe
|| (history->expected_exit_status == history->exit_status)
- || !pcmk_is_set(history->rsc->flags, pe_rsc_managed)) {
+ || !pcmk_is_set(history->rsc->flags, pcmk_rsc_managed)) {
/* For probes, recurring monitors for the Stopped role, and
* unmanaged resources, "not running" is not considered a
* failure.
*/
remap_because(history, &why, PCMK_EXEC_DONE, "exit status");
- history->rsc->role = RSC_ROLE_STOPPED;
- *on_fail = action_fail_ignore;
- pe__set_next_role(history->rsc, RSC_ROLE_UNKNOWN,
+ history->rsc->role = pcmk_role_stopped;
+ *on_fail = pcmk_on_fail_ignore;
+ pe__set_next_role(history->rsc, pcmk_role_unknown,
"not running");
}
break;
@@ -3692,13 +3952,13 @@ remap_operation(struct action_history *history,
}
if (!expired
|| (history->exit_status == history->expected_exit_status)) {
- history->rsc->role = RSC_ROLE_PROMOTED;
+ history->rsc->role = pcmk_role_promoted;
}
break;
case PCMK_OCF_FAILED_PROMOTED:
if (!expired) {
- history->rsc->role = RSC_ROLE_PROMOTED;
+ history->rsc->role = pcmk_role_promoted;
}
remap_because(history, &why, PCMK_EXEC_ERROR, "exit status");
break;
@@ -3765,16 +4025,15 @@ remap_done:
// return TRUE if start or monitor last failure but parameters changed
static bool
should_clear_for_param_change(const xmlNode *xml_op, const char *task,
- pe_resource_t *rsc, pe_node_t *node)
+ pcmk_resource_t *rsc, pcmk_node_t *node)
{
- if (!strcmp(task, "start") || !strcmp(task, "monitor")) {
-
+ if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_MONITOR, NULL)) {
if (pe__bundle_needs_remote_name(rsc)) {
/* We haven't allocated resources yet, so we can't reliably
* substitute addr parameters for the REMOTE_CONTAINER_HACK.
* When that's needed, defer the check until later.
*/
- pe__add_param_check(xml_op, rsc, node, pe_check_last_failure,
+ pe__add_param_check(xml_op, rsc, node, pcmk__check_last_failure,
rsc->cluster);
} else {
@@ -3783,13 +4042,13 @@ should_clear_for_param_change(const xmlNode *xml_op, const char *task,
digest_data = rsc_action_digest_cmp(rsc, xml_op, node,
rsc->cluster);
switch (digest_data->rc) {
- case RSC_DIGEST_UNKNOWN:
+ case pcmk__digest_unknown:
crm_trace("Resource %s history entry %s on %s"
" has no digest to compare",
rsc->id, pe__xe_history_key(xml_op),
node->details->id);
break;
- case RSC_DIGEST_MATCH:
+ case pcmk__digest_match:
break;
default:
return TRUE;
@@ -3801,21 +4060,21 @@ should_clear_for_param_change(const xmlNode *xml_op, const char *task,
// Order action after fencing of remote node, given connection rsc
static void
-order_after_remote_fencing(pe_action_t *action, pe_resource_t *remote_conn,
- pe_working_set_t *data_set)
+order_after_remote_fencing(pcmk_action_t *action, pcmk_resource_t *remote_conn,
+ pcmk_scheduler_t *scheduler)
{
- pe_node_t *remote_node = pe_find_node(data_set->nodes, remote_conn->id);
+ pcmk_node_t *remote_node = pe_find_node(scheduler->nodes, remote_conn->id);
if (remote_node) {
- pe_action_t *fence = pe_fence_op(remote_node, NULL, TRUE, NULL,
- FALSE, data_set);
+ pcmk_action_t *fence = pe_fence_op(remote_node, NULL, TRUE, NULL,
+ FALSE, scheduler);
- order_actions(fence, action, pe_order_implies_then);
+ order_actions(fence, action, pcmk__ar_first_implies_then);
}
}
static bool
-should_ignore_failure_timeout(const pe_resource_t *rsc, const char *task,
+should_ignore_failure_timeout(const pcmk_resource_t *rsc, const char *task,
guint interval_ms, bool is_last_failure)
{
/* Clearing failures of recurring monitors has special concerns. The
@@ -3839,10 +4098,11 @@ should_ignore_failure_timeout(const pe_resource_t *rsc, const char *task,
* if the remote node hasn't been fenced.
*/
if (rsc->remote_reconnect_ms
- && pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)
- && (interval_ms != 0) && pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
+ && pcmk_is_set(rsc->cluster->flags, pcmk_sched_fencing_enabled)
+ && (interval_ms != 0)
+ && pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
- pe_node_t *remote_node = pe_find_node(rsc->cluster->nodes, rsc->id);
+ pcmk_node_t *remote_node = pe_find_node(rsc->cluster->nodes, rsc->id);
if (remote_node && !remote_node->details->remote_was_fenced) {
if (is_last_failure) {
@@ -3909,7 +4169,8 @@ check_operation_expiry(struct action_history *history)
// Does the resource as a whole have an unexpired fail count?
unexpired_fail_count = pe_get_failcount(history->node, history->rsc,
- &last_failure, pe_fc_effective,
+ &last_failure,
+ pcmk__fc_effective,
history->xml);
// Update scheduler recheck time according to *last* failure
@@ -3920,13 +4181,14 @@ check_operation_expiry(struct action_history *history)
history->rsc->failure_timeout, (long long) last_failure);
last_failure += history->rsc->failure_timeout + 1;
if (unexpired_fail_count && (now < last_failure)) {
- pe__update_recheck_time(last_failure, history->rsc->cluster);
+ pe__update_recheck_time(last_failure, history->rsc->cluster,
+ "fail count expiration");
}
}
if (expired) {
- if (pe_get_failcount(history->node, history->rsc, NULL, pe_fc_default,
- history->xml)) {
+ if (pe_get_failcount(history->node, history->rsc, NULL,
+ pcmk__fc_default, history->xml)) {
// There is a fail count ignoring timeout
if (unexpired_fail_count == 0) {
@@ -3963,12 +4225,14 @@ check_operation_expiry(struct action_history *history)
}
if (clear_reason != NULL) {
+ pcmk_action_t *clear_op = NULL;
+
// Schedule clearing of the fail count
- pe_action_t *clear_op = pe__clear_failcount(history->rsc, history->node,
- clear_reason,
- history->rsc->cluster);
+ clear_op = pe__clear_failcount(history->rsc, history->node,
+ clear_reason, history->rsc->cluster);
- if (pcmk_is_set(history->rsc->cluster->flags, pe_flag_stonith_enabled)
+ if (pcmk_is_set(history->rsc->cluster->flags,
+ pcmk_sched_fencing_enabled)
&& (history->rsc->remote_reconnect_ms != 0)) {
/* If we're clearing a remote connection due to a reconnect
* interval, we want to wait until any scheduled fencing
@@ -3987,7 +4251,7 @@ check_operation_expiry(struct action_history *history)
}
if (expired && (history->interval_ms == 0)
- && pcmk__str_eq(history->task, CRMD_ACTION_STATUS, pcmk__str_none)) {
+ && pcmk__str_eq(history->task, PCMK_ACTION_MONITOR, pcmk__str_none)) {
switch (history->exit_status) {
case PCMK_OCF_OK:
case PCMK_OCF_NOT_RUNNING:
@@ -4022,27 +4286,6 @@ pe__target_rc_from_xml(const xmlNode *xml_op)
/*!
* \internal
- * \brief Get the failure handling for an action
- *
- * \param[in,out] history Parsed action history entry
- *
- * \return Failure handling appropriate to action
- */
-static enum action_fail_response
-get_action_on_fail(struct action_history *history)
-{
- enum action_fail_response result = action_fail_recover;
- pe_action_t *action = custom_action(history->rsc, strdup(history->key),
- history->task, NULL, TRUE, FALSE,
- history->rsc->cluster);
-
- result = action->on_fail;
- pe_free_action(action);
- return result;
-}
-
-/*!
- * \internal
* \brief Update a resource's state for an action result
*
* \param[in,out] history Parsed action history entry
@@ -4060,53 +4303,53 @@ update_resource_state(struct action_history *history, int exit_status,
if ((exit_status == PCMK_OCF_NOT_INSTALLED)
|| (!pe_rsc_is_bundled(history->rsc)
&& pcmk_xe_mask_probe_failure(history->xml))) {
- history->rsc->role = RSC_ROLE_STOPPED;
+ history->rsc->role = pcmk_role_stopped;
} else if (exit_status == PCMK_OCF_NOT_RUNNING) {
clear_past_failure = true;
- } else if (pcmk__str_eq(history->task, CRMD_ACTION_STATUS,
+ } else if (pcmk__str_eq(history->task, PCMK_ACTION_MONITOR,
pcmk__str_none)) {
if ((last_failure != NULL)
&& pcmk__str_eq(history->key, pe__xe_history_key(last_failure),
pcmk__str_none)) {
clear_past_failure = true;
}
- if (history->rsc->role < RSC_ROLE_STARTED) {
+ if (history->rsc->role < pcmk_role_started) {
set_active(history->rsc);
}
- } else if (pcmk__str_eq(history->task, CRMD_ACTION_START, pcmk__str_none)) {
- history->rsc->role = RSC_ROLE_STARTED;
+ } else if (pcmk__str_eq(history->task, PCMK_ACTION_START, pcmk__str_none)) {
+ history->rsc->role = pcmk_role_started;
clear_past_failure = true;
- } else if (pcmk__str_eq(history->task, CRMD_ACTION_STOP, pcmk__str_none)) {
- history->rsc->role = RSC_ROLE_STOPPED;
+ } else if (pcmk__str_eq(history->task, PCMK_ACTION_STOP, pcmk__str_none)) {
+ history->rsc->role = pcmk_role_stopped;
clear_past_failure = true;
- } else if (pcmk__str_eq(history->task, CRMD_ACTION_PROMOTE,
+ } else if (pcmk__str_eq(history->task, PCMK_ACTION_PROMOTE,
pcmk__str_none)) {
- history->rsc->role = RSC_ROLE_PROMOTED;
+ history->rsc->role = pcmk_role_promoted;
clear_past_failure = true;
- } else if (pcmk__str_eq(history->task, CRMD_ACTION_DEMOTE,
+ } else if (pcmk__str_eq(history->task, PCMK_ACTION_DEMOTE,
pcmk__str_none)) {
- if (*on_fail == action_fail_demote) {
+ if (*on_fail == pcmk_on_fail_demote) {
// Demote clears an error only if on-fail=demote
clear_past_failure = true;
}
- history->rsc->role = RSC_ROLE_UNPROMOTED;
+ history->rsc->role = pcmk_role_unpromoted;
- } else if (pcmk__str_eq(history->task, CRMD_ACTION_MIGRATED,
+ } else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_FROM,
pcmk__str_none)) {
- history->rsc->role = RSC_ROLE_STARTED;
+ history->rsc->role = pcmk_role_started;
clear_past_failure = true;
- } else if (pcmk__str_eq(history->task, CRMD_ACTION_MIGRATE,
+ } else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_TO,
pcmk__str_none)) {
unpack_migrate_to_success(history);
- } else if (history->rsc->role < RSC_ROLE_STARTED) {
+ } else if (history->rsc->role < pcmk_role_started) {
pe_rsc_trace(history->rsc, "%s active on %s",
history->rsc->id, pe__node_name(history->node));
set_active(history->rsc);
@@ -4117,26 +4360,26 @@ update_resource_state(struct action_history *history, int exit_status,
}
switch (*on_fail) {
- case action_fail_stop:
- case action_fail_fence:
- case action_fail_migrate:
- case action_fail_standby:
+ case pcmk_on_fail_stop:
+ case pcmk_on_fail_ban:
+ case pcmk_on_fail_standby_node:
+ case pcmk_on_fail_fence_node:
pe_rsc_trace(history->rsc,
"%s (%s) is not cleared by a completed %s",
history->rsc->id, fail2text(*on_fail), history->task);
break;
- case action_fail_block:
- case action_fail_ignore:
- case action_fail_demote:
- case action_fail_recover:
- case action_fail_restart_container:
- *on_fail = action_fail_ignore;
- pe__set_next_role(history->rsc, RSC_ROLE_UNKNOWN,
+ case pcmk_on_fail_block:
+ case pcmk_on_fail_ignore:
+ case pcmk_on_fail_demote:
+ case pcmk_on_fail_restart:
+ case pcmk_on_fail_restart_container:
+ *on_fail = pcmk_on_fail_ignore;
+ pe__set_next_role(history->rsc, pcmk_role_unknown,
"clear past failures");
break;
- case action_fail_reset_remote:
+ case pcmk_on_fail_reset_remote:
if (history->rsc->remote_reconnect_ms == 0) {
/* With no reconnect interval, the connection is allowed to
* start again after the remote node is fenced and
@@ -4144,8 +4387,8 @@ update_resource_state(struct action_history *history, int exit_status,
* for the failure to be cleared entirely before attempting
* to reconnect.)
*/
- *on_fail = action_fail_ignore;
- pe__set_next_role(history->rsc, RSC_ROLE_UNKNOWN,
+ *on_fail = pcmk_on_fail_ignore;
+ pe__set_next_role(history->rsc, pcmk_role_unknown,
"clear past failures and reset remote");
}
break;
@@ -4170,14 +4413,14 @@ can_affect_state(struct action_history *history)
* Currently, unknown operations can affect whether a resource is considered
* active and/or failed.
*/
- return pcmk__str_any_of(history->task, CRMD_ACTION_STATUS,
- CRMD_ACTION_START, CRMD_ACTION_STOP,
- CRMD_ACTION_PROMOTE, CRMD_ACTION_DEMOTE,
- CRMD_ACTION_MIGRATE, CRMD_ACTION_MIGRATED,
+ return pcmk__str_any_of(history->task, PCMK_ACTION_MONITOR,
+ PCMK_ACTION_START, PCMK_ACTION_STOP,
+ PCMK_ACTION_PROMOTE, PCMK_ACTION_DEMOTE,
+ PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM,
"asyncmon", NULL);
#else
- return !pcmk__str_any_of(history->task, CRMD_ACTION_NOTIFY,
- CRMD_ACTION_METADATA, NULL);
+ return !pcmk__str_any_of(history->task, PCMK_ACTION_NOTIFY,
+ PCMK_ACTION_META_DATA, NULL);
#endif
}
@@ -4244,8 +4487,8 @@ process_expired_result(struct action_history *history, int orig_exit_status)
&& pcmk_xe_mask_probe_failure(history->xml)
&& (orig_exit_status != history->expected_exit_status)) {
- if (history->rsc->role <= RSC_ROLE_STOPPED) {
- history->rsc->role = RSC_ROLE_UNKNOWN;
+ if (history->rsc->role <= pcmk_role_stopped) {
+ history->rsc->role = pcmk_role_unknown;
}
crm_trace("Ignoring resource history entry %s for probe of %s on %s: "
"Masked failure expired",
@@ -4303,9 +4546,9 @@ mask_probe_failure(struct action_history *history, int orig_exit_status,
const xmlNode *last_failure,
enum action_fail_response *on_fail)
{
- pe_resource_t *ban_rsc = history->rsc;
+ pcmk_resource_t *ban_rsc = history->rsc;
- if (!pcmk_is_set(history->rsc->flags, pe_rsc_unique)) {
+ if (!pcmk_is_set(history->rsc->flags, pcmk_rsc_unique)) {
ban_rsc = uber_parent(history->rsc);
}
@@ -4392,20 +4635,20 @@ process_pending_action(struct action_history *history,
return;
}
- if (strcmp(history->task, CRMD_ACTION_START) == 0) {
- pe__set_resource_flags(history->rsc, pe_rsc_start_pending);
+ if (strcmp(history->task, PCMK_ACTION_START) == 0) {
+ pe__set_resource_flags(history->rsc, pcmk_rsc_start_pending);
set_active(history->rsc);
- } else if (strcmp(history->task, CRMD_ACTION_PROMOTE) == 0) {
- history->rsc->role = RSC_ROLE_PROMOTED;
+ } else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) {
+ history->rsc->role = pcmk_role_promoted;
- } else if ((strcmp(history->task, CRMD_ACTION_MIGRATE) == 0)
+ } else if ((strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0)
&& history->node->details->unclean) {
/* A migrate_to action is pending on an unclean source, so force a stop
* on the target.
*/
const char *migrate_target = NULL;
- pe_node_t *target = NULL;
+ pcmk_node_t *target = NULL;
migrate_target = crm_element_value(history->xml,
XML_LRM_ATTR_MIGRATE_TARGET);
@@ -4439,13 +4682,14 @@ process_pending_action(struct action_history *history,
}
static void
-unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
+unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node, xmlNode *xml_op,
xmlNode **last_failure, enum action_fail_response *on_fail)
{
int old_rc = 0;
bool expired = false;
- pe_resource_t *parent = rsc;
- enum action_fail_response failure_strategy = action_fail_recover;
+ pcmk_resource_t *parent = rsc;
+ enum rsc_role_e fail_role = pcmk_role_unknown;
+ enum action_fail_response failure_strategy = pcmk_on_fail_restart;
struct action_history history = {
.rsc = rsc,
@@ -4514,7 +4758,7 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
goto done;
}
- if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
parent = uber_parent(rsc);
}
@@ -4529,25 +4773,29 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
goto done;
case PCMK_EXEC_NOT_INSTALLED:
- failure_strategy = get_action_on_fail(&history);
- if (failure_strategy == action_fail_ignore) {
+ unpack_failure_handling(&history, &failure_strategy, &fail_role);
+ if (failure_strategy == pcmk_on_fail_ignore) {
crm_warn("Cannot ignore failed %s of %s on %s: "
"Resource agent doesn't exist "
CRM_XS " status=%d rc=%d id=%s",
history.task, rsc->id, pe__node_name(node),
history.execution_status, history.exit_status,
history.id);
- /* Also for printing it as "FAILED" by marking it as pe_rsc_failed later */
- *on_fail = action_fail_migrate;
+ /* Also for printing it as "FAILED" by marking it as
+ * pcmk_rsc_failed later
+ */
+ *on_fail = pcmk_on_fail_ban;
}
resource_location(parent, node, -INFINITY, "hard-error",
rsc->cluster);
- unpack_rsc_op_failure(&history, last_failure, on_fail);
+ unpack_rsc_op_failure(&history, failure_strategy, fail_role,
+ last_failure, on_fail);
goto done;
case PCMK_EXEC_NOT_CONNECTED:
if (pe__is_guest_or_remote_node(node)
- && pcmk_is_set(node->details->remote_rsc->flags, pe_rsc_managed)) {
+ && pcmk_is_set(node->details->remote_rsc->flags,
+ pcmk_rsc_managed)) {
/* We should never get into a situation where a managed remote
* connection resource is considered OK but a resource action
* behind the connection gets a "not connected" status. But as a
@@ -4555,7 +4803,7 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
* that, ensure the remote connection is considered failed.
*/
pe__set_resource_flags(node->details->remote_rsc,
- pe_rsc_failed|pe_rsc_stop);
+ pcmk_rsc_failed|pcmk_rsc_stop_if_failed);
}
break; // Not done, do error handling
@@ -4571,10 +4819,10 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
break;
}
- failure_strategy = get_action_on_fail(&history);
- if ((failure_strategy == action_fail_ignore)
- || (failure_strategy == action_fail_restart_container
- && (strcmp(history.task, CRMD_ACTION_STOP) == 0))) {
+ unpack_failure_handling(&history, &failure_strategy, &fail_role);
+ if ((failure_strategy == pcmk_on_fail_ignore)
+ || ((failure_strategy == pcmk_on_fail_restart_container)
+ && (strcmp(history.task, PCMK_ACTION_STOP) == 0))) {
char *last_change_s = last_change_str(xml_op);
@@ -4589,17 +4837,18 @@ unpack_rsc_op(pe_resource_t *rsc, pe_node_t *node, xmlNode *xml_op,
update_resource_state(&history, history.expected_exit_status,
*last_failure, on_fail);
crm_xml_add(xml_op, XML_ATTR_UNAME, node->details->uname);
- pe__set_resource_flags(rsc, pe_rsc_failure_ignored);
+ pe__set_resource_flags(rsc, pcmk_rsc_ignore_failure);
record_failed_op(&history);
- if ((failure_strategy == action_fail_restart_container)
- && cmp_on_fail(*on_fail, action_fail_recover) <= 0) {
+ if ((failure_strategy == pcmk_on_fail_restart_container)
+ && cmp_on_fail(*on_fail, pcmk_on_fail_restart) <= 0) {
*on_fail = failure_strategy;
}
} else {
- unpack_rsc_op_failure(&history, last_failure, on_fail);
+ unpack_rsc_op_failure(&history, failure_strategy, fail_role,
+ last_failure, on_fail);
if (history.execution_status == PCMK_EXEC_ERROR_HARD) {
uint8_t log_level = LOG_ERR;
@@ -4635,15 +4884,15 @@ done:
}
static void
-add_node_attrs(const xmlNode *xml_obj, pe_node_t *node, bool overwrite,
- pe_working_set_t *data_set)
+add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node, bool overwrite,
+ pcmk_scheduler_t *scheduler)
{
const char *cluster_name = NULL;
pe_rule_eval_data_t rule_data = {
.node_hash = NULL,
- .role = RSC_ROLE_UNKNOWN,
- .now = data_set->now,
+ .role = pcmk_role_unknown,
+ .now = scheduler->now,
.match_data = NULL,
.rsc_data = NULL,
.op_data = NULL
@@ -4654,8 +4903,8 @@ add_node_attrs(const xmlNode *xml_obj, pe_node_t *node, bool overwrite,
g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_ID),
strdup(node->details->id));
- if (pcmk__str_eq(node->details->id, data_set->dc_uuid, pcmk__str_casei)) {
- data_set->dc_node = node;
+ if (pcmk__str_eq(node->details->id, scheduler->dc_uuid, pcmk__str_casei)) {
+ scheduler->dc_node = node;
node->details->is_dc = TRUE;
g_hash_table_insert(node->details->attrs,
strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_TRUE));
@@ -4664,18 +4913,19 @@ add_node_attrs(const xmlNode *xml_obj, pe_node_t *node, bool overwrite,
strdup(CRM_ATTR_IS_DC), strdup(XML_BOOLEAN_FALSE));
}
- cluster_name = g_hash_table_lookup(data_set->config_hash, "cluster-name");
+ cluster_name = g_hash_table_lookup(scheduler->config_hash, "cluster-name");
if (cluster_name) {
g_hash_table_insert(node->details->attrs, strdup(CRM_ATTR_CLUSTER_NAME),
strdup(cluster_name));
}
pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_ATTR_SETS, &rule_data,
- node->details->attrs, NULL, overwrite, data_set);
+ node->details->attrs, NULL, overwrite,
+ scheduler);
pe__unpack_dataset_nvpairs(xml_obj, XML_TAG_UTILIZATION, &rule_data,
node->details->utilization, NULL,
- FALSE, data_set);
+ FALSE, scheduler);
if (pe_node_attribute_raw(node, CRM_ATTR_SITE_NAME) == NULL) {
const char *site_name = pe_node_attribute_raw(node, "site-name");
@@ -4760,15 +5010,15 @@ extract_operations(const char *node, const char *rsc, xmlNode * rsc_entry, gbool
GList *
find_operations(const char *rsc, const char *node, gboolean active_filter,
- pe_working_set_t * data_set)
+ pcmk_scheduler_t *scheduler)
{
GList *output = NULL;
GList *intermediate = NULL;
xmlNode *tmp = NULL;
- xmlNode *status = find_xml_node(data_set->input, XML_CIB_TAG_STATUS, TRUE);
+ xmlNode *status = find_xml_node(scheduler->input, XML_CIB_TAG_STATUS, TRUE);
- pe_node_t *this_node = NULL;
+ pcmk_node_t *this_node = NULL;
xmlNode *node_state = NULL;
@@ -4782,20 +5032,20 @@ find_operations(const char *rsc, const char *node, gboolean active_filter,
continue;
}
- this_node = pe_find_node(data_set->nodes, uname);
+ this_node = pe_find_node(scheduler->nodes, uname);
if(this_node == NULL) {
CRM_LOG_ASSERT(this_node != NULL);
continue;
} else if (pe__is_guest_or_remote_node(this_node)) {
- determine_remote_online_status(data_set, this_node);
+ determine_remote_online_status(scheduler, this_node);
} else {
- determine_online_status(node_state, this_node, data_set);
+ determine_online_status(node_state, this_node, scheduler);
}
if (this_node->details->online
- || pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ || pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
/* offline nodes run no resources...
* unless stonith is enabled in which case we need to
* make sure rsc start events happen after the stonith
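The unpack.c hunks above are part of the 2.1.7 rename of the scheduler's internal types and constants (pe_working_set_t to pcmk_scheduler_t, pe_node_t to pcmk_node_t, pe_rsc_* to pcmk_rsc_*, pe_flag_* to pcmk_sched_*); the bit-flag idiom they operate on is unchanged. The following standalone sketch is illustration only, with assumed bit values and a stand-in for pcmk_is_set(), and is not part of the patch:

/*
 * Illustrative sketch, not part of the patch. Bit values and the helper
 * below are assumptions for demonstration; the real definitions live in
 * Pacemaker's headers.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define pcmk_rsc_managed         (1ULL << 1)   /* assumed value */
#define pcmk_rsc_failed          (1ULL << 2)   /* assumed value */
#define pcmk_rsc_stop_if_failed  (1ULL << 3)   /* assumed value */

/* Stand-in for pcmk_is_set(): true if all bits in to_check are set */
static bool
flag_is_set(uint64_t flags, uint64_t to_check)
{
    return (flags & to_check) == to_check;
}

int
main(void)
{
    uint64_t rsc_flags = pcmk_rsc_managed;

    /* Mark a managed resource as failed and force a stop, as the
     * PCMK_EXEC_NOT_CONNECTED branch above does for a remote connection.
     */
    rsc_flags |= pcmk_rsc_failed | pcmk_rsc_stop_if_failed;

    printf("failed: %s\n",
           flag_is_set(rsc_flags, pcmk_rsc_failed) ? "yes" : "no");
    return 0;
}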
diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
index ef0a092..4055d6d 100644
--- a/lib/pengine/utils.c
+++ b/lib/pengine/utils.c
@@ -27,40 +27,40 @@ gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data);
* \internal
* \brief Check whether we can fence a particular node
*
- * \param[in] data_set Working set for cluster
- * \param[in] node Name of node to check
+ * \param[in] scheduler Scheduler data
+ * \param[in] node Name of node to check
*
* \return true if node can be fenced, false otherwise
*/
bool
-pe_can_fence(const pe_working_set_t *data_set, const pe_node_t *node)
+pe_can_fence(const pcmk_scheduler_t *scheduler, const pcmk_node_t *node)
{
if (pe__is_guest_node(node)) {
/* Guest nodes are fenced by stopping their container resource. We can
* do that if the container's host is either online or fenceable.
*/
- pe_resource_t *rsc = node->details->remote_rsc->container;
+ pcmk_resource_t *rsc = node->details->remote_rsc->container;
for (GList *n = rsc->running_on; n != NULL; n = n->next) {
- pe_node_t *container_node = n->data;
+ pcmk_node_t *container_node = n->data;
if (!container_node->details->online
- && !pe_can_fence(data_set, container_node)) {
+ && !pe_can_fence(scheduler, container_node)) {
return false;
}
}
return true;
- } else if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
+ } else if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
return false; /* Turned off */
- } else if (!pcmk_is_set(data_set->flags, pe_flag_have_stonith_resource)) {
+ } else if (!pcmk_is_set(scheduler->flags, pcmk_sched_have_fencing)) {
return false; /* No devices */
- } else if (pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
+ } else if (pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
return true;
- } else if (data_set->no_quorum_policy == no_quorum_ignore) {
+ } else if (scheduler->no_quorum_policy == pcmk_no_quorum_ignore) {
return true;
} else if(node == NULL) {
@@ -85,65 +85,25 @@ pe_can_fence(const pe_working_set_t *data_set, const pe_node_t *node)
* \return Newly allocated shallow copy of this_node
* \note This function asserts on errors and is guaranteed to return non-NULL.
*/
-pe_node_t *
-pe__copy_node(const pe_node_t *this_node)
+pcmk_node_t *
+pe__copy_node(const pcmk_node_t *this_node)
{
- pe_node_t *new_node = NULL;
+ pcmk_node_t *new_node = NULL;
CRM_ASSERT(this_node != NULL);
- new_node = calloc(1, sizeof(pe_node_t));
+ new_node = calloc(1, sizeof(pcmk_node_t));
CRM_ASSERT(new_node != NULL);
new_node->rsc_discover_mode = this_node->rsc_discover_mode;
new_node->weight = this_node->weight;
new_node->fixed = this_node->fixed; // @COMPAT deprecated and unused
+ new_node->count = this_node->count;
new_node->details = this_node->details;
return new_node;
}
-/* any node in list1 or list2 and not in the other gets a score of -INFINITY */
-void
-node_list_exclude(GHashTable * hash, GList *list, gboolean merge_scores)
-{
- GHashTable *result = hash;
- pe_node_t *other_node = NULL;
- GList *gIter = list;
-
- GHashTableIter iter;
- pe_node_t *node = NULL;
-
- g_hash_table_iter_init(&iter, hash);
- while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
-
- other_node = pe_find_node_id(list, node->details->id);
- if (other_node == NULL) {
- node->weight = -INFINITY;
- crm_trace("Banning dependent from %s (no primary instance)",
- pe__node_name(node));
- } else if (merge_scores) {
- node->weight = pcmk__add_scores(node->weight, other_node->weight);
- crm_trace("Added primary's score %s to dependent's score for %s "
- "(now %s)", pcmk_readable_score(other_node->weight),
- pe__node_name(node), pcmk_readable_score(node->weight));
- }
- }
-
- for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node = (pe_node_t *) gIter->data;
-
- other_node = pe_hash_table_lookup(result, node->details->id);
-
- if (other_node == NULL) {
- pe_node_t *new_node = pe__copy_node(node);
-
- new_node->weight = -INFINITY;
- g_hash_table_insert(result, (gpointer) new_node->details->id, new_node);
- }
- }
-}
-
/*!
* \internal
* \brief Create a node hash table from a node list
@@ -159,8 +119,9 @@ pe__node_list2table(const GList *list)
result = pcmk__strkey_table(NULL, free);
for (const GList *gIter = list; gIter != NULL; gIter = gIter->next) {
- pe_node_t *new_node = pe__copy_node((const pe_node_t *) gIter->data);
+ pcmk_node_t *new_node = NULL;
+ new_node = pe__copy_node((const pcmk_node_t *) gIter->data);
g_hash_table_insert(result, (gpointer) new_node->details->id, new_node);
}
return result;
@@ -184,8 +145,8 @@ pe__node_list2table(const GList *list)
gint
pe__cmp_node_name(gconstpointer a, gconstpointer b)
{
- const pe_node_t *node1 = (const pe_node_t *) a;
- const pe_node_t *node2 = (const pe_node_t *) b;
+ const pcmk_node_t *node1 = (const pcmk_node_t *) a;
+ const pcmk_node_t *node2 = (const pcmk_node_t *) b;
if ((node1 == NULL) && (node2 == NULL)) {
return 0;
@@ -207,23 +168,23 @@ pe__cmp_node_name(gconstpointer a, gconstpointer b)
* \internal
* \brief Output node weights to stdout
*
- * \param[in] rsc Use allowed nodes for this resource
- * \param[in] comment Text description to prefix lines with
- * \param[in] nodes If rsc is not specified, use these nodes
- * \param[in,out] data_set Cluster working set
+ * \param[in] rsc Use allowed nodes for this resource
+ * \param[in] comment Text description to prefix lines with
+ * \param[in] nodes If rsc is not specified, use these nodes
+ * \param[in,out] scheduler Scheduler data
*/
static void
-pe__output_node_weights(const pe_resource_t *rsc, const char *comment,
- GHashTable *nodes, pe_working_set_t *data_set)
+pe__output_node_weights(const pcmk_resource_t *rsc, const char *comment,
+ GHashTable *nodes, pcmk_scheduler_t *scheduler)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
// Sort the nodes so the output is consistent for regression tests
GList *list = g_list_sort(g_hash_table_get_values(nodes),
pe__cmp_node_name);
for (const GList *gIter = list; gIter != NULL; gIter = gIter->next) {
- const pe_node_t *node = (const pe_node_t *) gIter->data;
+ const pcmk_node_t *node = (const pcmk_node_t *) gIter->data;
out->message(out, "node-weight", rsc, comment, node->details->uname,
pcmk_readable_score(node->weight));
@@ -244,11 +205,11 @@ pe__output_node_weights(const pe_resource_t *rsc, const char *comment,
*/
static void
pe__log_node_weights(const char *file, const char *function, int line,
- const pe_resource_t *rsc, const char *comment,
+ const pcmk_resource_t *rsc, const char *comment,
GHashTable *nodes)
{
GHashTableIter iter;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
// Don't waste time if we're not tracing at this point
pcmk__if_tracing({}, return);
@@ -275,23 +236,23 @@ pe__log_node_weights(const char *file, const char *function, int line,
* \internal
* \brief Log or output node weights
*
- * \param[in] file Caller's filename
- * \param[in] function Caller's function name
- * \param[in] line Caller's line number
- * \param[in] to_log Log if true, otherwise output
- * \param[in] rsc If not NULL, use this resource's ID in logs,
- * and show scores recursively for any children
- * \param[in] comment Text description to prefix lines with
- * \param[in] nodes Nodes whose scores should be shown
- * \param[in,out] data_set Cluster working set
+ * \param[in] file Caller's filename
+ * \param[in] function Caller's function name
+ * \param[in] line Caller's line number
+ * \param[in] to_log Log if true, otherwise output
+ * \param[in] rsc If not NULL, use this resource's ID in logs,
+ * and show scores recursively for any children
+ * \param[in] comment Text description to prefix lines with
+ * \param[in] nodes Nodes whose scores should be shown
+ * \param[in,out] scheduler Scheduler data
*/
void
-pe__show_node_weights_as(const char *file, const char *function, int line,
- bool to_log, const pe_resource_t *rsc,
- const char *comment, GHashTable *nodes,
- pe_working_set_t *data_set)
+pe__show_node_scores_as(const char *file, const char *function, int line,
+ bool to_log, const pcmk_resource_t *rsc,
+ const char *comment, GHashTable *nodes,
+ pcmk_scheduler_t *scheduler)
{
- if (rsc != NULL && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
+ if ((rsc != NULL) && pcmk_is_set(rsc->flags, pcmk_rsc_removed)) {
// Don't show allocation scores for orphans
return;
}
@@ -303,16 +264,16 @@ pe__show_node_weights_as(const char *file, const char *function, int line,
if (to_log) {
pe__log_node_weights(file, function, line, rsc, comment, nodes);
} else {
- pe__output_node_weights(rsc, comment, nodes, data_set);
+ pe__output_node_weights(rsc, comment, nodes, scheduler);
}
// If this resource has children, repeat recursively for each
if (rsc && rsc->children) {
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *) gIter->data;
- pe__show_node_weights_as(file, function, line, to_log, child,
- comment, child->allowed_nodes, data_set);
+ pe__show_node_scores_as(file, function, line, to_log, child,
+ comment, child->allowed_nodes, scheduler);
}
}
}
@@ -334,8 +295,8 @@ pe__show_node_weights_as(const char *file, const char *function, int line,
gint
pe__cmp_rsc_priority(gconstpointer a, gconstpointer b)
{
- const pe_resource_t *resource1 = (const pe_resource_t *)a;
- const pe_resource_t *resource2 = (const pe_resource_t *)b;
+ const pcmk_resource_t *resource1 = (const pcmk_resource_t *)a;
+ const pcmk_resource_t *resource2 = (const pcmk_resource_t *)b;
if (a == NULL && b == NULL) {
return 0;
@@ -359,12 +320,13 @@ pe__cmp_rsc_priority(gconstpointer a, gconstpointer b)
}
static void
-resource_node_score(pe_resource_t *rsc, const pe_node_t *node, int score,
+resource_node_score(pcmk_resource_t *rsc, const pcmk_node_t *node, int score,
const char *tag)
{
- pe_node_t *match = NULL;
+ pcmk_node_t *match = NULL;
- if ((rsc->exclusive_discover || (node->rsc_discover_mode == pe_discover_never))
+ if ((rsc->exclusive_discover
+ || (node->rsc_discover_mode == pcmk_probe_never))
&& pcmk__str_eq(tag, "symmetric_default", pcmk__str_casei)) {
/* This string comparision may be fragile, but exclusive resources and
* exclusive nodes should not have the symmetric_default constraint
@@ -376,13 +338,13 @@ resource_node_score(pe_resource_t *rsc, const pe_node_t *node, int score,
GList *gIter = rsc->children;
for (; gIter != NULL; gIter = gIter->next) {
- pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
resource_node_score(child_rsc, node, score, tag);
}
}
- match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
+ match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
if (match == NULL) {
match = pe__copy_node(node);
g_hash_table_insert(rsc->allowed_nodes, (gpointer) match->details->id, match);
@@ -395,24 +357,24 @@ resource_node_score(pe_resource_t *rsc, const pe_node_t *node, int score,
}
void
-resource_location(pe_resource_t *rsc, const pe_node_t *node, int score,
- const char *tag, pe_working_set_t *data_set)
+resource_location(pcmk_resource_t *rsc, const pcmk_node_t *node, int score,
+ const char *tag, pcmk_scheduler_t *scheduler)
{
if (node != NULL) {
resource_node_score(rsc, node, score, tag);
- } else if (data_set != NULL) {
- GList *gIter = data_set->nodes;
+ } else if (scheduler != NULL) {
+ GList *gIter = scheduler->nodes;
for (; gIter != NULL; gIter = gIter->next) {
- pe_node_t *node_iter = (pe_node_t *) gIter->data;
+ pcmk_node_t *node_iter = (pcmk_node_t *) gIter->data;
resource_node_score(rsc, node_iter, score, tag);
}
} else {
GHashTableIter iter;
- pe_node_t *node_iter = NULL;
+ pcmk_node_t *node_iter = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) {
@@ -431,14 +393,14 @@ resource_location(pe_resource_t *rsc, const pe_node_t *node, int score,
}
time_t
-get_effective_time(pe_working_set_t * data_set)
+get_effective_time(pcmk_scheduler_t *scheduler)
{
- if(data_set) {
- if (data_set->now == NULL) {
+ if(scheduler) {
+ if (scheduler->now == NULL) {
crm_trace("Recording a new 'now'");
- data_set->now = crm_time_new(NULL);
+ scheduler->now = crm_time_new(NULL);
}
- return crm_time_get_seconds_since_epoch(data_set->now);
+ return crm_time_get_seconds_since_epoch(scheduler->now);
}
crm_trace("Defaulting to 'now'");
@@ -446,9 +408,9 @@ get_effective_time(pe_working_set_t * data_set)
}
gboolean
-get_target_role(const pe_resource_t *rsc, enum rsc_role_e *role)
+get_target_role(const pcmk_resource_t *rsc, enum rsc_role_e *role)
{
- enum rsc_role_e local_role = RSC_ROLE_UNKNOWN;
+ enum rsc_role_e local_role = pcmk_role_unknown;
const char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
CRM_CHECK(role != NULL, return FALSE);
@@ -459,15 +421,15 @@ get_target_role(const pe_resource_t *rsc, enum rsc_role_e *role)
}
local_role = text2role(value);
- if (local_role == RSC_ROLE_UNKNOWN) {
+ if (local_role == pcmk_role_unknown) {
pcmk__config_err("Ignoring '" XML_RSC_ATTR_TARGET_ROLE "' for %s "
"because '%s' is not valid", rsc->id, value);
return FALSE;
- } else if (local_role > RSC_ROLE_STARTED) {
+ } else if (local_role > pcmk_role_started) {
if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
- pe_rsc_promotable)) {
- if (local_role > RSC_ROLE_UNPROMOTED) {
+ pcmk_rsc_promotable)) {
+ if (local_role > pcmk_role_unpromoted) {
/* This is what we'd do anyway, just leave the default to avoid messing up the placement algorithm */
return FALSE;
}
@@ -485,13 +447,14 @@ get_target_role(const pe_resource_t *rsc, enum rsc_role_e *role)
}
gboolean
-order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order)
+order_actions(pcmk_action_t *lh_action, pcmk_action_t *rh_action,
+ uint32_t flags)
{
GList *gIter = NULL;
- pe_action_wrapper_t *wrapper = NULL;
+ pcmk__related_action_t *wrapper = NULL;
GList *list = NULL;
- if (order == pe_order_none) {
+ if (flags == pcmk__ar_none) {
return FALSE;
}
@@ -508,23 +471,23 @@ order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering
/* Filter dups, otherwise update_action_states() has too much work to do */
gIter = lh_action->actions_after;
for (; gIter != NULL; gIter = gIter->next) {
- pe_action_wrapper_t *after = (pe_action_wrapper_t *) gIter->data;
+ pcmk__related_action_t *after = gIter->data;
- if (after->action == rh_action && (after->type & order)) {
+ if (after->action == rh_action && (after->type & flags)) {
return FALSE;
}
}
- wrapper = calloc(1, sizeof(pe_action_wrapper_t));
+ wrapper = calloc(1, sizeof(pcmk__related_action_t));
wrapper->action = rh_action;
- wrapper->type = order;
+ wrapper->type = flags;
list = lh_action->actions_after;
list = g_list_prepend(list, wrapper);
lh_action->actions_after = list;
- wrapper = calloc(1, sizeof(pe_action_wrapper_t));
+ wrapper = calloc(1, sizeof(pcmk__related_action_t));
wrapper->action = lh_action;
- wrapper->type = order;
+ wrapper->type = flags;
list = rh_action->actions_before;
list = g_list_prepend(list, wrapper);
rh_action->actions_before = list;
@@ -534,7 +497,7 @@ order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering
void
destroy_ticket(gpointer data)
{
- pe_ticket_t *ticket = data;
+ pcmk_ticket_t *ticket = data;
if (ticket->state) {
g_hash_table_destroy(ticket->state);
@@ -543,23 +506,23 @@ destroy_ticket(gpointer data)
free(ticket);
}
-pe_ticket_t *
-ticket_new(const char *ticket_id, pe_working_set_t * data_set)
+pcmk_ticket_t *
+ticket_new(const char *ticket_id, pcmk_scheduler_t *scheduler)
{
- pe_ticket_t *ticket = NULL;
+ pcmk_ticket_t *ticket = NULL;
if (pcmk__str_empty(ticket_id)) {
return NULL;
}
- if (data_set->tickets == NULL) {
- data_set->tickets = pcmk__strkey_table(free, destroy_ticket);
+ if (scheduler->tickets == NULL) {
+ scheduler->tickets = pcmk__strkey_table(free, destroy_ticket);
}
- ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
+ ticket = g_hash_table_lookup(scheduler->tickets, ticket_id);
if (ticket == NULL) {
- ticket = calloc(1, sizeof(pe_ticket_t));
+ ticket = calloc(1, sizeof(pcmk_ticket_t));
if (ticket == NULL) {
crm_err("Cannot allocate ticket '%s'", ticket_id);
return NULL;
@@ -573,55 +536,57 @@ ticket_new(const char *ticket_id, pe_working_set_t * data_set)
ticket->standby = FALSE;
ticket->state = pcmk__strkey_table(free, free);
- g_hash_table_insert(data_set->tickets, strdup(ticket->id), ticket);
+ g_hash_table_insert(scheduler->tickets, strdup(ticket->id), ticket);
}
return ticket;
}
const char *
-rsc_printable_id(const pe_resource_t *rsc)
+rsc_printable_id(const pcmk_resource_t *rsc)
{
- return pcmk_is_set(rsc->flags, pe_rsc_unique)? rsc->id : ID(rsc->xml);
+ return pcmk_is_set(rsc->flags, pcmk_rsc_unique)? rsc->id : ID(rsc->xml);
}
void
-pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags)
+pe__clear_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags)
{
pe__clear_resource_flags(rsc, flags);
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe__clear_resource_flags_recursive((pe_resource_t *) gIter->data, flags);
+ pe__clear_resource_flags_recursive((pcmk_resource_t *) gIter->data,
+ flags);
}
}
void
-pe__clear_resource_flags_on_all(pe_working_set_t *data_set, uint64_t flag)
+pe__clear_resource_flags_on_all(pcmk_scheduler_t *scheduler, uint64_t flag)
{
- for (GList *lpc = data_set->resources; lpc != NULL; lpc = lpc->next) {
- pe_resource_t *r = (pe_resource_t *) lpc->data;
+ for (GList *lpc = scheduler->resources; lpc != NULL; lpc = lpc->next) {
+ pcmk_resource_t *r = (pcmk_resource_t *) lpc->data;
pe__clear_resource_flags_recursive(r, flag);
}
}
void
-pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags)
+pe__set_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags)
{
pe__set_resource_flags(rsc, flags);
for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
- pe__set_resource_flags_recursive((pe_resource_t *) gIter->data, flags);
+ pe__set_resource_flags_recursive((pcmk_resource_t *) gIter->data,
+ flags);
}
}
void
-trigger_unfencing(pe_resource_t *rsc, pe_node_t *node, const char *reason,
- pe_action_t *dependency, pe_working_set_t *data_set)
+trigger_unfencing(pcmk_resource_t *rsc, pcmk_node_t *node, const char *reason,
+ pcmk_action_t *dependency, pcmk_scheduler_t *scheduler)
{
- if (!pcmk_is_set(data_set->flags, pe_flag_enable_unfencing)) {
+ if (!pcmk_is_set(scheduler->flags, pcmk_sched_enable_unfencing)) {
/* No resources require it */
return;
} else if ((rsc != NULL)
- && !pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
+ && !pcmk_is_set(rsc->flags, pcmk_rsc_fence_device)) {
/* Wasn't a stonith device */
return;
@@ -629,10 +594,11 @@ trigger_unfencing(pe_resource_t *rsc, pe_node_t *node, const char *reason,
&& node->details->online
&& node->details->unclean == FALSE
&& node->details->shutdown == FALSE) {
- pe_action_t *unfence = pe_fence_op(node, "on", FALSE, reason, FALSE, data_set);
+ pcmk_action_t *unfence = pe_fence_op(node, PCMK_ACTION_ON, FALSE,
+ reason, FALSE, scheduler);
if(dependency) {
- order_actions(unfence, dependency, pe_order_optional);
+ order_actions(unfence, dependency, pcmk__ar_ordered);
}
} else if(rsc) {
@@ -641,7 +607,7 @@ trigger_unfencing(pe_resource_t *rsc, pe_node_t *node, const char *reason,
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
if(node->details->online && node->details->unclean == FALSE && node->details->shutdown == FALSE) {
- trigger_unfencing(rsc, node, reason, dependency, data_set);
+ trigger_unfencing(rsc, node, reason, dependency, scheduler);
}
}
}
@@ -650,7 +616,7 @@ trigger_unfencing(pe_resource_t *rsc, pe_node_t *node, const char *reason,
gboolean
add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref)
{
- pe_tag_t *tag = NULL;
+ pcmk_tag_t *tag = NULL;
GList *gIter = NULL;
gboolean is_existing = FALSE;
@@ -658,7 +624,7 @@ add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref)
tag = g_hash_table_lookup(tags, tag_name);
if (tag == NULL) {
- tag = calloc(1, sizeof(pe_tag_t));
+ tag = calloc(1, sizeof(pcmk_tag_t));
if (tag == NULL) {
return FALSE;
}
@@ -697,7 +663,7 @@ add_tag_ref(GHashTable * tags, const char * tag_name, const char * obj_ref)
* shutdown of remote nodes by virtue of their connection stopping.
*/
bool
-pe__shutdown_requested(const pe_node_t *node)
+pe__shutdown_requested(const pcmk_node_t *node)
{
const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
@@ -706,18 +672,22 @@ pe__shutdown_requested(const pe_node_t *node)
/*!
* \internal
- * \brief Update a data set's "recheck by" time
+ * \brief Update "recheck by" time in scheduler data
*
- * \param[in] recheck Epoch time when recheck should happen
- * \param[in,out] data_set Current working set
+ * \param[in] recheck Epoch time when recheck should happen
+ * \param[in,out] scheduler Scheduler data
+ * \param[in] reason What time is being updated for (for logs)
*/
void
-pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set)
+pe__update_recheck_time(time_t recheck, pcmk_scheduler_t *scheduler,
+ const char *reason)
{
- if ((recheck > get_effective_time(data_set))
- && ((data_set->recheck_by == 0)
- || (data_set->recheck_by > recheck))) {
- data_set->recheck_by = recheck;
+ if ((recheck > get_effective_time(scheduler))
+ && ((scheduler->recheck_by == 0)
+ || (scheduler->recheck_by > recheck))) {
+ scheduler->recheck_by = recheck;
+ crm_debug("Updated next scheduler recheck to %s for %s",
+ pcmk__trim(ctime(&recheck)), reason);
}
}
@@ -731,28 +701,28 @@ pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set)
* \param[out] hash Where to store extracted name/value pairs
* \param[in] always_first If not NULL, process block with this ID first
* \param[in] overwrite Whether to replace existing values with same name
- * \param[in,out] data_set Cluster working set containing \p xml_obj
+ * \param[in,out] scheduler Scheduler data containing \p xml_obj
*/
void
pe__unpack_dataset_nvpairs(const xmlNode *xml_obj, const char *set_name,
const pe_rule_eval_data_t *rule_data,
GHashTable *hash, const char *always_first,
- gboolean overwrite, pe_working_set_t *data_set)
+ gboolean overwrite, pcmk_scheduler_t *scheduler)
{
crm_time_t *next_change = crm_time_new_undefined();
- pe_eval_nvpairs(data_set->input, xml_obj, set_name, rule_data, hash,
+ pe_eval_nvpairs(scheduler->input, xml_obj, set_name, rule_data, hash,
always_first, overwrite, next_change);
if (crm_time_is_defined(next_change)) {
time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change);
- pe__update_recheck_time(recheck, data_set);
+ pe__update_recheck_time(recheck, scheduler, "rule evaluation");
}
crm_time_free(next_change);
}
bool
-pe__resource_is_disabled(const pe_resource_t *rsc)
+pe__resource_is_disabled(const pcmk_resource_t *rsc)
{
const char *target_role = NULL;
@@ -761,10 +731,10 @@ pe__resource_is_disabled(const pe_resource_t *rsc)
if (target_role) {
enum rsc_role_e target_role_e = text2role(target_role);
- if ((target_role_e == RSC_ROLE_STOPPED)
- || ((target_role_e == RSC_ROLE_UNPROMOTED)
+ if ((target_role_e == pcmk_role_stopped)
+ || ((target_role_e == pcmk_role_unpromoted)
&& pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
- pe_rsc_promotable))) {
+ pcmk_rsc_promotable))) {
return true;
}
}
@@ -781,17 +751,17 @@ pe__resource_is_disabled(const pe_resource_t *rsc)
* \return true if \p rsc is running only on \p node, otherwise false
*/
bool
-pe__rsc_running_on_only(const pe_resource_t *rsc, const pe_node_t *node)
+pe__rsc_running_on_only(const pcmk_resource_t *rsc, const pcmk_node_t *node)
{
return (rsc != NULL) && pcmk__list_of_1(rsc->running_on)
- && pe__same_node((const pe_node_t *) rsc->running_on->data, node);
+ && pe__same_node((const pcmk_node_t *) rsc->running_on->data, node);
}
bool
-pe__rsc_running_on_any(pe_resource_t *rsc, GList *node_list)
+pe__rsc_running_on_any(pcmk_resource_t *rsc, GList *node_list)
{
for (GList *ele = rsc->running_on; ele; ele = ele->next) {
- pe_node_t *node = (pe_node_t *) ele->data;
+ pcmk_node_t *node = (pcmk_node_t *) ele->data;
if (pcmk__str_in_list(node->details->uname, node_list,
pcmk__str_star_matches|pcmk__str_casei)) {
return true;
@@ -802,7 +772,7 @@ pe__rsc_running_on_any(pe_resource_t *rsc, GList *node_list)
}
bool
-pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GList *only_node)
+pcmk__rsc_filtered_by_node(pcmk_resource_t *rsc, GList *only_node)
{
return (rsc->fns->active(rsc, FALSE) && !pe__rsc_running_on_any(rsc, only_node));
}
@@ -813,7 +783,7 @@ pe__filter_rsc_list(GList *rscs, GList *filter)
GList *retval = NULL;
for (GList *gIter = rscs; gIter; gIter = gIter->next) {
- pe_resource_t *rsc = (pe_resource_t *) gIter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data;
/* I think the second condition is safe here for all callers of this
* function. If not, it needs to move into pe__node_text.
@@ -828,7 +798,8 @@ pe__filter_rsc_list(GList *rscs, GList *filter)
}
GList *
-pe__build_node_name_list(pe_working_set_t *data_set, const char *s) {
+pe__build_node_name_list(pcmk_scheduler_t *scheduler, const char *s)
+{
GList *nodes = NULL;
if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) {
@@ -838,7 +809,7 @@ pe__build_node_name_list(pe_working_set_t *data_set, const char *s) {
*/
nodes = g_list_prepend(nodes, strdup("*"));
} else {
- pe_node_t *node = pe_find_node(data_set->nodes, s);
+ pcmk_node_t *node = pe_find_node(scheduler->nodes, s);
if (node) {
/* The given string was a valid uname for a node. Return a
@@ -852,7 +823,7 @@ pe__build_node_name_list(pe_working_set_t *data_set, const char *s) {
* second case, we'll return a NULL pointer and nothing will
* get displayed.
*/
- nodes = pe__unames_with_tag(data_set, s);
+ nodes = pe__unames_with_tag(scheduler, s);
}
}
@@ -860,14 +831,16 @@ pe__build_node_name_list(pe_working_set_t *data_set, const char *s) {
}
GList *
-pe__build_rsc_list(pe_working_set_t *data_set, const char *s) {
+pe__build_rsc_list(pcmk_scheduler_t *scheduler, const char *s)
+{
GList *resources = NULL;
if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) {
resources = g_list_prepend(resources, strdup("*"));
} else {
- pe_resource_t *rsc = pe_find_resource_with_flags(data_set->resources, s,
- pe_find_renamed|pe_find_any);
+ const uint32_t flags = pcmk_rsc_match_history|pcmk_rsc_match_basename;
+ pcmk_resource_t *rsc = pe_find_resource_with_flags(scheduler->resources,
+ s, flags);
if (rsc) {
/* A colon in the name we were given means we're being asked to filter
@@ -885,7 +858,7 @@ pe__build_rsc_list(pe_working_set_t *data_set, const char *s) {
* typo or something. See pe__build_node_name_list() for more
* detail.
*/
- resources = pe__rscs_with_tag(data_set, s);
+ resources = pe__rscs_with_tag(scheduler, s);
}
}
@@ -893,12 +866,12 @@ pe__build_rsc_list(pe_working_set_t *data_set, const char *s) {
}
xmlNode *
-pe__failed_probe_for_rsc(const pe_resource_t *rsc, const char *name)
+pe__failed_probe_for_rsc(const pcmk_resource_t *rsc, const char *name)
{
- const pe_resource_t *parent = pe__const_top_resource(rsc, false);
+ const pcmk_resource_t *parent = pe__const_top_resource(rsc, false);
const char *rsc_id = rsc->id;
- if (parent->variant == pe_clone) {
+ if (parent->variant == pcmk_rsc_variant_clone) {
rsc_id = pe__clone_child_id(parent);
}
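Among the utils.c changes above, order_actions() now takes a uint32_t flag set (pcmk__ar_*) instead of enum pe_ordering, and the wrapper type becomes pcmk__related_action_t. The standalone sketch below mirrors that bookkeeping (reject an empty flag set and duplicates, then link the actions on both lists); the types, flag values, and build line are simplified stand-ins, not Pacemaker's real definitions:

/*
 * Standalone illustration of the ordering bookkeeping shown above.
 * Assumed build: gcc sketch.c $(pkg-config --cflags --libs glib-2.0)
 */
#include <glib.h>
#include <stdint.h>
#include <stdio.h>

#define AR_NONE     0x0U
#define AR_ORDERED  0x1U        /* stand-in for pcmk__ar_ordered */

typedef struct action_s {
    const char *id;
    GList *actions_before;      /* entries are related_action_t * */
    GList *actions_after;       /* entries are related_action_t * */
} action_t;

typedef struct related_action_s {
    action_t *action;
    uint32_t type;
} related_action_t;

static gboolean
order_actions_sketch(action_t *first, action_t *then, uint32_t flags)
{
    if (flags == AR_NONE) {
        return FALSE;
    }

    /* Filter duplicates: an existing wrapper for the same action and flags */
    for (GList *iter = first->actions_after; iter != NULL; iter = iter->next) {
        related_action_t *after = iter->data;

        if ((after->action == then) && (after->type & flags)) {
            return FALSE;
        }
    }

    related_action_t *wrapper = g_new0(related_action_t, 1);

    wrapper->action = then;
    wrapper->type = flags;
    first->actions_after = g_list_prepend(first->actions_after, wrapper);

    wrapper = g_new0(related_action_t, 1);
    wrapper->action = first;
    wrapper->type = flags;
    then->actions_before = g_list_prepend(then->actions_before, wrapper);
    return TRUE;                /* cleanup of the lists omitted in this sketch */
}

int
main(void)
{
    action_t stop  = { "rsc1_stop_0",  NULL, NULL };
    action_t start = { "rsc2_start_0", NULL, NULL };

    printf("first call: %d\n", order_actions_sketch(&stop, &start, AR_ORDERED));
    printf("duplicate:  %d\n", order_actions_sketch(&stop, &start, AR_ORDERED));
    return 0;
}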
diff --git a/lib/pengine/variant.h b/lib/pengine/variant.h
deleted file mode 100644
index daa3781..0000000
--- a/lib/pengine/variant.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright 2004-2022 the Pacemaker project contributors
- *
- * The version control history for this file may have further details.
- *
- * This source code is licensed under the GNU Lesser General Public License
- * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
- */
-
-#ifndef PE_VARIANT__H
-# define PE_VARIANT__H
-
-# if PE__VARIANT_BUNDLE
-
-typedef struct {
- int offset;
- char *ipaddr;
- pe_node_t *node;
- pe_resource_t *ip;
- pe_resource_t *child;
- pe_resource_t *container;
- pe_resource_t *remote;
-} pe__bundle_replica_t;
-
-enum pe__bundle_mount_flags {
- pe__bundle_mount_none = 0x00,
-
- // mount instance-specific subdirectory rather than source directly
- pe__bundle_mount_subdir = 0x01
-};
-
-typedef struct {
- char *source;
- char *target;
- char *options;
- uint32_t flags; // bitmask of pe__bundle_mount_flags
-} pe__bundle_mount_t;
-
-typedef struct {
- char *source;
- char *target;
-} pe__bundle_port_t;
-
-enum pe__container_agent {
- PE__CONTAINER_AGENT_UNKNOWN,
- PE__CONTAINER_AGENT_DOCKER,
- PE__CONTAINER_AGENT_RKT,
- PE__CONTAINER_AGENT_PODMAN,
-};
-
-#define PE__CONTAINER_AGENT_UNKNOWN_S "unknown"
-#define PE__CONTAINER_AGENT_DOCKER_S "docker"
-#define PE__CONTAINER_AGENT_RKT_S "rkt"
-#define PE__CONTAINER_AGENT_PODMAN_S "podman"
-
-typedef struct pe__bundle_variant_data_s {
- int promoted_max;
- int nreplicas;
- int nreplicas_per_host;
- char *prefix;
- char *image;
- const char *ip_last;
- char *host_network;
- char *host_netmask;
- char *control_port;
- char *container_network;
- char *ip_range_start;
- gboolean add_host;
- gchar *container_host_options;
- char *container_command;
- char *launcher_options;
- const char *attribute_target;
-
- pe_resource_t *child;
-
- GList *replicas; // pe__bundle_replica_t *
- GList *ports; // pe__bundle_port_t *
- GList *mounts; // pe__bundle_mount_t *
-
- enum pe__container_agent agent_type;
-} pe__bundle_variant_data_t;
-
-# define get_bundle_variant_data(data, rsc) \
- CRM_ASSERT(rsc != NULL); \
- CRM_ASSERT(rsc->variant == pe_container); \
- CRM_ASSERT(rsc->variant_opaque != NULL); \
- data = (pe__bundle_variant_data_t *)rsc->variant_opaque; \
-
-# endif
-
-#endif
diff --git a/lib/services/Makefile.am b/lib/services/Makefile.am
index a7e10c9..5a19003 100644
--- a/lib/services/Makefile.am
+++ b/lib/services/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2012-2021 the Pacemaker project contributors
+# Copyright 2012-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -12,19 +12,18 @@ MAINTAINERCLEANFILES = Makefile.in
AM_CPPFLAGS = -I$(top_srcdir)/include
lib_LTLIBRARIES = libcrmservice.la
-noinst_HEADERS = pcmk-dbus.h upstart.h systemd.h \
- services_lsb.h services_nagios.h \
- services_ocf.h \
- services_private.h
+noinst_HEADERS = $(wildcard *.h)
-libcrmservice_la_LDFLAGS = -version-info 31:2:3
+libcrmservice_la_LDFLAGS = -version-info 32:0:4
libcrmservice_la_CFLAGS =
libcrmservice_la_CFLAGS += $(CFLAGS_HARDENED_LIB)
libcrmservice_la_LDFLAGS += $(LDFLAGS_HARDENED_LIB)
-libcrmservice_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la $(DBUS_LIBS)
+libcrmservice_la_LIBADD = $(top_builddir)/lib/common/libcrmcommon.la \
+ $(DBUS_LIBS)
+## Library sources (*must* use += format for bumplibs)
libcrmservice_la_SOURCES = services.c
libcrmservice_la_SOURCES += services_linux.c
libcrmservice_la_SOURCES += services_lsb.c
diff --git a/lib/services/dbus.c b/lib/services/dbus.c
index f052c0a..8a517d2 100644
--- a/lib/services/dbus.c
+++ b/lib/services/dbus.c
@@ -594,6 +594,8 @@ handle_query_result(DBusMessage *reply, struct property_query *data)
DBusMessageIter variant_iter;
DBusBasicValue value;
+ dbus_error_init(&error);
+
// First, check if the reply contains an error
if (pcmk_dbus_find_error((void*)&error, reply, &error)) {
crm_err("DBus query for %s property '%s' failed: %s",
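The dbus.c hunk above adds a dbus_error_init() call so the DBusError struct is in a defined state before pcmk_dbus_find_error() inspects the reply. For reference, a minimal standalone sketch of that libdbus idiom; the bus-connection call is only an example target, not what the patched function does:

/*
 * Illustration only. Assumed build:
 *   gcc sketch.c $(pkg-config --cflags --libs dbus-1)
 */
#include <dbus/dbus.h>
#include <stdio.h>

int
main(void)
{
    DBusError error;
    DBusConnection *bus = NULL;

    dbus_error_init(&error);            /* defined state before first use */
    bus = dbus_bus_get(DBUS_BUS_SYSTEM, &error);
    if (dbus_error_is_set(&error)) {
        fprintf(stderr, "D-Bus error: %s (%s)\n", error.message, error.name);
        dbus_error_free(&error);        /* struct is safe to reuse afterwards */
        return 1;
    }
    if (bus != NULL) {
        dbus_connection_unref(bus);
    }
    return 0;
}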
diff --git a/lib/services/services.c b/lib/services/services.c
index b60d8bd..e438443 100644
--- a/lib/services/services.c
+++ b/lib/services/services.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2010-2022 the Pacemaker project contributors
+ * Copyright 2010-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -233,8 +233,8 @@ copy_action_arguments(svc_action_t *op, uint32_t ra_caps, const char *name,
}
if (pcmk_is_set(ra_caps, pcmk_ra_cap_status)
- && pcmk__str_eq(action, "monitor", pcmk__str_casei)) {
- action = "status";
+ && pcmk__str_eq(action, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
+ action = PCMK_ACTION_STATUS;
}
op->action = strdup(action);
if (op->action == NULL) {
@@ -1028,7 +1028,7 @@ services_action_sync(svc_action_t * op)
op->synchronous = true;
- if (pcmk__str_eq(op->action, "meta-data", pcmk__str_casei)) {
+ if (pcmk__str_eq(op->action, PCMK_ACTION_META_DATA, pcmk__str_casei)) {
/* Synchronous meta-data operations are handled specially. Since most
* resource classes don't provide any meta-data, it has to be
* synthesized from available information about the agent.
diff --git a/lib/services/services_linux.c b/lib/services/services_linux.c
index fb12f73..c7792f0 100644
--- a/lib/services/services_linux.c
+++ b/lib/services/services_linux.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2010-2022 the Pacemaker project contributors
+ * Copyright 2010-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -725,7 +725,7 @@ services__generic_error(const svc_action_t *op)
}
if (pcmk__str_eq(op->standard, PCMK_RESOURCE_CLASS_LSB, pcmk__str_casei)
- && pcmk__str_eq(op->action, "status", pcmk__str_casei)) {
+ && pcmk__str_eq(op->action, PCMK_ACTION_STATUS, pcmk__str_casei)) {
return PCMK_LSB_STATUS_UNKNOWN;
}
@@ -760,7 +760,7 @@ services__not_installed_error(const svc_action_t *op)
}
if (pcmk__str_eq(op->standard, PCMK_RESOURCE_CLASS_LSB, pcmk__str_casei)
- && pcmk__str_eq(op->action, "status", pcmk__str_casei)) {
+ && pcmk__str_eq(op->action, PCMK_ACTION_STATUS, pcmk__str_casei)) {
return PCMK_LSB_STATUS_NOT_INSTALLED;
}
@@ -795,7 +795,7 @@ services__authorization_error(const svc_action_t *op)
}
if (pcmk__str_eq(op->standard, PCMK_RESOURCE_CLASS_LSB, pcmk__str_casei)
- && pcmk__str_eq(op->action, "status", pcmk__str_casei)) {
+ && pcmk__str_eq(op->action, PCMK_ACTION_STATUS, pcmk__str_casei)) {
return PCMK_LSB_STATUS_INSUFFICIENT_PRIV;
}
@@ -831,7 +831,7 @@ services__configuration_error(const svc_action_t *op, bool is_fatal)
}
if (pcmk__str_eq(op->standard, PCMK_RESOURCE_CLASS_LSB, pcmk__str_casei)
- && pcmk__str_eq(op->action, "status", pcmk__str_casei)) {
+ && pcmk__str_eq(op->action, PCMK_ACTION_STATUS, pcmk__str_casei)) {
return PCMK_LSB_NOT_CONFIGURED;
}
@@ -954,7 +954,7 @@ action_launch_child(svc_action_t *op)
#if SUPPORT_CIBSECRETS
rc = pcmk__substitute_secrets(op->rsc, op->params);
if (rc != pcmk_rc_ok) {
- if (pcmk__str_eq(op->action, "stop", pcmk__str_casei)) {
+ if (pcmk__str_eq(op->action, PCMK_ACTION_STOP, pcmk__str_casei)) {
crm_info("Proceeding with stop operation for %s "
"despite being unable to load CIB secrets (%s)",
op->rsc, pcmk_rc_str(rc));
@@ -1178,7 +1178,7 @@ services__execute_file(svc_action_t *op)
if (stat(op->opaque->exec, &st) != 0) {
rc = errno;
crm_info("Cannot execute '%s': %s " CRM_XS " stat rc=%d",
- op->opaque->exec, pcmk_strerror(rc), rc);
+ op->opaque->exec, pcmk_rc_str(rc), rc);
services__handle_exec_error(op, rc);
goto done;
}
@@ -1186,7 +1186,7 @@ services__execute_file(svc_action_t *op)
if (pipe(stdout_fd) < 0) {
rc = errno;
crm_info("Cannot execute '%s': %s " CRM_XS " pipe(stdout) rc=%d",
- op->opaque->exec, pcmk_strerror(rc), rc);
+ op->opaque->exec, pcmk_rc_str(rc), rc);
services__handle_exec_error(op, rc);
goto done;
}
@@ -1197,7 +1197,7 @@ services__execute_file(svc_action_t *op)
close_pipe(stdout_fd);
crm_info("Cannot execute '%s': %s " CRM_XS " pipe(stderr) rc=%d",
- op->opaque->exec, pcmk_strerror(rc), rc);
+ op->opaque->exec, pcmk_rc_str(rc), rc);
services__handle_exec_error(op, rc);
goto done;
}
@@ -1210,7 +1210,7 @@ services__execute_file(svc_action_t *op)
close_pipe(stderr_fd);
crm_info("Cannot execute '%s': %s " CRM_XS " pipe(stdin) rc=%d",
- op->opaque->exec, pcmk_strerror(rc), rc);
+ op->opaque->exec, pcmk_rc_str(rc), rc);
services__handle_exec_error(op, rc);
goto done;
}
@@ -1235,7 +1235,7 @@ services__execute_file(svc_action_t *op)
close_pipe(stderr_fd);
crm_info("Cannot execute '%s': %s " CRM_XS " fork rc=%d",
- op->opaque->exec, pcmk_strerror(rc), rc);
+ op->opaque->exec, pcmk_rc_str(rc), rc);
services__handle_exec_error(op, rc);
if (op->synchronous) {
sigchld_cleanup(&data);
diff --git a/lib/services/services_lsb.c b/lib/services/services_lsb.c
index 134cc70..9ad7025 100644
--- a/lib/services/services_lsb.c
+++ b/lib/services/services_lsb.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2010-2022 the Pacemaker project contributors
+ * Copyright 2010-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -290,7 +290,8 @@ enum ocf_exitcode
services__lsb2ocf(const char *action, int exit_status)
{
// For non-status actions, LSB and OCF share error codes <= 7
- if (!pcmk__str_any_of(action, "status", "monitor", NULL)) {
+ if (!pcmk__str_any_of(action, PCMK_ACTION_STATUS, PCMK_ACTION_MONITOR,
+ NULL)) {
if ((exit_status < 0) || (exit_status > PCMK_LSB_NOT_RUNNING)) {
return PCMK_OCF_UNKNOWN_ERROR;
}
diff --git a/lib/services/services_nagios.c b/lib/services/services_nagios.c
index abddca8..10759b5 100644
--- a/lib/services/services_nagios.c
+++ b/lib/services/services_nagios.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2010-2022 the Pacemaker project contributors
+ * Copyright 2010-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -49,7 +49,7 @@ services__nagios_prepare(svc_action_t *op)
return ENOMEM;
}
- if (pcmk__str_eq(op->action, "monitor", pcmk__str_casei)
+ if (pcmk__str_eq(op->action, PCMK_ACTION_MONITOR, pcmk__str_casei)
&& (op->interval_ms == 0)) {
// Invoke --version for a nagios probe
diff --git a/lib/services/systemd.c b/lib/services/systemd.c
index 0c38ae0..ecac86c 100644
--- a/lib/services/systemd.c
+++ b/lib/services/systemd.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-2022 the Pacemaker project contributors
+ * Copyright 2012-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -308,7 +308,7 @@ set_result_from_method_error(svc_action_t *op, const DBusError *error)
|| strstr(error->name, "org.freedesktop.systemd1.LoadFailed")
|| strstr(error->name, "org.freedesktop.systemd1.NoSuchUnit")) {
- if (pcmk__str_eq(op->action, "stop", pcmk__str_casei)) {
+ if (pcmk__str_eq(op->action, PCMK_ACTION_STOP, pcmk__str_casei)) {
crm_trace("Masking systemd stop failure (%s) for %s "
"because unknown service can be considered stopped",
error->name, pcmk__s(op->rsc, "unknown resource"));
@@ -459,7 +459,11 @@ invoke_unit_by_name(const char *arg_name, svc_action_t *op, char **path)
CRM_ASSERT(msg != NULL);
// Add the (expanded) unit name as the argument
- name = systemd_service_name(arg_name, op == NULL || pcmk__str_eq(op->action, "meta-data", pcmk__str_none));
+ name = systemd_service_name(arg_name,
+ (op == NULL)
+ || pcmk__str_eq(op->action,
+ PCMK_ACTION_META_DATA,
+ pcmk__str_none));
CRM_LOG_ASSERT(dbus_message_append_args(msg, DBUS_TYPE_STRING, &name,
DBUS_TYPE_INVALID));
free(name);
@@ -717,6 +721,8 @@ process_unit_method_reply(DBusMessage *reply, svc_action_t *op)
{
DBusError error;
+ dbus_error_init(&error);
+
/* The first use of error here is not used other than as a non-NULL flag to
* indicate that a request was indeed sent
*/
@@ -932,7 +938,8 @@ invoke_unit_by_path(svc_action_t *op, const char *unit)
DBusMessage *msg = NULL;
DBusMessage *reply = NULL;
- if (pcmk__str_any_of(op->action, "monitor", "status", NULL)) {
+ if (pcmk__str_any_of(op->action, PCMK_ACTION_MONITOR, PCMK_ACTION_STATUS,
+ NULL)) {
DBusPendingCall *pending = NULL;
char *state;
@@ -955,11 +962,11 @@ invoke_unit_by_path(svc_action_t *op, const char *unit)
}
return;
- } else if (pcmk__str_eq(op->action, "start", pcmk__str_none)) {
+ } else if (pcmk__str_eq(op->action, PCMK_ACTION_START, pcmk__str_none)) {
method = "StartUnit";
systemd_create_override(op->agent, op->timeout);
- } else if (pcmk__str_eq(op->action, "stop", pcmk__str_none)) {
+ } else if (pcmk__str_eq(op->action, PCMK_ACTION_STOP, pcmk__str_none)) {
method = "StopUnit";
systemd_remove_override(op->agent, op->timeout);
@@ -988,7 +995,10 @@ invoke_unit_by_path(svc_action_t *op, const char *unit)
/* (ss) */
{
const char *replace_s = "replace";
- char *name = systemd_service_name(op->agent, pcmk__str_eq(op->action, "meta-data", pcmk__str_none));
+ char *name = systemd_service_name(op->agent,
+ pcmk__str_eq(op->action,
+ PCMK_ACTION_META_DATA,
+ pcmk__str_none));
CRM_LOG_ASSERT(dbus_message_append_args(msg, DBUS_TYPE_STRING, &name, DBUS_TYPE_INVALID));
CRM_LOG_ASSERT(dbus_message_append_args(msg, DBUS_TYPE_STRING, &replace_s, DBUS_TYPE_INVALID));
@@ -1072,7 +1082,7 @@ services__execute_systemd(svc_action_t *op)
(op->synchronous? "" : "a"), op->action, op->agent,
((op->rsc == NULL)? "" : " for resource "), pcmk__s(op->rsc, ""));
- if (pcmk__str_eq(op->action, "meta-data", pcmk__str_casei)) {
+ if (pcmk__str_eq(op->action, PCMK_ACTION_META_DATA, pcmk__str_casei)) {
op->stdout_data = systemd_unit_metadata(op->agent, op->timeout);
services__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
goto done;
diff --git a/lib/services/upstart.c b/lib/services/upstart.c
index 459b572..2306e73 100644
--- a/lib/services/upstart.c
+++ b/lib/services/upstart.c
@@ -1,7 +1,7 @@
/*
* Original copyright 2010 Senko Rasic <senko.rasic@dobarkod.hr>
* and Ante Karamatic <ivoks@init.hr>
- * Later changes copyright 2012-2022 the Pacemaker project contributors
+ * Later changes copyright 2012-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -412,7 +412,7 @@ set_result_from_method_error(svc_action_t *op, const DBusError *error)
if (strstr(error->name, UPSTART_06_API ".Error.UnknownInstance")) {
- if (pcmk__str_eq(op->action, "stop", pcmk__str_casei)) {
+ if (pcmk__str_eq(op->action, PCMK_ACTION_STOP, pcmk__str_casei)) {
crm_trace("Masking stop failure (%s) for %s "
"because unknown service can be considered stopped",
error->name, pcmk__s(op->rsc, "unknown resource"));
@@ -423,7 +423,7 @@ set_result_from_method_error(svc_action_t *op, const DBusError *error)
services__set_result(op, PCMK_OCF_NOT_INSTALLED,
PCMK_EXEC_NOT_INSTALLED, "Upstart job not found");
- } else if (pcmk__str_eq(op->action, "start", pcmk__str_casei)
+ } else if (pcmk__str_eq(op->action, PCMK_ACTION_START, pcmk__str_casei)
&& strstr(error->name, UPSTART_06_API ".Error.AlreadyStarted")) {
crm_trace("Masking start failure (%s) for %s "
"because already started resource is OK",
@@ -462,7 +462,7 @@ job_method_complete(DBusPendingCall *pending, void *user_data)
set_result_from_method_error(op, &error);
dbus_error_free(&error);
- } else if (pcmk__str_eq(op->action, "stop", pcmk__str_none)) {
+ } else if (pcmk__str_eq(op->action, PCMK_ACTION_STOP, pcmk__str_none)) {
// Call has no return value
crm_debug("DBus request for stop of %s succeeded",
pcmk__s(op->rsc, "unknown resource"));
@@ -539,14 +539,14 @@ services__execute_upstart(svc_action_t *op)
goto cleanup;
}
- if (pcmk__str_eq(op->action, "meta-data", pcmk__str_casei)) {
+ if (pcmk__str_eq(op->action, PCMK_ACTION_META_DATA, pcmk__str_casei)) {
op->stdout_data = upstart_job_metadata(op->agent);
services__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
goto cleanup;
}
if (!object_path_for_job(op->agent, &job, op->timeout)) {
- if (pcmk__str_eq(action, "stop", pcmk__str_none)) {
+ if (pcmk__str_eq(action, PCMK_ACTION_STOP, pcmk__str_none)) {
services__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
} else {
services__set_result(op, PCMK_OCF_NOT_INSTALLED,
@@ -563,7 +563,8 @@ services__execute_upstart(svc_action_t *op)
goto cleanup;
}
- if (pcmk__strcase_any_of(op->action, "monitor", "status", NULL)) {
+ if (pcmk__strcase_any_of(op->action, PCMK_ACTION_MONITOR,
+ PCMK_ACTION_STATUS, NULL)) {
DBusPendingCall *pending = NULL;
char *state = NULL;
char *path = get_first_instance(job, op->timeout);
@@ -598,10 +599,10 @@ services__execute_upstart(svc_action_t *op)
goto cleanup;
- } else if (pcmk__str_eq(action, "start", pcmk__str_none)) {
+ } else if (pcmk__str_eq(action, PCMK_ACTION_START, pcmk__str_none)) {
action = "Start";
- } else if (pcmk__str_eq(action, "stop", pcmk__str_none)) {
+ } else if (pcmk__str_eq(action, PCMK_ACTION_STOP, pcmk__str_none)) {
action = "Stop";
} else if (pcmk__str_eq(action, "restart", pcmk__str_none)) {
@@ -665,7 +666,7 @@ services__execute_upstart(svc_action_t *op)
set_result_from_method_error(op, &error);
dbus_error_free(&error);
- } else if (pcmk__str_eq(op->action, "stop", pcmk__str_none)) {
+ } else if (pcmk__str_eq(op->action, PCMK_ACTION_STOP, pcmk__str_none)) {
// DBus call does not return a value
services__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
diff --git a/m4/REQUIRE_PROG.m4 b/m4/REQUIRE_PROG.m4
new file mode 100644
index 0000000..118c07d
--- /dev/null
+++ b/m4/REQUIRE_PROG.m4
@@ -0,0 +1,18 @@
+# REQUIRE_PROG([variable], [prog-to-check-for])
+#
+# Error if a program can't be found on the path
+#
+dnl
+dnl Copyright 2023 the Pacemaker project contributors
+dnl
+dnl The version control history for this file may have further details.
+dnl
+dnl This source code is licensed under the GNU General Public License version 2
+dnl or later (GPLv2+) WITHOUT ANY WARRANTY.
+
+dnl Usage: REQUIRE_PROG([variable], [prog-to-check-for])
+AC_DEFUN([REQUIRE_PROG], [
+ AC_PATH_PROG([$1], [$2])
+ AS_IF([test x"$(eval echo "\${$1}")" != x""], [],
+ [AC_MSG_FAILURE([Could not find required build tool $2])])
+])
diff --git a/m4/version.m4 b/m4/version.m4
index 24dc5ac..f469bba 100644
--- a/m4/version.m4
+++ b/m4/version.m4
@@ -1,2 +1,2 @@
-m4_define([VERSION_NUMBER], [2.1.6])
+m4_define([VERSION_NUMBER], [2.1.7])
m4_define([PCMK_URL], [https://ClusterLabs.org/pacemaker/])
diff --git a/maint/Makefile.am b/maint/Makefile.am
index 788dd46..bfdbfaf 100644
--- a/maint/Makefile.am
+++ b/maint/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2019-2022 the Pacemaker project contributors
+# Copyright 2019-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -18,14 +18,18 @@ EXTRA_DIST = README
#
# Count changes in these directories
-CHANGELOG_DIRS = ../include ../lib ../daemons ../tools ../xml
+CHANGELOG_DIRS = ../include \
+ ../lib \
+ ../daemons \
+ ../tools \
+ ../xml
.PHONY: require_last_release
require_last_release:
@if [ -z "$(CHECKOUT)" ]; then \
echo "This target must be run from a git checkout"; \
exit 1; \
- elif ! git rev-parse $(LAST_RELEASE) >/dev/null 2>&1; then \
+ elif ! "$(GIT)" rev-parse $(LAST_RELEASE) >/dev/null 2>&1; then \
echo "LAST_RELEASE must be set to a valid git tag"; \
exit 1; \
fi
@@ -33,22 +37,23 @@ require_last_release:
.PHONY: summary
summary: require_last_release
@printf "* %s %s <%s> %s\n" "$$(date +'%a %b %d %Y')" \
- "$$(git config user.name)" "$$(git config user.email)" \
+ "$$("$(GIT)" config user.name)" \
+ "$$("$(GIT)" config user.email)" \
"$(NEXT_RELEASE)"
@printf "\055 %d commits with%s\n" \
- "$$(git log --pretty=oneline --no-merges \
+ "$$("$(GIT)" log --pretty=oneline --no-merges \
$(LAST_RELEASE)..HEAD | wc -l)" \
- "$$(git diff $(LAST_RELEASE)..HEAD --shortstat \
+ "$$("$(GIT)" diff $(LAST_RELEASE)..HEAD --shortstat \
$(CHANGELOG_DIRS))"
.PHONY: changes
changes: summary
@printf "\n- Features added since $(LAST_RELEASE)\n"
- @git log --pretty=format:'%s' --no-merges \
+ @"$(GIT)" log --pretty=format:'%s' --no-merges \
--abbrev-commit $(LAST_RELEASE)..HEAD \
| sed -n -e 's/^ *Feature: */ + /p' | sort -uf
@printf "\n- Fixes since $(LAST_RELEASE)\n"
- @git log --pretty=format:'%s' --no-merges \
+ @"$(GIT)" log --pretty=format:'%s' --no-merges \
--abbrev-commit $(LAST_RELEASE)..HEAD \
| sed -n -e 's/^ *\(Fix\|High\|Bug\): */ + /p' | sed \
-e 's/\(cib\|pacemaker-based\|based\):/CIB:/' \
@@ -58,7 +63,7 @@ changes: summary
-e 's/\(PE\|pengine\|pacemaker-schedulerd\|schedulerd\):/scheduler:/' \
| sort -uf
@printf "\n- Public API changes since $(LAST_RELEASE)\n"
- @git log --pretty=format:'%s' --no-merges \
+ @"$(GIT)" log --pretty=format:'%s' --no-merges \
--abbrev-commit $(LAST_RELEASE)..HEAD \
| sed -n -e 's/^ *API: */ + /p' | sort -uf
@@ -71,7 +76,7 @@ changelog: require_last_release
.PHONY: authors
authors: require_last_release
- git log $(LAST_RELEASE)..$(COMMIT) --format='%an' | sort -u
+ "$(GIT)" log $(LAST_RELEASE)..$(COMMIT) --format='%an' | sort -u
#
# gnulib updates
@@ -91,17 +96,22 @@ gnulib-update:
@echo 'Pacemaker cannot update until minimum supported automake is 1.14'
@exit 1
if test -e gnulib; then \
- cd gnulib && git pull; \
+ cd gnulib && "$(GIT)" pull; \
else \
- git clone https://git.savannah.gnu.org/git/gnulib.git gnulib \
- && cd gnulib && git config pull.rebase false; \
+ "$(GIT)" clone https://git.savannah.gnu.org/git/gnulib.git \
+ gnulib \
+ && cd gnulib && "$(GIT)" config pull.rebase false; \
fi
cd $(top_srcdir) && maint/gnulib/gnulib-tool --source-base=lib/gnu \
--lgpl=2 --no-vc-files --no-conditional-dependencies --libtool \
$(GNU_MODS_AVOID:%=--avoid %) --import $(GNU_MODS)
- sed -i -e "s/bundled(gnulib).*/bundled(gnulib) = `date +'%Y%m%d'`/" \
- ../rpm/pacemaker.spec.in
- sed -i -e "s/_GL_EXTERN_INLINE/_GL_INLINE/" \
+ cp -p ../rpm/pacemaker.spec.in ../rpm/pacemaker.spec.in.$$
+ sed -e "s/bundled(gnulib).*/bundled(gnulib) = `date +'%Y%m%d'`/" \
+ ../rpm/pacemaker.spec.in > ../rpm/pacemaker.spec.in.$$
+ mv ../rpm/pacemaker.spec.in.$$ ../rpm/pacemaker.spec.in
+ cp -p ../lib/gnu/md5.c ../lib/gnu/md5.c.$$
+ sed -e "s/_GL_EXTERN_INLINE/_GL_INLINE/" \
-e "s#left_over -= 64;#left_over \&= 63; /* helps static analysis */#" \
-e "s#&ctx->buffer\[16\]#\&(((char *) ctx->buffer)[64]) /* helps static analysis */#" \
- ../lib/gnu/md5.c
+ ../lib/gnu/md5.c > ../lib/gnu/md5.c.$$
+ mv ../lib/gnu/md5.c.$$ ../lib/gnu/md5.c
diff --git a/maint/bumplibs.in b/maint/bumplibs.in
index a142660..ddaa1a9 100644
--- a/maint/bumplibs.in
+++ b/maint/bumplibs.in
@@ -1,6 +1,6 @@
#!@BASH_PATH@
#
-# Copyright 2012-2021 the Pacemaker project contributors
+# Copyright 2012-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -27,7 +27,7 @@ yesno() {
local RESPONSE
read -p "$1 " RESPONSE
- case $(echo "$RESPONSE" | tr A-Z a-z) in
+ case $(echo "$RESPONSE" | tr '[:upper:]' '[:lower:]') in
y|yes|ano|ja|si|oui) return 0 ;;
*) return 1 ;;
esac
@@ -37,8 +37,14 @@ prompt_to_continue() {
yesno "Continue?" || exit 0
}
+sed_in_place() {
+ cp -p "$1" "$1.$$"
+ sed -e "$2" "$1" > "$1.$$"
+ mv "$1.$$" "$1"
+}
+
find_last_release() {
- if [ ! -z "$1" ]; then
+ if [ -n "$1" ]; then
echo "$1"
else
git tag -l | grep Pacemaker | grep -v rc | sort -Vr | head -n 1
@@ -63,7 +69,7 @@ find_sources() {
# than backslashed continuation lines, to allow this script to detect
# source files correctly. Warn if that's not the case.
if
- grep "lib${LIB}_la_SOURCES.*\\\\" $AMFILE
+ grep "lib${LIB}_la_SOURCES.*\\\\" "$AMFILE"
then
echo -e "\033[1;35m -- Sources list for lib$LIB is probably truncated! --\033[0m"
echo "Edit to use '+=' rather than backslashed continuation lines"
@@ -75,11 +81,11 @@ find_sources() {
for SOURCE in $SOURCES; do
if
- echo $SOURCE | grep -q "/"
+ echo "$SOURCE" | grep -q "/"
then
echo "$SOURCE"
else
- echo "$(dirname $AMFILE)/$SOURCE"
+ echo "$(dirname "$AMFILE")/$SOURCE"
fi
done
}
@@ -111,7 +117,7 @@ shared_lib_name() {
local LIB="$1"
local VERSION="$2"
- echo "lib${LIB}.so.$(echo $VERSION | cut -d: -f 1)"
+ echo "lib${LIB}.so.$(echo "$VERSION" | cut -d: -f 1)"
}
process_lib() {
@@ -135,11 +141,10 @@ process_lib() {
AMFILE="$(find_makefile "$LIB")"
# Get current shared library version
- VER_NOW=$(cat $AMFILE | extract_version $LIB)
+ VER_NOW=$(extract_version "$LIB" < "$AMFILE")
# Check whether library existed at last release
- git cat-file -e $LAST_RELEASE:$AMFILE 2>/dev/null
- if [ $? -ne 0 ]; then
+ if ! git cat-file -e "$LAST_RELEASE:$AMFILE" 2>/dev/null; then
echo "lib$LIB is new, not changing version ($VER_NOW)"
prompt_to_continue
echo ""
@@ -158,7 +163,7 @@ process_lib() {
DEFAULT_CHANGE="i" # Removed public header is incompatible change
elif [ -n "$HEADERS_ADDED" ]; then
DEFAULT_CHANGE="c" # Additions are likely compatible
- elif git diff --quiet -w $LAST_RELEASE..HEAD $HEADERS_HEAD $SOURCES ; then
+ elif git diff --quiet -w "$LAST_RELEASE..HEAD" $HEADERS_HEAD $SOURCES ; then
echo "No changes to $LIB interface"
prompt_to_continue
echo ""
@@ -179,12 +184,12 @@ process_lib() {
echo "++ $HEADER is new"
done
fi
- git --no-pager diff --color -w $LAST_RELEASE..HEAD $HEADERS_HEAD
+ git --no-pager diff --color -w "$LAST_RELEASE..HEAD" $HEADERS_HEAD
echo ""
if yesno "Show commits (minus refactor/build/merge) touching lib$LIB since $LAST_RELEASE [y/N]?"
then
- git log --color $LAST_RELEASE..HEAD -z $HEADERS_HEAD $SOURCES $AMFILE \
+ git log --color "$LAST_RELEASE..HEAD" -z $HEADERS_HEAD $SOURCES "$AMFILE" \
| grep -vzE "Refactor:|Build:|Merge pull request"
echo
prompt_to_continue
@@ -203,7 +208,7 @@ process_lib() {
echo ""
echo "- Headers: $HEADERS_HEAD"
echo "- Changed sources since $LAST_RELEASE:"
- git --no-pager diff --color -w $LAST_RELEASE..HEAD --stat $SOURCES
+ git --no-pager diff --color -w "$LAST_RELEASE..HEAD" --stat $SOURCES
echo ""
# Ask for human guidance
@@ -212,14 +217,14 @@ process_lib() {
[ -z "$CHANGE" ] && CHANGE="$DEFAULT_CHANGE"
# Get (and show) shared library version at last release
- VER=$(git show $LAST_RELEASE:$AMFILE | extract_version $LIB)
- VER_1=$(echo $VER | awk -F: '{print $1}')
- VER_2=$(echo $VER | awk -F: '{print $2}')
- VER_3=$(echo $VER | awk -F: '{print $3}')
+ VER=$(git show "$LAST_RELEASE:$AMFILE" | extract_version "$LIB")
+ VER_1=$(echo "$VER" | awk -F: '{print $1}')
+ VER_2=$(echo "$VER" | awk -F: '{print $2}')
+ VER_3=$(echo "$VER" | awk -F: '{print $3}')
echo "lib$LIB version at $LAST_RELEASE: $VER"
# Show current shared library version if changed
- if [ $VER_NOW != $VER ]; then
+ if [ "$VER_NOW" != "$VER" ]; then
echo "lib$LIB version currently: $VER_NOW"
fi
@@ -227,25 +232,25 @@ process_lib() {
case $CHANGE in
i|I)
echo "New backwards-incompatible version: x+1:0:0"
- VER_1=$(expr $VER_1 + 1)
+ (( VER_1++ ))
VER_2=0
VER_3=0
# Some headers define constants for shared library names,
# update them if the name changed
for H in $HEADERS_HEAD; do
- sed -i -e "s/$(shared_lib_name "$LIB" "$VER_NOW")/$(shared_lib_name "$LIB" "$VER_1:0:0")/" $H
+ sed_in_place "$H" "s/$(shared_lib_name "$LIB" "$VER_NOW")/$(shared_lib_name "$LIB" "$VER_1:0:0")/"
done
;;
c|C)
echo "New version with backwards-compatible extensions: x+1:0:z+1"
- VER_1=$(expr $VER_1 + 1)
+ (( VER_1++ ))
VER_2=0
- VER_3=$(expr $VER_3 + 1)
+ (( VER_3++ ))
;;
F|f)
echo "Code changed though interfaces didn't: x:y+1:z"
- VER_2=$(expr $VER_2 + 1)
+ (( VER_2++ ))
;;
*)
echo "Not updating lib$LIB version"
@@ -255,11 +260,11 @@ process_lib() {
esac
VER_NEW=$VER_1:$VER_2:$VER_3
- if [ ! -z $CHANGE ]; then
+ if [ -n "$CHANGE" ]; then
if [ "$VER_NEW" != "$VER_NOW" ]; then
echo "Updating lib$LIB version from $VER_NOW to $VER_NEW"
prompt_to_continue
- sed -i "s/version-info\s*$VER_NOW/version-info $VER_NEW/" $AMFILE
+ sed_in_place "$AMFILE" "s/version-info\s*$VER_NOW/version-info $VER_NEW/"
else
echo "No version change needed for lib$LIB"
prompt_to_continue
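The sed_in_place() helper added above sidesteps the portability trap of `sed -i`, whose argument syntax differs between GNU and BSD sed: it first copies the file with `cp -p` so the temporary file inherits the original permissions, then overwrites that copy with the sed output and moves it back into place. A minimal usage sketch; the file name and pattern are illustrative, not taken from the script:

    # Rewrite a libtool version-info value in place without depending on GNU sed
    sed_in_place "lib/common/Makefile.am" "s/version-info 3:0:1/version-info 4:0:2/"

The x:y:z triples the script edits follow libtool's -version-info current:revision:age scheme, which is why an incompatible interface change resets the last two fields to zero, a compatible extension bumps the first and third fields and zeroes the middle one, and a code-only change bumps only the middle field.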
diff --git a/mk/common.mk b/mk/common.mk
index ac360cc..4c92a0f 100644
--- a/mk/common.mk
+++ b/mk/common.mk
@@ -1,5 +1,5 @@
#
-# Copyright 2014-2021 the Pacemaker project contributors
+# Copyright 2014-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -36,5 +36,7 @@ am__v_BOOK_1 =
MAINTAINERCLEANFILES = Makefile.in
-AM_CPPFLAGS = -I$(top_builddir)/include -I$(top_srcdir)/include \
- -I$(top_builddir)/libltdl -I$(top_srcdir)/libltdl
+AM_CPPFLAGS = -I$(top_builddir)/include \
+ -I$(top_srcdir)/include \
+ -I$(top_builddir)/libltdl \
+ -I$(top_srcdir)/libltdl
diff --git a/mk/release.mk b/mk/release.mk
index 7d7259c..b10bcf0 100644
--- a/mk/release.mk
+++ b/mk/release.mk
@@ -1,5 +1,5 @@
#
-# Copyright 2008-2022 the Pacemaker project contributors
+# Copyright 2008-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -13,10 +13,12 @@ COMMIT ?= HEAD
# TAG defaults to DIST when in a source distribution instead of a git checkout,
# the tag name if COMMIT is tagged, and the full commit ID otherwise.
-TAG ?= $(shell \
- T=$$(git describe --tags --exact-match '$(COMMIT)' 2>/dev/null); \
- [ -n "$${T}" ] && echo "$${T}" \
- || git log --pretty=format:%H -n 1 '$(COMMIT)' 2>/dev/null \
+TAG ?= $(shell \
+ T=$$("$(GIT)" describe --tags --exact-match '$(COMMIT)' \
+ 2>/dev/null); \
+ [ -n "$${T}" ] && echo "$${T}" \
+ || "$(GIT)" log --pretty=format:%H -n 1 '$(COMMIT)' \
+ 2>/dev/null \
|| echo DIST)
# If DIRTY=anything is passed to make, generated versions will end in ".mod"
@@ -24,7 +26,8 @@ TAG ?= $(shell \
# default.
DIRTY_EXT = $(shell [ -n "$(DIRTY)" ] \
&& [ "$(COMMIT)" == "HEAD" ] \
- && ! git diff-index --quiet HEAD -- 2>/dev/null \
+ && ! "$(GIT)" diff-index --quiet HEAD -- \
+ 2>/dev/null \
&& echo .mod)
# These can be used in case statements to avoid make interpreting parentheses
@@ -32,14 +35,14 @@ lparen = (
rparen = )
# This will be empty if not in a git checkout
-CHECKOUT = $(shell git rev-parse --git-dir 2>/dev/null)
+CHECKOUT = $(shell "$(GIT)" rev-parse --git-dir 2>/dev/null)
# VERSION is set by configure, but we allow some make targets to be run without
# running configure first, so set a reasonable default in that case.
VERSION ?= $(shell if [ -z "$(CHECKOUT)" ]; then \
echo 0.0.0; \
else \
- git tag -l \
+ "$(GIT)" tag -l \
| sed -n -e 's/^\(Pacemaker-[0-9.]*\)$$/\1/p' \
| sort -Vr | head -n 1; \
fi)
@@ -83,5 +86,5 @@ top_distdir = $(PACKAGE)-$(shell \
Pacemaker-*$(rparen) \
echo '$(TAG)' | cut -c11-;; \
*$(rparen) \
- git log --pretty=format:%h -n 1 '$(TAG)';; \
+ "$(GIT)" log --pretty=format:%h -n 1 '$(TAG)';; \
esac)$(DIRTY_EXT)
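The release.mk hunks route every git call through the quoted $(GIT) variable instead of relying on a bare `git` in PATH, so TAG, DIRTY_EXT, CHECKOUT and VERSION still resolve when a specific git binary must be used; passing DIRTY=anything keeps appending ".mod" to generated versions as the comment above describes. A hedged usage sketch, with an assumed git path and an illustrative target name:

    # Build release metadata with an explicit git binary and a dirty-tree marker
    make GIT=/opt/git/bin/git COMMIT=HEAD DIRTY=1 export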
diff --git a/mk/tap.mk b/mk/tap.mk
index da67813..fd6d4e2 100644
--- a/mk/tap.mk
+++ b/mk/tap.mk
@@ -1,5 +1,5 @@
#
-# Copyright 2021-2022 the Pacemaker project contributors
+# Copyright 2021-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -7,9 +7,9 @@
# or later (GPLv2+) WITHOUT ANY WARRANTY.
#
-AM_TESTS_ENVIRONMENT= \
- G_DEBUG=gc-friendly \
- MALLOC_CHECK_=2 \
+AM_TESTS_ENVIRONMENT= \
+ G_DEBUG=gc-friendly \
+ MALLOC_CHECK_=2 \
MALLOC_PERTURB_=$$(($${RANDOM:-256} % 256))
LOG_DRIVER = env AM_TAP_AWK='$(AWK)' $(SHELL) $(top_srcdir)/tests/tap-driver.sh
LOG_COMPILER = $(top_srcdir)/tests/tap-test
@@ -28,4 +28,9 @@ WRAPPED = calloc \
strdup \
uname \
unsetenv
+
+if WRAPPABLE_FOPEN64
+WRAPPED += fopen64
+endif
+
LDFLAGS_WRAP = $(foreach fn,$(WRAPPED),-Wl,--wrap=$(fn))
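mk/tap.mk expands every symbol in WRAPPED into a GNU ld --wrap option, letting the unit tests interpose `__wrap_<symbol>` functions while still reaching the real implementation through `__real_<symbol>`; the new WRAPPABLE_FOPEN64 block adds fopen64 only when configure enables that Automake conditional. The entries visible in this hunk expand into a link line along these lines (compiler and objects abbreviated):

    cc -o some_unit_test some_unit_test.o ... \
       -Wl,--wrap=calloc -Wl,--wrap=strdup -Wl,--wrap=uname \
       -Wl,--wrap=unsetenv -Wl,--wrap=fopen64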
diff --git a/po/zh_CN.po b/po/zh_CN.po
index a107f0b..93249f8 100644
--- a/po/zh_CN.po
+++ b/po/zh_CN.po
@@ -11,7 +11,7 @@ msgid ""
msgstr ""
"Project-Id-Version: Pacemaker 2\n"
"Report-Msgid-Bugs-To: developers@clusterlabs.org\n"
-"POT-Creation-Date: 2023-04-05 16:20-0500\n"
+"POT-Creation-Date: 2023-12-05 15:13+0800\n"
"PO-Revision-Date: 2021-11-08 11:04+0800\n"
"Last-Translator: Vivi <developers@clusterlabs.org>\n"
"Language-Team: CHINESE <wangluwei@uniontech.org>\n"
@@ -20,29 +20,29 @@ msgstr ""
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-#: daemons/controld/controld_control.c:533
+#: daemons/controld/controld_control.c:526
msgid "Pacemaker version on cluster node elected Designated Controller (DC)"
msgstr "集群选定的控制器节点(DC)的 Pacemaker 版本"
-#: daemons/controld/controld_control.c:534
+#: daemons/controld/controld_control.c:527
msgid ""
"Includes a hash which identifies the exact changeset the code was built "
"from. Used for diagnostic purposes."
msgstr "它包å«ä¸€ä¸ªæ ‡è¯†æ‰€æž„建代ç å˜æ›´ç‰ˆæœ¬çš„哈希值,其å¯ç”¨äºŽè¯Šæ–­ã€‚"
-#: daemons/controld/controld_control.c:539
+#: daemons/controld/controld_control.c:532
msgid "The messaging stack on which Pacemaker is currently running"
msgstr "Pacemaker 正在使用的消æ¯ä¼ è¾“引擎"
-#: daemons/controld/controld_control.c:540
+#: daemons/controld/controld_control.c:533
msgid "Used for informational and diagnostic purposes."
msgstr "用于æ供信æ¯å’Œè¯Šæ–­ã€‚"
-#: daemons/controld/controld_control.c:544
+#: daemons/controld/controld_control.c:537
msgid "An arbitrary name for the cluster"
msgstr "ä»»æ„的集群å称"
-#: daemons/controld/controld_control.c:545
+#: daemons/controld/controld_control.c:538
msgid ""
"This optional value is mostly for users' convenience as desired in "
"administration, but may also be used in Pacemaker configuration rules via "
@@ -52,17 +52,17 @@ msgstr ""
"该å¯é€‰å€¼ä¸»è¦æ˜¯ä¸ºäº†æ–¹ä¾¿ç”¨æˆ·ç®¡ç†ä½¿ç”¨ï¼Œä¹Ÿå¯ä»¥åœ¨pacemaker é…置规则中通过 "
"#cluster-name 节点属性é…置使用,也å¯ä»¥é€šè¿‡é«˜çº§å·¥å…·å’Œèµ„æºä»£ç†ä½¿ç”¨ã€‚"
-#: daemons/controld/controld_control.c:553
+#: daemons/controld/controld_control.c:546
msgid "How long to wait for a response from other nodes during start-up"
msgstr "å¯åŠ¨è¿‡ç¨‹ä¸­ç­‰å¾…其他节点å“应的时间"
-#: daemons/controld/controld_control.c:554
+#: daemons/controld/controld_control.c:547
msgid ""
"The optimal value will depend on the speed and load of your network and the "
"type of switches used."
msgstr "其最佳值将å–决于你的网络速度和负载以åŠæ‰€ç”¨äº¤æ¢æœºçš„类型。"
-#: daemons/controld/controld_control.c:559
+#: daemons/controld/controld_control.c:552
msgid ""
"Zero disables polling, while positive values are an interval in "
"seconds(unless other units are specified, for example \"5min\")"
@@ -70,13 +70,13 @@ msgstr ""
"设置为0å°†ç¦ç”¨è½®è¯¢ï¼Œè®¾ç½®ä¸ºæ­£æ•°å°†æ˜¯ä»¥ç§’为å•ä½çš„时间间隔(除éžä½¿ç”¨äº†å…¶ä»–å•ä½ï¼Œæ¯”"
"如\"5min\"表示5分钟)"
-#: daemons/controld/controld_control.c:562
+#: daemons/controld/controld_control.c:555
msgid ""
"Polling interval to recheck cluster state and evaluate rules with date "
"specifications"
msgstr "é‡æ–°æ£€æŸ¥é›†ç¾¤çŠ¶æ€å¹¶ä¸”评估具有日期规格的é…置规则的轮询间隔"
-#: daemons/controld/controld_control.c:564
+#: daemons/controld/controld_control.c:557
msgid ""
"Pacemaker is primarily event-driven, and looks ahead to know when to recheck "
"cluster state for failure timeouts and most time-based rules. However, it "
@@ -89,26 +89,26 @@ msgstr ""
"å°†é‡æ–°æ£€æŸ¥é›†ç¾¤ï¼Œä»¥è¯„估具有日期规格的规则,并为æŸäº›ç±»åž‹çš„调度程åºç¼ºé™·æ供故障"
"ä¿æŠ¤ã€‚"
-#: daemons/controld/controld_control.c:573
+#: daemons/controld/controld_control.c:566
msgid "Maximum amount of system load that should be used by cluster nodes"
msgstr "集群节点应该使用的最大系统负载é‡"
-#: daemons/controld/controld_control.c:574
+#: daemons/controld/controld_control.c:567
msgid ""
"The cluster will slow down its recovery process when the amount of system "
"resources used (currently CPU) approaches this limit"
msgstr "当使用的系统资æºé‡ï¼ˆå½“å‰ä¸ºCPU)接近此é™åˆ¶æ—¶ï¼Œé›†ç¾¤å°†å‡æ…¢å…¶æ¢å¤è¿‡ç¨‹"
-#: daemons/controld/controld_control.c:580
+#: daemons/controld/controld_control.c:573
msgid ""
"Maximum number of jobs that can be scheduled per node (defaults to 2x cores)"
msgstr "æ¯ä¸ªèŠ‚点å¯ä»¥è°ƒåº¦çš„最大作业数(默认为2x内核数)"
-#: daemons/controld/controld_control.c:584
+#: daemons/controld/controld_control.c:577
msgid "How a cluster node should react if notified of its own fencing"
msgstr "集群节点在收到针对自己的 fence æ“作结果通知时应如何å应"
-#: daemons/controld/controld_control.c:585
+#: daemons/controld/controld_control.c:578
msgid ""
"A cluster node may receive notification of its own fencing if fencing is "
"misconfigured, or if fabric fencing is in use that doesn't cut cluster "
@@ -121,7 +121,7 @@ msgstr ""
"æ­¢ pacemaker 并ä¿æŒåœç”¨çŠ¶æ€,或者 \"panic\" å°è¯•ç«‹å³é‡æ–°å¯åŠ¨æœ¬åœ°èŠ‚点,并在失败"
"时返回执行stop。"
-#: daemons/controld/controld_control.c:595
+#: daemons/controld/controld_control.c:588
msgid ""
"Declare an election failed if it is not decided within this much time. If "
"you need to adjust this value, it probably indicates the presence of a bug."
@@ -129,7 +129,7 @@ msgstr ""
"如果集群在本项设置时间内没有作出决定则宣布选举失败。如果您需è¦è°ƒæ•´è¯¥å€¼ï¼Œè¿™å¯"
"能代表存在æŸäº›ç¼ºé™·ã€‚"
-#: daemons/controld/controld_control.c:603
+#: daemons/controld/controld_control.c:596
msgid ""
"Exit immediately if shutdown does not complete within this much time. If you "
"need to adjust this value, it probably indicates the presence of a bug."
@@ -137,20 +137,20 @@ msgstr ""
"如果在这段时间内关机ä»æœªå®Œæˆï¼Œåˆ™ç«‹å³é€€å‡ºã€‚如果您需è¦è°ƒæ•´è¯¥å€¼ï¼Œè¿™å¯èƒ½ä»£è¡¨å­˜åœ¨"
"æŸäº›ç¼ºé™·ã€‚"
+#: daemons/controld/controld_control.c:604
#: daemons/controld/controld_control.c:611
-#: daemons/controld/controld_control.c:618
msgid ""
"If you need to adjust this value, it probably indicates the presence of a "
"bug."
msgstr "如果您需è¦è°ƒæ•´è¯¥å€¼ï¼Œè¿™å¯èƒ½ä»£è¡¨å­˜åœ¨æŸäº›ç¼ºé™·ã€‚"
-#: daemons/controld/controld_control.c:624
+#: daemons/controld/controld_control.c:617
msgid ""
"*** Advanced Use Only *** Enabling this option will slow down cluster "
"recovery under all conditions"
msgstr "*** Advanced Use Only *** å¯ç”¨æ­¤é€‰é¡¹å°†åœ¨æ‰€æœ‰æƒ…况下å‡æ…¢é›†ç¾¤æ¢å¤çš„速度"
-#: daemons/controld/controld_control.c:626
+#: daemons/controld/controld_control.c:619
msgid ""
"Delay cluster recovery for this much time to allow for additional events to "
"occur. Useful if your configuration is sensitive to the order in which ping "
@@ -159,7 +159,7 @@ msgstr ""
"集群æ¢å¤å°†è¢«æŽ¨è¿ŸæŒ‡å®šçš„时间间隔,以等待更多事件å‘生。如果您的é…置对 ping æ›´æ–°"
"到达的顺åºå¾ˆæ•æ„Ÿï¼Œè¿™å°±å¾ˆæœ‰ç”¨"
-#: daemons/controld/controld_control.c:633
+#: daemons/controld/controld_control.c:626
#, fuzzy
msgid ""
"How long before nodes can be assumed to be safely down when watchdog-based "
@@ -168,7 +168,7 @@ msgstr ""
"当基于 watchdog 的自我 fence 机制通过SBD 被执行时,我们å¯ä»¥å‡è®¾èŠ‚点安全关闭之"
"å‰éœ€è¦ç­‰å¾…多长时间"
-#: daemons/controld/controld_control.c:635
+#: daemons/controld/controld_control.c:628
msgid ""
"If this is set to a positive value, lost nodes are assumed to self-fence "
"using watchdog-based SBD within this much time. This does not require a "
@@ -193,21 +193,21 @@ msgstr ""
"Pacemaker将拒ç»åœ¨ä»»ä½•èŠ‚点上å¯åŠ¨ã€‚如果设置为负值,则在使用SBD的所有节点上,"
"`SBD_WATCHDOG_TIMEOUT`必须设置为相åŒçš„值,å¦åˆ™å¯èƒ½ä¼šå‘生数æ®æŸå或丢失。"
-#: daemons/controld/controld_control.c:654
+#: daemons/controld/controld_control.c:647
msgid ""
"How many times fencing can fail before it will no longer be immediately re-"
"attempted on a target"
msgstr "fenceæ“作失败多少次会åœæ­¢ç«‹å³å°è¯•"
-#: daemons/controld/controld_control.c:662 lib/pengine/common.c:39
+#: daemons/controld/controld_control.c:655 lib/pengine/common.c:40
msgid "What to do when the cluster does not have quorum"
msgstr "当集群没有必需票数时该如何作"
-#: daemons/controld/controld_control.c:667 lib/pengine/common.c:73
+#: daemons/controld/controld_control.c:660 lib/pengine/common.c:74
msgid "Whether to lock resources to a cleanly shut down node"
msgstr "是å¦é”定资æºåˆ°å®Œå…¨å…³é—­çš„节点"
-#: daemons/controld/controld_control.c:668 lib/pengine/common.c:74
+#: daemons/controld/controld_control.c:661 lib/pengine/common.c:75
msgid ""
"When true, resources active on a node when it is cleanly shut down are kept "
"\"locked\" to that node (not allowed to run elsewhere) until they start "
@@ -221,11 +221,11 @@ msgstr ""
"设置)。 Stonith资æºå’ŒPacemaker Remote连接永远ä¸ä¼šè¢«é”定。 克隆和æ†ç»‘实例以åŠ"
"å¯å‡çº§å…‹éš†çš„主角色目å‰ä»Žæœªé”定,尽管å¯ä»¥åœ¨å°†æ¥çš„å‘行版中添加支æŒã€‚"
-#: daemons/controld/controld_control.c:680 lib/pengine/common.c:86
+#: daemons/controld/controld_control.c:673 lib/pengine/common.c:87
msgid "Do not lock resources to a cleanly shut down node longer than this"
msgstr "资æºä¼šè¢«é”定到完全关闭的节点的最长时间"
-#: daemons/controld/controld_control.c:682 lib/pengine/common.c:88
+#: daemons/controld/controld_control.c:675 lib/pengine/common.c:89
msgid ""
"If shutdown-lock is true and this is set to a nonzero time duration, "
"shutdown locks will expire after this much time has passed since the "
@@ -234,11 +234,28 @@ msgstr ""
"如果shutdown-lock为true,并且将此选项设置为éžé›¶æŒç»­æ—¶é—´ï¼Œåˆ™è‡ªä»Žå¼€å§‹shutdown以"
"æ¥ç»è¿‡äº†è¿™ä¹ˆé•¿çš„时间åŽï¼Œshutdowné”将过期,å³ä½¿è¯¥èŠ‚点尚未é‡æ–°åŠ å…¥ã€‚"
-#: daemons/fenced/pacemaker-fenced.c:1379
+#: daemons/controld/controld_control.c:683 lib/pengine/common.c:164
+msgid ""
+"How long to wait for a node that has joined the cluster to join the "
+"controller process group"
+msgstr ""
+"等待已加入集群的节点加入控制器进程组的时间"
+
+#: daemons/controld/controld_control.c:685 lib/pengine/common.c:166
+msgid ""
+"Fence nodes that do not join the controller process group within this much "
+"time after joining the cluster, to allow the cluster to continue managing "
+"resources. A value of 0 means never fence pending nodes. Setting the value "
+"to 2h means fence nodes after 2 hours."
+msgstr ""
+"如果节点加入集群åŽåœ¨æ­¤æ—¶é—´å†…ä¸åŠ å…¥æŽ§åˆ¶å™¨è¿›ç¨‹ç»„,Fence该节点,以便群集继续管ç†èµ„æºã€‚"
+"值为0è¡¨ç¤ºæ°¸è¿œä¸ fence 待定节点。将值设置为2h表示2å°æ—¶åŽ fence 待定节点。"
+
+#: daemons/fenced/pacemaker-fenced.c:536
msgid "Advanced use only: An alternate parameter to supply instead of 'port'"
msgstr "仅高级使用:使用替代的å‚æ•°å,而ä¸æ˜¯'port'"
-#: daemons/fenced/pacemaker-fenced.c:1380
+#: daemons/fenced/pacemaker-fenced.c:537
msgid ""
"some devices do not support the standard 'port' parameter or may provide "
"additional ones. Use this to specify an alternate, device-specific, "
@@ -249,34 +266,34 @@ msgstr ""
"设备专用的å‚æ•°å,该å‚数用于标识需è¦fence的机器。值noneå¯ä»¥ç”¨äºŽå‘Šè¯‰é›†ç¾¤ä¸è¦æ"
"供任何其他的å‚数。"
-#: daemons/fenced/pacemaker-fenced.c:1389
+#: daemons/fenced/pacemaker-fenced.c:546
msgid ""
"A mapping of host names to ports numbers for devices that do not support "
"host names."
msgstr "为ä¸æ”¯æŒä¸»æœºå的设备æ供主机å到端å£å·çš„映射。"
-#: daemons/fenced/pacemaker-fenced.c:1390
+#: daemons/fenced/pacemaker-fenced.c:547
msgid ""
"Eg. node1:1;node2:2,3 would tell the cluster to use port 1 for node1 and "
"ports 2 and 3 for node2"
msgstr ""
"例如 node1:1;node2:2,3,将会告诉集群对node1使用端å£1,对node2使用端å£2å’Œ3 "
-#: daemons/fenced/pacemaker-fenced.c:1394
+#: daemons/fenced/pacemaker-fenced.c:551
msgid "Eg. node1,node2,node3"
msgstr "例如 node1,node2,node3"
-#: daemons/fenced/pacemaker-fenced.c:1395
+#: daemons/fenced/pacemaker-fenced.c:552
msgid ""
"A list of machines controlled by this device (Optional unless "
"pcmk_host_list=static-list)"
msgstr "该设备控制的机器列表(å¯é€‰å‚æ•°ï¼Œé™¤éž pcmk_host_list 设置为 static-list)"
-#: daemons/fenced/pacemaker-fenced.c:1400
+#: daemons/fenced/pacemaker-fenced.c:557
msgid "How to determine which machines are controlled by the device."
msgstr "如何确定设备控制哪些机器。"
-#: daemons/fenced/pacemaker-fenced.c:1401
+#: daemons/fenced/pacemaker-fenced.c:558
msgid ""
"Allowed values: dynamic-list (query the device via the 'list' command), "
"static-list (check the pcmk_host_list attribute), status (query the device "
@@ -286,12 +303,11 @@ msgstr ""
"pcmk_host_list属性),status(通过'status'命令查询设备),none(å‡è®¾æ¯ä¸ªè®¾å¤‡"
"都å¯fence æ¯å°æœºå™¨ )"
-#: daemons/fenced/pacemaker-fenced.c:1410
-#: daemons/fenced/pacemaker-fenced.c:1419
+#: daemons/fenced/pacemaker-fenced.c:567 daemons/fenced/pacemaker-fenced.c:576
msgid "Enable a base delay for fencing actions and specify base delay value."
msgstr "在执行 fencing æ“作å‰å¯ç”¨ä¸è¶…过指定时间的延迟。"
-#: daemons/fenced/pacemaker-fenced.c:1411
+#: daemons/fenced/pacemaker-fenced.c:568
msgid ""
"Enable a delay of no more than the time specified before executing fencing "
"actions. Pacemaker derives the overall delay by taking the value of "
@@ -302,7 +318,7 @@ msgstr ""
"pcmk_delay_base的值并添加éšæœºå»¶è¿Ÿå€¼æ¥å¾—出总体延迟,从而使总和ä¿æŒåœ¨æ­¤æœ€å¤§å€¼ä»¥"
"下。"
-#: daemons/fenced/pacemaker-fenced.c:1421
+#: daemons/fenced/pacemaker-fenced.c:578
msgid ""
"This enables a static delay for fencing actions, which can help avoid "
"\"death matches\" where two nodes try to fence each other at the same time. "
@@ -318,12 +334,12 @@ msgstr ""
"目标分别é…置了å„自的设备的情况), 或ç€è®¾ç½®ä¸ºä¸€ä¸ªèŠ‚点映射 (例如,\"node1:1s;"
"node2:5\")从而为æ¯ä¸ªç›®æ ‡è®¾ç½®ä¸åŒå€¼ã€‚"
-#: daemons/fenced/pacemaker-fenced.c:1433
+#: daemons/fenced/pacemaker-fenced.c:590
msgid ""
"The maximum number of actions can be performed in parallel on this device"
msgstr "å¯ä»¥åœ¨è¯¥è®¾å¤‡ä¸Šå¹¶å‘执行的最多æ“作数é‡"
-#: daemons/fenced/pacemaker-fenced.c:1434
+#: daemons/fenced/pacemaker-fenced.c:591
msgid ""
"Cluster property concurrent-fencing=true needs to be configured first.Then "
"use this to specify the maximum number of actions can be performed in "
@@ -332,11 +348,11 @@ msgstr ""
"需è¦é¦–å…ˆé…置集群属性 concurrent-fencing=true 。然åŽä½¿ç”¨æ­¤å‚数指定å¯ä»¥åœ¨è¯¥è®¾å¤‡"
"上并å‘执行的最多æ“作数é‡ã€‚ -1 代表没有é™åˆ¶"
-#: daemons/fenced/pacemaker-fenced.c:1439
+#: daemons/fenced/pacemaker-fenced.c:597
msgid "Advanced use only: An alternate command to run instead of 'reboot'"
msgstr "仅高级使用:è¿è¡Œæ›¿ä»£å‘½ä»¤ï¼Œè€Œä¸æ˜¯'reboot'"
-#: daemons/fenced/pacemaker-fenced.c:1440
+#: daemons/fenced/pacemaker-fenced.c:598
msgid ""
"Some devices do not support the standard commands or may provide additional "
"ones.\n"
@@ -346,13 +362,13 @@ msgstr ""
"一些设备ä¸æ”¯æŒæ ‡å‡†å‘½ä»¤æˆ–å¯èƒ½æ供其他命令,使用此选项å¯ä»¥æŒ‡å®šä¸€ä¸ªè¯¥è®¾å¤‡ç‰¹å®šçš„"
"替代命令,用æ¥å®žçŽ°'reboot'æ“作。"
-#: daemons/fenced/pacemaker-fenced.c:1445
+#: daemons/fenced/pacemaker-fenced.c:603
msgid ""
"Advanced use only: Specify an alternate timeout to use for reboot actions "
"instead of stonith-timeout"
msgstr "仅高级使用:指定用于'reboot' æ“作的替代超时,而ä¸æ˜¯stonith-timeout"
-#: daemons/fenced/pacemaker-fenced.c:1446
+#: daemons/fenced/pacemaker-fenced.c:604
msgid ""
"Some devices need much more/less time to complete than normal.Use this to "
"specify an alternate, device-specific, timeout for 'reboot' actions."
@@ -360,13 +376,13 @@ msgstr ""
"一些设备需è¦æ¯”正常情况下更多或更少的时间æ¥å®Œæˆæ“作,使用此选项指定一个用"
"于'reboot'æ“作的该设备特定的替代超时。"
-#: daemons/fenced/pacemaker-fenced.c:1451
+#: daemons/fenced/pacemaker-fenced.c:609
msgid ""
"Advanced use only: The maximum number of times to retry the 'reboot' command "
"within the timeout period"
msgstr "仅高级使用:在超时å‰é‡è¯•'reboot'命令的最大次数"
-#: daemons/fenced/pacemaker-fenced.c:1452
+#: daemons/fenced/pacemaker-fenced.c:610
msgid ""
"Some devices do not support multiple connections. Operations may 'fail' if "
"the device is busy with another task so Pacemaker will automatically retry "
@@ -377,11 +393,11 @@ msgstr ""
"Pacemaker将自动é‡è¯•ï¼ˆå¦‚果时间å…许)。 使用此选项更改Pacemaker在放弃之å‰é‡"
"试'reboot' æ“作的次数."
-#: daemons/fenced/pacemaker-fenced.c:1458
+#: daemons/fenced/pacemaker-fenced.c:617
msgid "Advanced use only: An alternate command to run instead of 'off'"
msgstr "仅高级使用:è¿è¡Œæ›¿ä»£å‘½ä»¤ï¼Œè€Œä¸æ˜¯'off'"
-#: daemons/fenced/pacemaker-fenced.c:1459
+#: daemons/fenced/pacemaker-fenced.c:618
msgid ""
"Some devices do not support the standard commands or may provide additional "
"ones.Use this to specify an alternate, device-specific, command that "
@@ -390,13 +406,13 @@ msgstr ""
"一些设备ä¸æ”¯æŒæ ‡å‡†å‘½ä»¤æˆ–å¯èƒ½æ供其他命令,使用此选项å¯æŒ‡å®šä¸€ä¸ªè¯¥è®¾å¤‡ä¸“用的替代"
"命令,用æ¥å®žçŽ°'off'æ“作。"
-#: daemons/fenced/pacemaker-fenced.c:1464
+#: daemons/fenced/pacemaker-fenced.c:623
msgid ""
"Advanced use only: Specify an alternate timeout to use for off actions "
"instead of stonith-timeout"
msgstr "仅高级使用:指定用于off æ“作的替代超时,而ä¸æ˜¯stonith-timeout"
-#: daemons/fenced/pacemaker-fenced.c:1465
+#: daemons/fenced/pacemaker-fenced.c:624
msgid ""
"Some devices need much more/less time to complete than normal.Use this to "
"specify an alternate, device-specific, timeout for 'off' actions."
@@ -404,13 +420,13 @@ msgstr ""
"一些设备需è¦æ¯”正常情况下更多或更少的时间æ¥å®Œæˆæ“作,使用此选项指定一个用"
"于'off'æ“作的该设备特定的替代超时。"
-#: daemons/fenced/pacemaker-fenced.c:1470
+#: daemons/fenced/pacemaker-fenced.c:629
msgid ""
"Advanced use only: The maximum number of times to retry the 'off' command "
"within the timeout period"
msgstr "仅高级使用:在超时å‰é‡è¯•'off'命令的最大次数"
-#: daemons/fenced/pacemaker-fenced.c:1471
+#: daemons/fenced/pacemaker-fenced.c:630
msgid ""
"Some devices do not support multiple connections. Operations may 'fail' if "
"the device is busy with another task so Pacemaker will automatically retry "
@@ -421,11 +437,11 @@ msgstr ""
"Pacemaker将自动é‡è¯•ï¼ˆå¦‚果时间å…许)。 使用此选项更改Pacemaker在放弃之å‰é‡"
"试'off' æ“作的次数."
-#: daemons/fenced/pacemaker-fenced.c:1477
+#: daemons/fenced/pacemaker-fenced.c:637
msgid "Advanced use only: An alternate command to run instead of 'on'"
msgstr "仅高级使用:è¿è¡Œæ›¿ä»£å‘½ä»¤ï¼Œè€Œä¸æ˜¯'on'"
-#: daemons/fenced/pacemaker-fenced.c:1478
+#: daemons/fenced/pacemaker-fenced.c:638
msgid ""
"Some devices do not support the standard commands or may provide additional "
"ones.Use this to specify an alternate, device-specific, command that "
@@ -434,13 +450,13 @@ msgstr ""
"一些设备ä¸æ”¯æŒæ ‡å‡†å‘½ä»¤æˆ–å¯èƒ½æ供其他命令,使用此选项å¯æŒ‡å®šä¸€ä¸ªè¯¥è®¾å¤‡ç‰¹å®šçš„替"
"代命令,用æ¥å®žçŽ°'on'æ“作。"
-#: daemons/fenced/pacemaker-fenced.c:1483
+#: daemons/fenced/pacemaker-fenced.c:643
msgid ""
"Advanced use only: Specify an alternate timeout to use for on actions "
"instead of stonith-timeout"
msgstr "仅高级使用:指定用于on æ“作的替代超时,而ä¸æ˜¯stonith-timeout"
-#: daemons/fenced/pacemaker-fenced.c:1484
+#: daemons/fenced/pacemaker-fenced.c:644
msgid ""
"Some devices need much more/less time to complete than normal.Use this to "
"specify an alternate, device-specific, timeout for 'on' actions."
@@ -448,13 +464,13 @@ msgstr ""
"一些设备需è¦æ¯”正常情况下更多或更少的时间æ¥å®Œæˆæ“作,使用此选项指定一个用"
"于'on'æ“作的该设备特定的替代超时。"
-#: daemons/fenced/pacemaker-fenced.c:1489
+#: daemons/fenced/pacemaker-fenced.c:649
msgid ""
"Advanced use only: The maximum number of times to retry the 'on' command "
"within the timeout period"
msgstr "仅高级使用:在超时å‰é‡è¯•'on'命令的最大次数"
-#: daemons/fenced/pacemaker-fenced.c:1490
+#: daemons/fenced/pacemaker-fenced.c:650
msgid ""
"Some devices do not support multiple connections. Operations may 'fail' if "
"the device is busy with another task so Pacemaker will automatically retry "
@@ -465,11 +481,11 @@ msgstr ""
"Pacemaker将自动é‡è¯•ï¼ˆå¦‚果时间å…许)。 使用此选项更改Pacemaker在放弃之å‰é‡"
"试'on' æ“作的次数."
-#: daemons/fenced/pacemaker-fenced.c:1496
+#: daemons/fenced/pacemaker-fenced.c:657
msgid "Advanced use only: An alternate command to run instead of 'list'"
msgstr "仅高级使用:è¿è¡Œæ›¿ä»£å‘½ä»¤ï¼Œè€Œä¸æ˜¯'list'"
-#: daemons/fenced/pacemaker-fenced.c:1497
+#: daemons/fenced/pacemaker-fenced.c:658
msgid ""
"Some devices do not support the standard commands or may provide additional "
"ones.Use this to specify an alternate, device-specific, command that "
@@ -478,13 +494,13 @@ msgstr ""
"一些设备ä¸æ”¯æŒæ ‡å‡†å‘½ä»¤æˆ–å¯èƒ½æ供其他命令,使用此选项å¯æŒ‡å®šä¸€ä¸ªè¯¥è®¾å¤‡ç‰¹å®šçš„替"
"代命令,用æ¥å®žçŽ°'list'æ“作。"
-#: daemons/fenced/pacemaker-fenced.c:1502
+#: daemons/fenced/pacemaker-fenced.c:663
msgid ""
"Advanced use only: Specify an alternate timeout to use for list actions "
"instead of stonith-timeout"
msgstr "仅高级使用:指定用于list æ“作的替代超时,而ä¸æ˜¯stonith-timeout"
-#: daemons/fenced/pacemaker-fenced.c:1503
+#: daemons/fenced/pacemaker-fenced.c:664
msgid ""
"Some devices need much more/less time to complete than normal.Use this to "
"specify an alternate, device-specific, timeout for 'list' actions."
@@ -492,13 +508,13 @@ msgstr ""
"一些设备需è¦æ¯”正常情况下更多或更少的时间æ¥å®Œæˆæ“作,使用此选项指定一个用"
"于'list'æ“作的该设备特定的替代超时。"
-#: daemons/fenced/pacemaker-fenced.c:1508
+#: daemons/fenced/pacemaker-fenced.c:669
msgid ""
"Advanced use only: The maximum number of times to retry the 'list' command "
"within the timeout period"
msgstr "仅高级使用:在超时å‰é‡è¯•'list'命令的最大次数"
-#: daemons/fenced/pacemaker-fenced.c:1509
+#: daemons/fenced/pacemaker-fenced.c:670
msgid ""
"Some devices do not support multiple connections. Operations may 'fail' if "
"the device is busy with another task so Pacemaker will automatically retry "
@@ -509,11 +525,11 @@ msgstr ""
"Pacemaker将自动é‡è¯•ï¼ˆå¦‚果时间å…许)。 使用此选项更改Pacemaker在放弃之å‰é‡"
"试'list' æ“作的次数."
-#: daemons/fenced/pacemaker-fenced.c:1515
+#: daemons/fenced/pacemaker-fenced.c:677
msgid "Advanced use only: An alternate command to run instead of 'monitor'"
msgstr "仅高级使用:è¿è¡Œæ›¿ä»£å‘½ä»¤ï¼Œè€Œä¸æ˜¯'monitor'"
-#: daemons/fenced/pacemaker-fenced.c:1516
+#: daemons/fenced/pacemaker-fenced.c:678
msgid ""
"Some devices do not support the standard commands or may provide additional "
"ones.Use this to specify an alternate, device-specific, command that "
@@ -522,13 +538,13 @@ msgstr ""
"一些设备ä¸æ”¯æŒæ ‡å‡†å‘½ä»¤æˆ–å¯èƒ½æ供其他命令,使用此选项å¯æŒ‡å®šä¸€ä¸ªè¯¥è®¾å¤‡ç‰¹å®šçš„替"
"代命令,用æ¥å®žçŽ°'monitor'æ“作。"
-#: daemons/fenced/pacemaker-fenced.c:1521
+#: daemons/fenced/pacemaker-fenced.c:683
msgid ""
"Advanced use only: Specify an alternate timeout to use for monitor actions "
"instead of stonith-timeout"
msgstr "仅高级使用:指定用于monitor æ“作的替代超时,而ä¸æ˜¯stonith-timeout"
-#: daemons/fenced/pacemaker-fenced.c:1522
+#: daemons/fenced/pacemaker-fenced.c:684
msgid ""
"Some devices need much more/less time to complete than normal.\n"
"Use this to specify an alternate, device-specific, timeout for 'monitor' "
@@ -537,13 +553,13 @@ msgstr ""
"一些设备需è¦æ¯”正常情况下更多或更少的时间æ¥å®Œæˆæ“作,使用此选项指定一个用"
"于'monitor'æ“作的该设备特定的替代超时。"
-#: daemons/fenced/pacemaker-fenced.c:1527
+#: daemons/fenced/pacemaker-fenced.c:689
msgid ""
"Advanced use only: The maximum number of times to retry the 'monitor' "
"command within the timeout period"
msgstr "仅高级使用:在超时å‰é‡è¯•'monitor'命令的最大次数"
-#: daemons/fenced/pacemaker-fenced.c:1528
+#: daemons/fenced/pacemaker-fenced.c:690
msgid ""
"Some devices do not support multiple connections. Operations may 'fail' if "
"the device is busy with another task so Pacemaker will automatically retry "
@@ -554,11 +570,11 @@ msgstr ""
"Pacemaker将自动é‡è¯•ï¼ˆå¦‚果时间å…许)。 使用此选项更改Pacemaker在放弃之å‰é‡"
"试'monitor' æ“作的次数."
-#: daemons/fenced/pacemaker-fenced.c:1534
+#: daemons/fenced/pacemaker-fenced.c:697
msgid "Advanced use only: An alternate command to run instead of 'status'"
msgstr "仅高级使用:è¿è¡Œæ›¿ä»£å‘½ä»¤ï¼Œè€Œä¸æ˜¯'status'"
-#: daemons/fenced/pacemaker-fenced.c:1535
+#: daemons/fenced/pacemaker-fenced.c:698
msgid ""
"Some devices do not support the standard commands or may provide additional "
"ones.Use this to specify an alternate, device-specific, command that "
@@ -567,13 +583,13 @@ msgstr ""
"一些设备ä¸æ”¯æŒæ ‡å‡†å‘½ä»¤æˆ–å¯èƒ½æ供其他命令,使用此选项å¯æŒ‡å®šä¸€ä¸ªè¯¥è®¾å¤‡ç‰¹å®šçš„替"
"代命令,用æ¥å®žçŽ°'status'æ“作。"
-#: daemons/fenced/pacemaker-fenced.c:1540
+#: daemons/fenced/pacemaker-fenced.c:703
msgid ""
"Advanced use only: Specify an alternate timeout to use for status actions "
"instead of stonith-timeout"
msgstr "仅高级使用:指定用于status æ“作的替代超时,而ä¸æ˜¯stonith-timeout"
-#: daemons/fenced/pacemaker-fenced.c:1541
+#: daemons/fenced/pacemaker-fenced.c:704
msgid ""
"Some devices need much more/less time to complete than normal.Use this to "
"specify an alternate, device-specific, timeout for 'status' actions."
@@ -581,13 +597,13 @@ msgstr ""
"一些设备需è¦æ¯”正常情况下更多或更少的时间æ¥å®Œæˆæ“作,使用此选项指定一个用"
"于'status'æ“作的该设备特定的替代超时"
-#: daemons/fenced/pacemaker-fenced.c:1546
+#: daemons/fenced/pacemaker-fenced.c:709
msgid ""
"Advanced use only: The maximum number of times to retry the 'status' command "
"within the timeout period"
msgstr "仅高级使用:在超时å‰é‡è¯•'status'命令的最大次数"
-#: daemons/fenced/pacemaker-fenced.c:1547
+#: daemons/fenced/pacemaker-fenced.c:710
msgid ""
"Some devices do not support multiple connections. Operations may 'fail' if "
"the device is busy with another task so Pacemaker will automatically retry "
@@ -598,11 +614,11 @@ msgstr ""
"Pacemaker将自动é‡è¯•ï¼ˆå¦‚果时间å…许)。 使用此选项更改Pacemaker在放弃之å‰é‡"
"试'status' æ“作的次数."
-#: daemons/fenced/pacemaker-fenced.c:1556
+#: daemons/fenced/pacemaker-fenced.c:719
msgid "Instance attributes available for all \"stonith\"-class resources"
msgstr " å¯ç”¨äºŽæ‰€æœ‰stonith类资æºçš„实例属性"
-#: daemons/fenced/pacemaker-fenced.c:1558
+#: daemons/fenced/pacemaker-fenced.c:721
msgid ""
"Instance attributes available for all \"stonith\"-class resources and used "
"by Pacemaker's fence daemon, formerly known as stonithd"
@@ -610,15 +626,27 @@ msgstr ""
" å¯ç”¨äºŽæ‰€æœ‰stonith类资æºçš„实例属性,并由Pacemakerçš„fence守护程åºä½¿ç”¨ï¼ˆä»¥å‰ç§°"
"为stonithd)"
-#: lib/cib/cib_utils.c:589
+#: daemons/fenced/pacemaker-fenced.c:734
+msgid "Deprecated (will be removed in a future release)"
+msgstr "已弃用(将在未æ¥ç‰ˆæœ¬ä¸­åˆ é™¤)"
+
+#: daemons/fenced/pacemaker-fenced.c:737
+msgid "Intended for use in regression testing only"
+msgstr "仅适用于回归测试"
+
+#: daemons/fenced/pacemaker-fenced.c:740
+msgid "Send logs to the additional named logfile"
+msgstr "将日志å‘é€åˆ°å…¶ä»–命å日志文件"
+
+#: lib/cib/cib_utils.c:875
msgid "Enable Access Control Lists (ACLs) for the CIB"
msgstr "为CIBå¯ç”¨è®¿é—®æŽ§åˆ¶åˆ—表(ACL)"
-#: lib/cib/cib_utils.c:595
+#: lib/cib/cib_utils.c:881
msgid "Maximum IPC message backlog before disconnecting a cluster daemon"
msgstr "断开集群守护程åºä¹‹å‰çš„最大IPC消æ¯ç§¯åŽ‹"
-#: lib/cib/cib_utils.c:596
+#: lib/cib/cib_utils.c:882
msgid ""
"Raise this if log has \"Evicting client\" messages for cluster daemon PIDs "
"(a good value is the number of resources in the cluster multiplied by the "
@@ -627,7 +655,7 @@ msgstr ""
"如果日志中有针对集群守护程åºPID的消æ¯â€œEvicting clientâ€ï¼Œï¼ˆåˆ™å»ºè®®å°†å€¼è®¾ä¸ºé›†ç¾¤"
"中的资æºæ•°é‡ä¹˜ä»¥èŠ‚点数é‡ï¼‰"
-#: lib/common/options.c:401
+#: lib/common/options.c:414
msgid " Allowed values: "
msgstr " å…许的值: "
@@ -659,41 +687,41 @@ msgstr "输出选项"
msgid "Show output help"
msgstr "显示输出帮助"
-#: lib/pengine/common.c:45
+#: lib/pengine/common.c:46
msgid "Whether resources can run on any node by default"
msgstr "资æºæ˜¯å¦é»˜è®¤å¯ä»¥åœ¨ä»»ä½•èŠ‚点上è¿è¡Œ"
-#: lib/pengine/common.c:51
+#: lib/pengine/common.c:52
msgid ""
"Whether the cluster should refrain from monitoring, starting, and stopping "
"resources"
msgstr "集群是å¦åº”é¿å…监视,å¯åŠ¨å’Œåœæ­¢èµ„æº"
-#: lib/pengine/common.c:58
+#: lib/pengine/common.c:59
msgid ""
"Whether a start failure should prevent a resource from being recovered on "
"the same node"
msgstr "是å¦é¿å…在åŒä¸€èŠ‚点上é‡å¯å¯åŠ¨å¤±è´¥çš„资æº"
-#: lib/pengine/common.c:60
+#: lib/pengine/common.c:61
msgid ""
"When true, the cluster will immediately ban a resource from a node if it "
"fails to start there. When false, the cluster will instead check the "
"resource's fail count against its migration-threshold."
msgstr ""
-"当为true,如果资æºå¯åŠ¨å¤±è´¥ï¼Œé›†ç¾¤å°†ç«‹å³ç¦æ­¢èŠ‚点å¯åŠ¨è¯¥èµ„æºï¼Œå½“为false,群集将根"
+"当为true,如果资æºå¯åŠ¨å¤±è´¥ï¼Œé›†ç¾¤å°†ç«‹å³ç¦æ­¢èŠ‚点å¯åŠ¨è¯¥èµ„æºï¼Œå½“为false,集群将根"
"æ®å…¶è¿ç§»é˜ˆå€¼æ¥æ£€æŸ¥èµ„æºçš„失败计数。"
-#: lib/pengine/common.c:67
+#: lib/pengine/common.c:68
msgid "Whether the cluster should check for active resources during start-up"
-msgstr "群集是å¦åœ¨å¯åŠ¨æœŸé—´æ£€æŸ¥è¿è¡Œèµ„æº"
+msgstr "集群是å¦åœ¨å¯åŠ¨æœŸé—´æ£€æŸ¥è¿è¡Œèµ„æº"
-#: lib/pengine/common.c:98
+#: lib/pengine/common.c:99
msgid ""
"*** Advanced Use Only *** Whether nodes may be fenced as part of recovery"
msgstr "*** Advanced Use Only *** 节点是å¦å¯ä»¥è¢« fence 以作为集群æ¢å¤çš„一部分"
-#: lib/pengine/common.c:100
+#: lib/pengine/common.c:101
msgid ""
"If false, unresponsive nodes are immediately assumed to be harmless, and "
"resources that were active on them may be recovered elsewhere. This can "
@@ -703,17 +731,17 @@ msgstr ""
"如果为false,则立å³å‡å®šæ— å“应的节点是无害的,并且å¯ä»¥åœ¨å…¶ä»–ä½ç½®æ¢å¤åœ¨å…¶ä¸Šæ´»åŠ¨"
"的资æºã€‚ è¿™å¯èƒ½ä¼šå¯¼è‡´ \"split-brain\" 情况,å¯èƒ½å¯¼è‡´æ•°æ®ä¸¢å¤±å’Œ/或æœåŠ¡ä¸å¯ç”¨ã€‚"
-#: lib/pengine/common.c:108
+#: lib/pengine/common.c:109
msgid ""
"Action to send to fence device when a node needs to be fenced (\"poweroff\" "
"is a deprecated alias for \"off\")"
msgstr "å‘é€åˆ° fence 设备的æ“作( \"poweroff\" 是 \"off \"的别å,ä¸å»ºè®®ä½¿ç”¨)"
-#: lib/pengine/common.c:115
+#: lib/pengine/common.c:116
msgid "*** Advanced Use Only *** Unused by Pacemaker"
msgstr "*** Advanced Use Only *** pacemaker未使用"
-#: lib/pengine/common.c:116
+#: lib/pengine/common.c:117
msgid ""
"This value is not used by Pacemaker, but is kept for backward compatibility, "
"and certain legacy fence agents might use it."
@@ -721,11 +749,11 @@ msgstr ""
"Pacemakerä¸ä½¿ç”¨æ­¤å€¼ï¼Œä½†ä¿ç•™æ­¤å€¼æ˜¯ä¸ºäº†å‘åŽå…¼å®¹ï¼ŒæŸäº›ä¼ ç»Ÿçš„fence 代ç†å¯èƒ½ä¼šä½¿ç”¨"
"它。"
-#: lib/pengine/common.c:122
+#: lib/pengine/common.c:123
msgid "Whether watchdog integration is enabled"
msgstr "是å¦å¯ç”¨watchdog集æˆè®¾ç½®"
-#: lib/pengine/common.c:123
+#: lib/pengine/common.c:124
msgid ""
"This is set automatically by the cluster according to whether SBD is "
"detected to be in use. User-configured values are ignored. The value `true` "
@@ -739,15 +767,15 @@ msgstr ""
"è¿™ç§æƒ…况下,无需明确é…ç½®fence资æºï¼Œå¦‚果需è¦fence时,基于watchdog的自我fence会"
"通过SBD执行。"
-#: lib/pengine/common.c:133
+#: lib/pengine/common.c:134
msgid "Allow performing fencing operations in parallel"
msgstr "å…许并行执行 fencing æ“作"
-#: lib/pengine/common.c:139
+#: lib/pengine/common.c:140
msgid "*** Advanced Use Only *** Whether to fence unseen nodes at start-up"
msgstr "*** 仅高级使用 *** 是å¦åœ¨å¯åŠ¨æ—¶fenceä¸å¯è§èŠ‚点"
-#: lib/pengine/common.c:140
+#: lib/pengine/common.c:141
msgid ""
"Setting this to false may lead to a \"split-brain\" situation,potentially "
"leading to data loss and/or service unavailability."
@@ -755,13 +783,13 @@ msgstr ""
"将此设置为 false å¯èƒ½ä¼šå¯¼è‡´ \"split-brain\" 的情况,å¯èƒ½å¯¼è‡´æ•°æ®ä¸¢å¤±å’Œ/或æœåŠ¡"
"ä¸å¯ç”¨ã€‚"
-#: lib/pengine/common.c:146
+#: lib/pengine/common.c:147
msgid ""
"Apply fencing delay targeting the lost nodes with the highest total resource "
"priority"
msgstr "针对具有最高总资æºä¼˜å…ˆçº§çš„丢失节点应用fencing延迟"
-#: lib/pengine/common.c:147
+#: lib/pengine/common.c:148
msgid ""
"Apply specified delay for the fencings that are targeting the lost nodes "
"with the highest total resource priority in case we don't have the majority "
@@ -781,11 +809,11 @@ msgstr ""
"加到此延迟。为了安全, 这个延迟应该明显大于 pcmk_delay_base/max 的最大设置值,"
"例如两å€ã€‚默认情况下,优先级fencing延迟已ç¦ç”¨ã€‚"
-#: lib/pengine/common.c:164
+#: lib/pengine/common.c:175
msgid "Maximum time for node-to-node communication"
msgstr "最大节点间通信时间"
-#: lib/pengine/common.c:165
+#: lib/pengine/common.c:176
msgid ""
"The node elected Designated Controller (DC) will consider an action failed "
"if it does not get a response from the node executing the action within this "
@@ -796,14 +824,14 @@ msgstr ""
"å“应,则会被选为指定控制器(DC)的节点认定为失败。\"正确\" 值将å–决于速度和您"
"的网络和集群节点的负载。"
-#: lib/pengine/common.c:174
+#: lib/pengine/common.c:185
#, fuzzy
msgid ""
"Maximum number of jobs that the cluster may execute in parallel across all "
"nodes"
msgstr "集群å¯ä»¥åœ¨æ‰€æœ‰èŠ‚点上并å‘执行的最大作业数"
-#: lib/pengine/common.c:176
+#: lib/pengine/common.c:187
msgid ""
"The \"correct\" value will depend on the speed and load of your network and "
"cluster nodes. If set to 0, the cluster will impose a dynamically calculated "
@@ -812,106 +840,106 @@ msgstr ""
"\"正确\" 值将å–决于速度和您的网络与集群节点的负载。如果设置为0,当任何节点具"
"有高负载时,集群将施加一个动æ€è®¡ç®—çš„é™åˆ¶ã€‚"
-#: lib/pengine/common.c:184
+#: lib/pengine/common.c:195
msgid ""
"The number of live migration actions that the cluster is allowed to execute "
"in parallel on a node (-1 means no limit)"
msgstr "å…许集群在一个节点上并行执行的实时è¿ç§»æ“作的数é‡(-1表示没有é™åˆ¶)"
-#: lib/pengine/common.c:192
+#: lib/pengine/common.c:203
#, fuzzy
msgid "Whether the cluster should stop all active resources"
-msgstr "群集是å¦åœ¨å¯åŠ¨æœŸé—´æ£€æŸ¥è¿è¡Œèµ„æº"
+msgstr "集群是å¦åœ¨å¯åŠ¨æœŸé—´æ£€æŸ¥è¿è¡Œèµ„æº"
-#: lib/pengine/common.c:198
+#: lib/pengine/common.c:209
msgid "Whether to stop resources that were removed from the configuration"
msgstr "是å¦åœæ­¢é…置已被删除的资æº"
-#: lib/pengine/common.c:204
+#: lib/pengine/common.c:215
msgid "Whether to cancel recurring actions removed from the configuration"
msgstr "是å¦å–消é…置已被删除的的é‡å¤æ“作"
-#: lib/pengine/common.c:210
+#: lib/pengine/common.c:221
msgid ""
"*** Deprecated *** Whether to remove stopped resources from the executor"
msgstr "***ä¸æŽ¨è***是å¦ä»Žpacemaker-execd 守护进程中清除已åœæ­¢çš„资æº"
-#: lib/pengine/common.c:212
+#: lib/pengine/common.c:223
msgid ""
"Values other than default are poorly tested and potentially dangerous. This "
"option will be removed in a future release."
msgstr "éžé»˜è®¤å€¼æœªç»è¿‡å……分的测试,有潜在的风险。该选项将在未æ¥çš„版本中删除。"
-#: lib/pengine/common.c:220
+#: lib/pengine/common.c:231
msgid "The number of scheduler inputs resulting in errors to save"
msgstr "ä¿å­˜å¯¼è‡´é”™è¯¯çš„调度程åºè¾“入的数é‡"
-#: lib/pengine/common.c:221 lib/pengine/common.c:227 lib/pengine/common.c:233
+#: lib/pengine/common.c:232 lib/pengine/common.c:238 lib/pengine/common.c:244
msgid "Zero to disable, -1 to store unlimited."
msgstr "零表示ç¦ç”¨ï¼Œ-1表示存储ä¸å—é™åˆ¶ã€‚"
-#: lib/pengine/common.c:226
+#: lib/pengine/common.c:237
msgid "The number of scheduler inputs resulting in warnings to save"
msgstr "ä¿å­˜å¯¼è‡´è­¦å‘Šçš„调度程åºè¾“入的数é‡"
-#: lib/pengine/common.c:232
+#: lib/pengine/common.c:243
msgid "The number of scheduler inputs without errors or warnings to save"
msgstr "ä¿å­˜æ²¡æœ‰é”™è¯¯æˆ–警告的调度程åºè¾“入的数é‡"
-#: lib/pengine/common.c:243
+#: lib/pengine/common.c:254
#, fuzzy
msgid "How cluster should react to node health attributes"
msgstr "集群节点对节点å¥åº·å±žæ€§å¦‚何å应"
-#: lib/pengine/common.c:244
+#: lib/pengine/common.c:255
msgid ""
"Requires external entities to create node attributes (named with the prefix "
"\"#health\") with values \"red\", \"yellow\", or \"green\"."
msgstr ""
"需è¦å¤–部实体创建具有“redâ€,“yellowâ€æˆ–“greenâ€å€¼çš„节点属性(å‰ç¼€ä¸ºâ€œ#healthâ€)"
-#: lib/pengine/common.c:251
+#: lib/pengine/common.c:262
msgid "Base health score assigned to a node"
msgstr "分é…给节点的基本å¥åº·åˆ†æ•°"
-#: lib/pengine/common.c:252
+#: lib/pengine/common.c:263
msgid "Only used when \"node-health-strategy\" is set to \"progressive\"."
msgstr "仅在“node-health-strategyâ€è®¾ç½®ä¸ºâ€œprogressiveâ€æ—¶ä½¿ç”¨ã€‚"
-#: lib/pengine/common.c:257
+#: lib/pengine/common.c:268
msgid "The score to use for a node health attribute whose value is \"green\""
msgstr "为节点å¥åº·å±žæ€§å€¼ä¸ºâ€œgreenâ€æ‰€ä½¿ç”¨çš„分数"
-#: lib/pengine/common.c:258 lib/pengine/common.c:264 lib/pengine/common.c:270
+#: lib/pengine/common.c:269 lib/pengine/common.c:275 lib/pengine/common.c:281
msgid ""
"Only used when \"node-health-strategy\" is set to \"custom\" or \"progressive"
"\"."
msgstr "仅在“node-health-strategyâ€è®¾ç½®ä¸ºâ€œcustomâ€æˆ–“progressiveâ€æ—¶ä½¿ç”¨ã€‚"
-#: lib/pengine/common.c:263
+#: lib/pengine/common.c:274
msgid "The score to use for a node health attribute whose value is \"yellow\""
msgstr "为节点å¥åº·å±žæ€§å€¼ä¸ºâ€œyellowâ€æ‰€ä½¿ç”¨çš„分数"
-#: lib/pengine/common.c:269
+#: lib/pengine/common.c:280
msgid "The score to use for a node health attribute whose value is \"red\""
msgstr "为节点å¥åº·å±žæ€§å€¼ä¸ºâ€œredâ€æ‰€ä½¿ç”¨çš„分数"
-#: lib/pengine/common.c:278
+#: lib/pengine/common.c:289
#, fuzzy
msgid "How the cluster should allocate resources to nodes"
-msgstr "群集应该如何分é…资æºåˆ°èŠ‚点"
+msgstr "集群应该如何分é…资æºåˆ°èŠ‚点"
#: tools/crm_resource.c:258
#, c-format
msgid "Aborting because no messages received in %d seconds"
msgstr "中止,因为在%d秒内没有接收到消æ¯"
-#: tools/crm_resource.c:915
+#: tools/crm_resource.c:921
#, c-format
msgid "Invalid check level setting: %s"
msgstr "无效的检查级别设置:%s"
-#: tools/crm_resource.c:999
+#: tools/crm_resource.c:1008
#, c-format
msgid ""
"Resource '%s' not moved: active in %d locations (promoted in %d).\n"
@@ -923,7 +951,7 @@ msgstr ""
"è‹¥è¦é˜»æ­¢'%s'在特定ä½ç½®è¿è¡Œï¼Œè¯·æŒ‡å®šä¸€ä¸ªèŠ‚点。若è¦é˜²æ­¢'%s'在指定ä½ç½®å‡çº§ï¼ŒæŒ‡å®š"
"一个节点并使用--promoted选项"
-#: tools/crm_resource.c:1010
+#: tools/crm_resource.c:1019
#, c-format
msgid ""
"Resource '%s' not moved: active in %d locations.\n"
@@ -932,151 +960,154 @@ msgstr ""
"资æº%s未移动:在%d个ä½ç½®è¿è¡Œ\n"
"è‹¥è¦é˜²æ­¢'%s'è¿è¡Œåœ¨ç‰¹å®šä½ç½®ï¼ŒæŒ‡å®šä¸€ä¸ªèŠ‚点"
-#: tools/crm_resource.c:1085
+#: tools/crm_resource.c:1096
#, c-format
msgid "Could not get modified CIB: %s\n"
msgstr "无法获得修改的CIB:%s\n"
-#: tools/crm_resource.c:1119
-msgid "You need to specify a resource type with -t"
-msgstr "需è¦ä½¿ç”¨-t指定资æºç±»åž‹"
-
-#: tools/crm_resource.c:1162
-#, c-format
-msgid "No agents found for standard '%s'"
-msgstr "没有å‘现指定的'%s'标准代ç†"
-
-#: tools/crm_resource.c:1165
-#, fuzzy, c-format
-msgid "No agents found for standard '%s' and provider '%s'"
-msgstr "没有å‘现指定的标准%så’Œæ供者%S的资æºä»£ç†"
-
-#: tools/crm_resource.c:1232
-#, c-format
-msgid "No %s found for %s"
-msgstr "没有å‘现%s符åˆ%s"
-
-#: tools/crm_resource.c:1237
-#, c-format
-msgid "No %s found"
-msgstr "没有å‘现%s"
-
-#: tools/crm_resource.c:1297
+#: tools/crm_resource.c:1174
#, c-format
msgid "No cluster connection to Pacemaker Remote node %s detected"
msgstr "未检测到至pacemaker远程节点%s的集群连接"
-#: tools/crm_resource.c:1358
+#: tools/crm_resource.c:1235
msgid "Must specify -t with resource type"
msgstr "需è¦ä½¿ç”¨-t指定资æºç±»åž‹"
-#: tools/crm_resource.c:1364
+#: tools/crm_resource.c:1241
msgid "Must supply -v with new value"
msgstr "必须使用-v指定新值"
-#: tools/crm_resource.c:1396
+#: tools/crm_resource.c:1273
msgid "Could not create executor connection"
msgstr "无法创建到pacemaker-execd守护进程的连接"
-#: tools/crm_resource.c:1421
+#: tools/crm_resource.c:1298
#, fuzzy, c-format
msgid "Metadata query for %s failed: %s"
msgstr ",查询%s的元数æ®å¤±è´¥: %s\n"
-#: tools/crm_resource.c:1427
+#: tools/crm_resource.c:1304
#, c-format
msgid "'%s' is not a valid agent specification"
msgstr "'%s' 是一个无效的代ç†"
-#: tools/crm_resource.c:1440
+#: tools/crm_resource.c:1317
msgid "--resource cannot be used with --class, --agent, and --provider"
msgstr "--resource ä¸èƒ½ä¸Ž --class, --agent, --provider一起使用"
-#: tools/crm_resource.c:1445
+#: tools/crm_resource.c:1322
msgid ""
"--class, --agent, and --provider can only be used with --validate and --"
"force-*"
msgstr "--class, --agentå’Œ--provideråªèƒ½è¢«ç”¨äºŽ--validateå’Œ--force-*"
-#: tools/crm_resource.c:1454
+#: tools/crm_resource.c:1331
msgid "stonith does not support providers"
msgstr "stonith ä¸æ”¯æŒæ供者"
-#: tools/crm_resource.c:1458
+#: tools/crm_resource.c:1335
#, c-format
msgid "%s is not a known stonith agent"
msgstr "%s ä¸æ˜¯ä¸€ä¸ªå·²çŸ¥stonith代ç†"
-#: tools/crm_resource.c:1463
+#: tools/crm_resource.c:1340
#, c-format
msgid "%s:%s:%s is not a known resource"
msgstr "%s:%s:%s ä¸æ˜¯ä¸€ä¸ªå·²çŸ¥èµ„æº"
-#: tools/crm_resource.c:1577
+#: tools/crm_resource.c:1454
#, c-format
msgid "Error creating output format %s: %s"
msgstr "创建输出格å¼é”™è¯¯ %s:%s"
-#: tools/crm_resource.c:1604
+#: tools/crm_resource.c:1481
msgid "--expired requires --clear or -U"
msgstr "--expired需è¦å’Œ--clear或-U一起使用"
-#: tools/crm_resource.c:1621
+#: tools/crm_resource.c:1498
#, c-format
msgid "Error parsing '%s' as a name=value pair"
msgstr "'%s'解æžé”™è¯¯ï¼Œæ ¼å¼ä¸ºname=value"
-#: tools/crm_resource.c:1718
+#: tools/crm_resource.c:1595
msgid "Must supply a resource id with -r"
msgstr "必须使用-r指定资æºid"
-#: tools/crm_resource.c:1724
+#: tools/crm_resource.c:1601
msgid "Must supply a node name with -N"
msgstr "必须使用-N指定节点å称"
-#: tools/crm_resource.c:1742
+#: tools/crm_resource.c:1619
msgid "Could not create CIB connection"
msgstr "无法创建到CIB的连接"
-#: tools/crm_resource.c:1750
+#: tools/crm_resource.c:1627
#, c-format
msgid "Could not connect to the CIB: %s"
msgstr "ä¸èƒ½è¿žæŽ¥åˆ°CIB:%s"
-#: tools/crm_resource.c:1771
+#: tools/crm_resource.c:1648
#, c-format
msgid "Resource '%s' not found"
msgstr "没有å‘现'%s'资æº"
-#: tools/crm_resource.c:1783
+#: tools/crm_resource.c:1660
#, c-format
msgid "Cannot operate on clone resource instance '%s'"
msgstr "ä¸èƒ½æ“作克隆资æºå®žä¾‹'%s'"
-#: tools/crm_resource.c:1795
+#: tools/crm_resource.c:1672
#, c-format
msgid "Node '%s' not found"
msgstr "没有å‘现%s节点"
-#: tools/crm_resource.c:1806 tools/crm_resource.c:1815
+#: tools/crm_resource.c:1683
#, c-format
msgid "Error connecting to the controller: %s"
msgstr "连接到控制器错误:%s"
-#: tools/crm_resource.c:2064
+#: tools/crm_resource.c:1692
+#, fuzzy, c-format
+msgid "Error connecting to %s: %s"
+msgstr "连接到控制器错误:%s"
+
+#: tools/crm_resource.c:1950
msgid "You need to supply a value with the -v option"
msgstr "需è¦ä½¿ç”¨-v选项æ供一个值"
-#: tools/crm_resource.c:2119
+#: tools/crm_resource.c:2006
+msgid "You need to specify a resource type with -t"
+msgstr "需è¦ä½¿ç”¨-t指定资æºç±»åž‹"
+
+#: tools/crm_resource.c:2013
+#, fuzzy, c-format
+msgid "Could not delete resource %s: %s"
+msgstr "无法删除资æºï¼š%s:%s"
+
+#: tools/crm_resource.c:2023
#, c-format
msgid "Unimplemented command: %d"
msgstr "无效的命令:%d"
-#: tools/crm_resource.c:2149
+#: tools/crm_resource.c:2053
#, c-format
msgid "Error performing operation: %s"
msgstr "执行æ“作错误:%s"
+#~ msgid "No agents found for standard '%s'"
+#~ msgstr "没有å‘现指定的'%s'标准代ç†"
+
+#, fuzzy
+#~ msgid "No agents found for standard '%s' and provider '%s'"
+#~ msgstr "没有å‘现指定的标准%så’Œæ供者%S的资æºä»£ç†"
+
+#~ msgid "No %s found for %s"
+#~ msgstr "没有å‘现%s符åˆ%s"
+
+#~ msgid "No %s found"
+#~ msgstr "没有å‘现%s"
+
#~ msgid ""
#~ "If nonzero, along with `have-watchdog=true` automatically set by the "
#~ "cluster, when fencing is required, watchdog-based self-fencing will be "
diff --git a/python/Makefile.am b/python/Makefile.am
index 6cefb63..803fb0c 100644
--- a/python/Makefile.am
+++ b/python/Makefile.am
@@ -11,10 +11,16 @@ MAINTAINERCLEANFILES = Makefile.in
EXTRA_DIST = pylintrc
-SUBDIRS = pacemaker tests
+SUBDIRS = pacemaker \
+ tests
+.PHONY: check-local
check-local:
- $(PYTHON) -m unittest discover -v -s tests
+ if [ "x$(top_srcdir)" != "x$(top_builddir)" ]; then \
+ cp -r $(top_srcdir)/python/* $(abs_top_builddir)/python/; \
+ fi
+ PYTHONPATH=$(top_builddir)/python $(PYTHON) -m unittest discover -v -s $(top_builddir)/python/tests
+.PHONY: pylint
pylint:
pylint $(SUBDIRS)
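The reworked check-local recipe makes `make check` work from an out-of-tree (VPATH) build: when the source and build directories differ it copies the Python tree into the build directory, then runs unittest discovery with PYTHONPATH pointing at the build tree so the pacemaker package is importable from there. The equivalent manual invocation, assuming python3 and a build directory laid out like the source tree:

    # Run the same discovery by hand (paths are illustrative)
    PYTHONPATH=./python python3 -m unittest discover -v -s ./python/tests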
diff --git a/python/pacemaker/Makefile.am b/python/pacemaker/Makefile.am
index f209bba..df9cc46 100644
--- a/python/pacemaker/Makefile.am
+++ b/python/pacemaker/Makefile.am
@@ -9,8 +9,8 @@
MAINTAINERCLEANFILES = Makefile.in
-pkgpython_PYTHON = __init__.py \
- exitstatus.py
+pkgpython_PYTHON = __init__.py \
+ exitstatus.py
nodist_pkgpython_PYTHON = buildoptions.py
diff --git a/python/pacemaker/_cts/CTS.py b/python/pacemaker/_cts/CTS.py
index 4ca7e59..166ea10 100644
--- a/python/pacemaker/_cts/CTS.py
+++ b/python/pacemaker/_cts/CTS.py
@@ -10,6 +10,7 @@ import traceback
from pacemaker.exitstatus import ExitStatus
from pacemaker._cts.environment import EnvFactory
+from pacemaker._cts.input import should_continue
from pacemaker._cts.logging import LogFactory
from pacemaker._cts.remote import RemoteFactory
@@ -32,7 +33,7 @@ class CtsLab:
def __init__(self, args=None):
""" Create a new CtsLab instance. This class can be treated kind
of like a dictionary due to the presence of typical dict functions
- like has_key, __getitem__, and __setitem__. However, it is not a
+ like __contains__, __getitem__, and __setitem__. However, it is not a
dictionary so do not rely on standard dictionary behavior.
Arguments:
@@ -48,10 +49,12 @@ class CtsLab:
self._env.dump()
- def has_key(self, key):
+ def __contains__(self, key):
""" Does the given environment key exist? """
- return key in list(self._env.keys())
+ # pylint gets confused because of EnvFactory here.
+ # pylint: disable=unsupported-membership-test
+ return key in self._env
def __getitem__(self, key):
""" Return the given environment key, or raise KeyError if it does
@@ -90,7 +93,7 @@ class CtsLab:
for node in self._env["nodes"]:
self._logger.log(" * %s" % (node))
- if not scenario.SetUp():
+ if not scenario.setup():
return ExitStatus.ERROR
# We want to alert on any exceptions caused by running a scenario, so
@@ -103,16 +106,16 @@ class CtsLab:
self._logger.traceback(traceback)
scenario.summarize()
- scenario.TearDown()
+ scenario.teardown()
return ExitStatus.ERROR
- scenario.TearDown()
+ scenario.teardown()
scenario.summarize()
- if scenario.Stats["failure"] > 0:
+ if scenario.stats["failure"] > 0:
return ExitStatus.ERROR
- if scenario.Stats["success"] != iterations:
+ if scenario.stats["success"] != iterations:
self._logger.log("No failure count but success != requested iterations")
return ExitStatus.ERROR
@@ -177,15 +180,7 @@ class NodeStatus:
timeout -= 1
LogFactory().log("%s did not come up within %d tries" % (node, initial_timeout))
- if self._env["continue"]:
- answer = "Y"
- else:
- try:
- answer = input('Continue? [nY]')
- except EOFError:
- answer = "n"
-
- if answer and answer == "n":
+ if not should_continue(self._env["continue"]):
raise ValueError("%s did not come up within %d tries" % (node, initial_timeout))
return False
@@ -241,4 +236,4 @@ class Process:
(rc, _) = self._cm.rsh(node, "killall -9 %s" % self.name)
if rc != 0:
- self._cm.log ("ERROR: Kill %s failed on node %s" % (self.name, node))
+ self._cm.log("ERROR: Kill %s failed on node %s" % (self.name, node))
diff --git a/python/pacemaker/_cts/Makefile.am b/python/pacemaker/_cts/Makefile.am
index 3b3e3f8..efb0019 100644
--- a/python/pacemaker/_cts/Makefile.am
+++ b/python/pacemaker/_cts/Makefile.am
@@ -11,14 +11,6 @@ MAINTAINERCLEANFILES = Makefile.in
pkgpythondir = $(pythondir)/$(PACKAGE)/_cts
-pkgpython_PYTHON = CTS.py \
- __init__.py \
- corosync.py \
- environment.py \
- errors.py \
- logging.py \
- patterns.py \
- process.py \
- remote.py \
- test.py \
- watcher.py
+pkgpython_PYTHON = $(wildcard *.py)
+
+SUBDIRS = tests
diff --git a/python/pacemaker/_cts/audits.py b/python/pacemaker/_cts/audits.py
new file mode 100644
index 0000000..dc66f96
--- /dev/null
+++ b/python/pacemaker/_cts/audits.py
@@ -0,0 +1,1029 @@
+""" Auditing classes for Pacemaker's Cluster Test Suite (CTS) """
+
+__all__ = ["AuditConstraint", "AuditResource", "ClusterAudit", "audit_list"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+import re
+import time
+import uuid
+
+from pacemaker.buildoptions import BuildOptions
+from pacemaker._cts.input import should_continue
+from pacemaker._cts.watcher import LogKind, LogWatcher
+
+
+class ClusterAudit:
+ """ The base class for various kinds of auditors. Specific audit implementations
+ should be built on top of this one. Audits can do all kinds of checks on the
+ system. The basic interface for callers is the `__call__` method, which
+ returns True if the audit passes and False if it fails.
+ """
+
+ def __init__(self, cm):
+ """ Create a new ClusterAudit instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ # pylint: disable=invalid-name
+ self._cm = cm
+ self.name = None
+
+ def __call__(self):
+ raise NotImplementedError
+
+ def is_applicable(self):
+ """ Return True if this audit is applicable in the current test configuration.
+ This method must be implemented by all subclasses.
+ """
+
+ raise NotImplementedError
+
+ def log(self, args):
+ """ Log a message """
+
+ self._cm.log("audit: %s" % args)
+
+ def debug(self, args):
+ """ Log a debug message """
+
+ self._cm.debug("audit: %s" % args)
+
+
+class LogAudit(ClusterAudit):
+ """ Audit each cluster node to verify that some logging system is usable.
+ This is done by logging a unique test message and then verifying that
+ we can read back that test message using logging tools.
+ """
+
+ def __init__(self, cm):
+ """ Create a new LogAudit instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ ClusterAudit.__init__(self, cm)
+ self.name = "LogAudit"
+
+ def _restart_cluster_logging(self, nodes=None):
+ """ Restart logging on the given nodes, or all if none are given """
+
+ if not nodes:
+ nodes = self._cm.env["nodes"]
+
+ self._cm.debug("Restarting logging on: %r" % nodes)
+
+ for node in nodes:
+ if self._cm.env["have_systemd"]:
+ (rc, _) = self._cm.rsh(node, "systemctl stop systemd-journald.socket")
+ if rc != 0:
+ self._cm.log("ERROR: Cannot stop 'systemd-journald' on %s" % node)
+
+ (rc, _) = self._cm.rsh(node, "systemctl start systemd-journald.service")
+ if rc != 0:
+ self._cm.log("ERROR: Cannot start 'systemd-journald' on %s" % node)
+
+ (rc, _) = self._cm.rsh(node, "service %s restart" % self._cm.env["syslogd"])
+ if rc != 0:
+ self._cm.log("ERROR: Cannot restart '%s' on %s" % (self._cm.env["syslogd"], node))
+
+ def _create_watcher(self, patterns, kind):
+ """ Create a new LogWatcher instance for the given patterns """
+
+ watch = LogWatcher(self._cm.env["LogFileName"], patterns,
+ self._cm.env["nodes"], kind, "LogAudit", 5,
+ silent=True)
+ watch.set_watch()
+ return watch
+
+ def _test_logging(self):
+ """ Perform the log audit """
+
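+        # Strategy: log a uniquely tagged test message on every node via
+        # logger(1), then watch each candidate log source for that tag; when
+        # no specific watcher is configured, the first source that shows the
+        # message is recorded in env["LogWatcher"] for the rest of the run.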
+ patterns = []
+ prefix = "Test message from"
+ suffix = str(uuid.uuid4())
+ watch = {}
+
+ for node in self._cm.env["nodes"]:
+ # Look for the node name in two places to make sure
+ # that syslog is logging with the correct hostname
+ m = re.search("^([^.]+).*", node)
+ if m:
+ simple = m.group(1)
+ else:
+ simple = node
+
+ patterns.append("%s.*%s %s %s" % (simple, prefix, node, suffix))
+
+ watch_pref = self._cm.env["LogWatcher"]
+ if watch_pref == LogKind.ANY:
+ kinds = [LogKind.FILE]
+ if self._cm.env["have_systemd"]:
+ kinds += [LogKind.JOURNAL]
+ kinds += [LogKind.REMOTE_FILE]
+ for k in kinds:
+ watch[k] = self._create_watcher(patterns, k)
+ self._cm.log("Logging test message with identifier %s" % suffix)
+ else:
+ watch[watch_pref] = self._create_watcher(patterns, watch_pref)
+
+ for node in self._cm.env["nodes"]:
+ cmd = "logger -p %s.info %s %s %s" % (self._cm.env["SyslogFacility"], prefix, node, suffix)
+
+ (rc, _) = self._cm.rsh(node, cmd, synchronous=False, verbose=0)
+ if rc != 0:
+ self._cm.log("ERROR: Cannot execute remote command [%s] on %s" % (cmd, node))
+
+ for k in list(watch.keys()):
+ w = watch[k]
+ if watch_pref == LogKind.ANY:
+ self._cm.log("Checking for test message in %s logs" % k)
+ w.look_for_all(silent=True)
+ if w.unmatched:
+ for regex in w.unmatched:
+ self._cm.log("Test message [%s] not found in %s logs" % (regex, w.kind))
+ else:
+ if watch_pref == LogKind.ANY:
+ self._cm.log("Found test message in %s logs" % k)
+ self._cm.env["LogWatcher"] = k
+                return True
+
+ return False
+
+ def __call__(self):
+ max_attempts = 3
+ attempt = 0
+
+ self._cm.ns.wait_for_all_nodes(self._cm.env["nodes"])
+ while attempt <= max_attempts and not self._test_logging():
+ attempt += 1
+ self._restart_cluster_logging()
+ time.sleep(60*attempt)
+
+ if attempt > max_attempts:
+ self._cm.log("ERROR: Cluster logging unrecoverable.")
+ return False
+
+ return True
+
+ def is_applicable(self):
+ """ Return True if this audit is applicable in the current test configuration. """
+
+ if self._cm.env["LogAuditDisabled"]:
+ return False
+
+ return True
+
+
+class DiskAudit(ClusterAudit):
+ """ Audit disk usage on cluster nodes to verify that there is enough free
+ space left on whichever mounted file system holds the logs.
+
+ Warn on: less than 100 MB or 10% of free space
+ Error on: less than 10 MB or 5% of free space
+ """
+
+ def __init__(self, cm):
+ """ Create a new DiskAudit instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ ClusterAudit.__init__(self, cm)
+ self.name = "DiskspaceAudit"
+
+ def __call__(self):
+ result = True
+
+ # @TODO Use directory of PCMK_logfile if set on host
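+        # The pipeline below prints "<use%> <available>" for the filesystem
+        # holding LOG_DIR, with the '%' and 'M' suffixes stripped (e.g. "42 1234"),
+        # and is parsed further down as used_percent and remaining_mb.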
+ dfcmd = "df -BM %s | tail -1 | awk '{print $(NF-1)\" \"$(NF-2)}' | tr -d 'M%%'" % BuildOptions.LOG_DIR
+
+ self._cm.ns.wait_for_all_nodes(self._cm.env["nodes"])
+ for node in self._cm.env["nodes"]:
+ (_, dfout) = self._cm.rsh(node, dfcmd, verbose=1)
+ if not dfout:
+ self._cm.log("ERROR: Cannot execute remote df command [%s] on %s" % (dfcmd, node))
+ continue
+
+ dfout = dfout[0].strip()
+
+ try:
+ (used, remain) = dfout.split()
+ used_percent = int(used)
+ remaining_mb = int(remain)
+            except (ValueError, TypeError):
+                # used/remain may be unbound if the unpacking above failed,
+                # so report only the raw df output
+                self._cm.log("Warning: df output '%s' from %s was invalid"
+                             % (dfout, node))
+ else:
+ if remaining_mb < 10 or used_percent > 95:
+ self._cm.log("CRIT: Out of log disk space on %s (%d%% / %dMB)"
+ % (node, used_percent, remaining_mb))
+ result = False
+
+ if not should_continue(self._cm.env):
+ raise ValueError("Disk full on %s" % node)
+
+ elif remaining_mb < 100 or used_percent > 90:
+ self._cm.log("WARN: Low on log disk space (%dMB) on %s" % (remaining_mb, node))
+
+ return result
+
+ def is_applicable(self):
+ """ Return True if this audit is applicable in the current test configuration. """
+
+ return True
+
+
+class FileAudit(ClusterAudit):
+ """ Audit the filesystem looking for various failure conditions:
+
+ * The presence of core dumps from corosync or Pacemaker daemons
+ * Stale IPC files
+ """
+
+ def __init__(self, cm):
+ """ Create a new FileAudit instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ ClusterAudit.__init__(self, cm)
+ self.known = []
+ self.name = "FileAudit"
+
+ def __call__(self):
+ result = True
+
+ self._cm.ns.wait_for_all_nodes(self._cm.env["nodes"])
+ for node in self._cm.env["nodes"]:
+
+ (_, lsout) = self._cm.rsh(node, "ls -al /var/lib/pacemaker/cores/* | grep core.[0-9]", verbose=1)
+ for line in lsout:
+ line = line.strip()
+
+ if line not in self.known:
+ result = False
+ self.known.append(line)
+ self._cm.log("Warning: Pacemaker core file on %s: %s" % (node, line))
+
+ (_, lsout) = self._cm.rsh(node, "ls -al /var/lib/corosync | grep core.[0-9]", verbose=1)
+ for line in lsout:
+ line = line.strip()
+
+ if line not in self.known:
+ result = False
+ self.known.append(line)
+ self._cm.log("Warning: Corosync core file on %s: %s" % (node, line))
+
+ if self._cm.expected_status.get(node) == "down":
+ clean = False
+ (_, lsout) = self._cm.rsh(node, "ls -al /dev/shm | grep qb-", verbose=1)
+
+ for line in lsout:
+ result = False
+ clean = True
+ self._cm.log("Warning: Stale IPC file on %s: %s" % (node, line))
+
+ if clean:
+ (_, lsout) = self._cm.rsh(node, "ps axf | grep -e pacemaker -e corosync", verbose=1)
+
+ for line in lsout:
+ self._cm.debug("ps[%s]: %s" % (node, line))
+
+ self._cm.rsh(node, "rm -rf /dev/shm/qb-*")
+
+ else:
+ self._cm.debug("Skipping %s" % node)
+
+ return result
+
+ def is_applicable(self):
+ """ Return True if this audit is applicable in the current test configuration. """
+
+ return True
+
+
+class AuditResource:
+ """ A base class for storing information about a cluster resource """
+
+ def __init__(self, cm, line):
+ """ Create a new AuditResource instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ line -- One line of output from `crm_resource` describing a single
+ resource
+ """
+
+ # pylint: disable=invalid-name
+ fields = line.split()
+ self._cm = cm
+ self.line = line
+ self.type = fields[1]
+ self.id = fields[2]
+ self.clone_id = fields[3]
+ self.parent = fields[4]
+ self.rprovider = fields[5]
+ self.rclass = fields[6]
+ self.rtype = fields[7]
+ self.host = fields[8]
+ self.needs_quorum = fields[9]
+ self.flags = int(fields[10])
+ self.flags_s = fields[11]
+
+ if self.parent == "NA":
+ self.parent = None
+
+ @property
+ def unique(self):
+ """ Is this resource unique? """
+
+ return self.flags & 0x20
+
+ @property
+ def orphan(self):
+ """ Is this resource an orphan? """
+
+ return self.flags & 0x01
+
+ @property
+ def managed(self):
+ """ Is this resource managed by the cluster? """
+
+ return self.flags & 0x02
+
+
+class AuditConstraint:
+ """ A base class for storing information about a cluster constraint """
+
+ def __init__(self, cm, line):
+ """ Create a new AuditConstraint instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ line -- One line of output from `crm_resource` describing a single
+ constraint
+ """
+
+ # pylint: disable=invalid-name
+ fields = line.split()
+ self._cm = cm
+ self.line = line
+ self.type = fields[1]
+ self.id = fields[2]
+ self.rsc = fields[3]
+ self.target = fields[4]
+ self.score = fields[5]
+ self.rsc_role = fields[6]
+ self.target_role = fields[7]
+
+ if self.rsc_role == "NA":
+ self.rsc_role = None
+
+ if self.target_role == "NA":
+ self.target_role = None
+
+
+class PrimitiveAudit(ClusterAudit):
+ """ Audit primitive resources to verify a variety of conditions, including that
+ they are active and managed only when expected; they are active on the
+        expected cluster node; and that they are not orphaned.
+ """
+
+ def __init__(self, cm):
+ """ Create a new PrimitiveAudit instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ ClusterAudit.__init__(self, cm)
+ self.name = "PrimitiveAudit"
+
+ self._active_nodes = []
+ self._constraints = []
+ self._inactive_nodes = []
+ self._resources = []
+ self._target = None
+
+ def _audit_resource(self, resource, quorum):
+ """ Perform the audit of a single resource """
+
+ rc = True
+ active = self._cm.resource_location(resource.id)
+
+ if len(active) == 1:
+ if quorum:
+ self.debug("Resource %s active on %r" % (resource.id, active))
+
+ elif resource.needs_quorum == 1:
+ self._cm.log("Resource %s active without quorum: %r" % (resource.id, active))
+ rc = False
+
+ elif not resource.managed:
+ self._cm.log("Resource %s not managed. Active on %r" % (resource.id, active))
+
+ elif not resource.unique:
+ # TODO: Figure out a clever way to actually audit these resource types
+ if len(active) > 1:
+ self.debug("Non-unique resource %s is active on: %r" % (resource.id, active))
+ else:
+ self.debug("Non-unique resource %s is not active" % resource.id)
+
+ elif len(active) > 1:
+ self._cm.log("Resource %s is active multiple times: %r" % (resource.id, active))
+ rc = False
+
+ elif resource.orphan:
+ self.debug("Resource %s is an inactive orphan" % resource.id)
+
+ elif not self._inactive_nodes:
+ self._cm.log("WARN: Resource %s not served anywhere" % resource.id)
+ rc = False
+
+ elif self._cm.env["warn-inactive"]:
+ if quorum or not resource.needs_quorum:
+ self._cm.log("WARN: Resource %s not served anywhere (Inactive nodes: %r)"
+ % (resource.id, self._inactive_nodes))
+ else:
+ self.debug("Resource %s not served anywhere (Inactive nodes: %r)"
+ % (resource.id, self._inactive_nodes))
+
+ elif quorum or not resource.needs_quorum:
+ self.debug("Resource %s not served anywhere (Inactive nodes: %r)"
+ % (resource.id, self._inactive_nodes))
+
+ return rc
+
+ def _setup(self):
+        """ Verify cluster nodes are active, and collect resource and constraint
+ information used for performing the audit.
+ """
+
+ for node in self._cm.env["nodes"]:
+ if self._cm.expected_status[node] == "up":
+ self._active_nodes.append(node)
+ else:
+ self._inactive_nodes.append(node)
+
+ for node in self._cm.env["nodes"]:
+ if self._target is None and self._cm.expected_status[node] == "up":
+ self._target = node
+
+ if not self._target:
+ # TODO: In Pacemaker 1.0 clusters we'll be able to run crm_resource
+ # with CIB_file=/path/to/cib.xml even when the cluster isn't running
+ self.debug("No nodes active - skipping %s" % self.name)
+ return False
+
+ (_, lines) = self._cm.rsh(self._target, "crm_resource -c", verbose=1)
+
+ for line in lines:
+ if re.search("^Resource", line):
+ self._resources.append(AuditResource(self._cm, line))
+ elif re.search("^Constraint", line):
+ self._constraints.append(AuditConstraint(self._cm, line))
+ else:
+ self._cm.log("Unknown entry: %s" % line)
+
+ return True
+
+ def __call__(self):
+ result = True
+
+ if not self._setup():
+ return result
+
+ quorum = self._cm.has_quorum(None)
+ for resource in self._resources:
+ if resource.type == "primitive" and not self._audit_resource(resource, quorum):
+ result = False
+
+ return result
+
+ def is_applicable(self):
+ """ Return True if this audit is applicable in the current test configuration. """
+
+ # @TODO Due to long-ago refactoring, this name test would never match,
+ # so this audit (and those derived from it) would never run.
+ # Uncommenting the next lines fixes the name test, but that then
+ # exposes pre-existing bugs that need to be fixed.
+ #if self._cm["Name"] == "crm-corosync":
+ # return True
+ return False
+
+
+class GroupAudit(PrimitiveAudit):
+    """ Audit group resources to verify that each group's child primitive
+        resources are active on the expected cluster node.
+ """
+
+ def __init__(self, cm):
+ """ Create a new GroupAudit instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ PrimitiveAudit.__init__(self, cm)
+ self.name = "GroupAudit"
+
+ def __call__(self):
+ result = True
+
+ if not self._setup():
+ return result
+
+ for group in self._resources:
+ if group.type != "group":
+ continue
+
+ first_match = True
+ group_location = None
+
+ for child in self._resources:
+ if child.parent != group.id:
+ continue
+
+ nodes = self._cm.resource_location(child.id)
+
+ if first_match and len(nodes) > 0:
+ group_location = nodes[0]
+
+ first_match = False
+
+ if len(nodes) > 1:
+ result = False
+ self._cm.log("Child %s of %s is active more than once: %r"
+ % (child.id, group.id, nodes))
+
+ elif not nodes:
+ # Groups are allowed to be partially active
+ # However we do need to make sure later children aren't running
+ group_location = None
+ self.debug("Child %s of %s is stopped" % (child.id, group.id))
+
+ elif nodes[0] != group_location:
+ result = False
+ self._cm.log("Child %s of %s is active on the wrong node (%s) expected %s"
+ % (child.id, group.id, nodes[0], group_location))
+ else:
+ self.debug("Child %s of %s is active on %s" % (child.id, group.id, nodes[0]))
+
+ return result
+
+
+class CloneAudit(PrimitiveAudit):
+ """ Audit clone resources. NOTE: Currently, this class does not perform
+ any actual audit functions.
+ """
+
+ def __init__(self, cm):
+ """ Create a new CloneAudit instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ PrimitiveAudit.__init__(self, cm)
+ self.name = "CloneAudit"
+
+ def __call__(self):
+ result = True
+
+ if not self._setup():
+ return result
+
+ for clone in self._resources:
+ if clone.type != "clone":
+ continue
+
+ for child in self._resources:
+ if child.parent == clone.id and child.type == "primitive":
+ self.debug("Checking child %s of %s..." % (child.id, clone.id))
+ # Check max and node_max
+ # Obtain with:
+ # crm_resource -g clone_max --meta -r child.id
+ # crm_resource -g clone_node_max --meta -r child.id
+
+ return result
+
+
+class ColocationAudit(PrimitiveAudit):
+ """ Audit cluster resources to verify that those that should be colocated
+ with each other actually are.
+ """
+
+ def __init__(self, cm):
+ """ Create a new ColocationAudit instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ PrimitiveAudit.__init__(self, cm)
+ self.name = "ColocationAudit"
+
+ def _crm_location(self, resource):
+ """ Return a list of cluster nodes where a given resource is running """
+
+ (rc, lines) = self._cm.rsh(self._target, "crm_resource -W -r %s -Q" % resource, verbose=1)
+ hosts = []
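+        # With -Q, crm_resource should print only the node name(s) hosting the
+        # resource, one per line, so the first field of each line is the host.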
+
+ if rc == 0:
+ for line in lines:
+ fields = line.split()
+ hosts.append(fields[0])
+
+ return hosts
+
+ def __call__(self):
+ result = True
+
+ if not self._setup():
+ return result
+
+ for coloc in self._constraints:
+ if coloc.type != "rsc_colocation":
+ continue
+
+ source = self._crm_location(coloc.rsc)
+ target = self._crm_location(coloc.target)
+
+ if not source:
+ self.debug("Colocation audit (%s): %s not running" % (coloc.id, coloc.rsc))
+ else:
+ for node in source:
+                    if node not in target:
+ result = False
+ self._cm.log("Colocation audit (%s): %s running on %s (not in %r)"
+ % (coloc.id, coloc.rsc, node, target))
+ else:
+ self.debug("Colocation audit (%s): %s running on %s (in %r)"
+ % (coloc.id, coloc.rsc, node, target))
+
+ return result
+
+
+class ControllerStateAudit(ClusterAudit):
+    """ Audit cluster nodes to verify that those we expect to be active are
+        active, and those we expect to be inactive are inactive.
+ """
+
+ def __init__(self, cm):
+ """ Create a new ControllerStateAudit instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ ClusterAudit.__init__(self, cm)
+ self.name = "ControllerStateAudit"
+
+ def __call__(self):
+ result = True
+ up_are_down = 0
+ down_are_up = 0
+ unstable_list = []
+
+ for node in self._cm.env["nodes"]:
+ should_be = self._cm.expected_status[node]
+ rc = self._cm.test_node_cm(node)
+
+ if rc > 0:
+ if should_be == "down":
+ down_are_up += 1
+
+ if rc == 1:
+ unstable_list.append(node)
+
+ elif should_be == "up":
+ up_are_down += 1
+
+ if len(unstable_list) > 0:
+ result = False
+ self._cm.log("Cluster is not stable: %d (of %d): %r"
+ % (len(unstable_list), self._cm.upcount(), unstable_list))
+
+ if up_are_down > 0:
+ result = False
+ self._cm.log("%d (of %d) nodes expected to be up were down."
+ % (up_are_down, len(self._cm.env["nodes"])))
+
+ if down_are_up > 0:
+ result = False
+ self._cm.log("%d (of %d) nodes expected to be down were up."
+ % (down_are_up, len(self._cm.env["nodes"])))
+
+ return result
+
+ def is_applicable(self):
+ """ Return True if this audit is applicable in the current test configuration. """
+
+ # @TODO Due to long-ago refactoring, this name test would never match,
+ # so this audit (and those derived from it) would never run.
+ # Uncommenting the next lines fixes the name test, but that then
+ # exposes pre-existing bugs that need to be fixed.
+ #if self._cm["Name"] == "crm-corosync":
+ # return True
+ return False
+
+
+class CIBAudit(ClusterAudit):
+ """ Audit the CIB by verifying that it is identical across cluster nodes """
+
+ def __init__(self, cm):
+ """ Create a new CIBAudit instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ ClusterAudit.__init__(self, cm)
+ self.name = "CibAudit"
+
+ def __call__(self):
+ result = True
+ ccm_partitions = self._cm.find_partitions()
+
+ if not ccm_partitions:
+ self.debug("\tNo partitions to audit")
+ return result
+
+ for partition in ccm_partitions:
+ self.debug("\tAuditing CIB consistency for: %s" % partition)
+
+ if self._audit_cib_contents(partition) == 0:
+ result = False
+
+ return result
+
+ def _audit_cib_contents(self, hostlist):
+ """ Perform the CIB audit on the given hosts """
+
+ passed = True
+ node0 = None
+ node0_xml = None
+
+ partition_hosts = hostlist.split()
+ for node in partition_hosts:
+ node_xml = self._store_remote_cib(node, node0)
+
+ if node_xml is None:
+ self._cm.log("Could not perform audit: No configuration from %s" % node)
+ passed = False
+
+ elif node0 is None:
+ node0 = node
+ node0_xml = node_xml
+
+ elif node0_xml is None:
+ self._cm.log("Could not perform audit: No configuration from %s" % node0)
+ passed = False
+
+ else:
+ (rc, result) = self._cm.rsh(
+ node0, "crm_diff -VV -cf --new %s --original %s" % (node_xml, node0_xml), verbose=1)
+
+ if rc != 0:
+ self._cm.log("Diff between %s and %s failed: %d" % (node0_xml, node_xml, rc))
+ passed = False
+
+ for line in result:
+ if not re.search("<diff/>", line):
+ passed = False
+ self.debug("CibDiff[%s-%s]: %s" % (node0, node, line))
+ else:
+ self.debug("CibDiff[%s-%s] Ignoring: %s" % (node0, node, line))
+
+ return passed
+
+ def _store_remote_cib(self, node, target):
+ """ Store a copy of the given node's CIB on the given target node. If
+ no target is given, store the CIB on the given node.
+ """
+
+ filename = "/tmp/ctsaudit.%s.xml" % node
+
+ if not target:
+ target = node
+
+ (rc, lines) = self._cm.rsh(node, self._cm["CibQuery"], verbose=1)
+ if rc != 0:
+ self._cm.log("Could not retrieve configuration")
+ return None
+
+ self._cm.rsh("localhost", "rm -f %s" % filename)
+ for line in lines:
+ self._cm.rsh("localhost", "echo \'%s\' >> %s" % (line[:-1], filename), verbose=0)
+
+ if self._cm.rsh.copy(filename, "root@%s:%s" % (target, filename), silent=True) != 0:
+ self._cm.log("Could not store configuration")
+ return None
+
+ return filename
+
+ def is_applicable(self):
+ """ Return True if this audit is applicable in the current test configuration. """
+
+ # @TODO Due to long-ago refactoring, this name test would never match,
+ # so this audit (and those derived from it) would never run.
+ # Uncommenting the next lines fixes the name test, but that then
+ # exposes pre-existing bugs that need to be fixed.
+ #if self._cm["Name"] == "crm-corosync":
+ # return True
+ return False
+
+
+class PartitionAudit(ClusterAudit):
+ """ Audit each partition in a cluster to verify a variety of conditions:
+
+ * The number of partitions and the nodes in each is as expected
+ * Each node is active when it should be active and inactive when it
+ should be inactive
+ * The status and epoch of each node is as expected
+ * A partition has quorum
+ * A partition has a DC when expected
+ """
+
+ def __init__(self, cm):
+ """ Create a new PartitionAudit instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ ClusterAudit.__init__(self, cm)
+ self.name = "PartitionAudit"
+
+ self._node_epoch = {}
+ self._node_state = {}
+ self._node_quorum = {}
+
+ def __call__(self):
+ result = True
+ ccm_partitions = self._cm.find_partitions()
+
+ if not ccm_partitions:
+ return result
+
+ self._cm.cluster_stable(double_check=True)
+
+ if len(ccm_partitions) != self._cm.partitions_expected:
+ self._cm.log("ERROR: %d cluster partitions detected:" % len(ccm_partitions))
+ result = False
+
+ for partition in ccm_partitions:
+ self._cm.log("\t %s" % partition)
+
+ for partition in ccm_partitions:
+ if self._audit_partition(partition) == 0:
+ result = False
+
+ return result
+
+ def _trim_string(self, avalue):
+ """ Remove the last character from a multi-character string """
+
+ if not avalue:
+ return None
+
+ if len(avalue) > 1:
+ return avalue[:-1]
+
+ return avalue
+
+ def _trim2int(self, avalue):
+ """ Remove the last character from a multi-character string and convert
+ the result to an int.
+ """
+
+ trimmed = self._trim_string(avalue)
+ if trimmed:
+ return int(trimmed)
+
+ return None
+
+ def _audit_partition(self, partition):
+ """ Perform the audit of a single partition """
+
+ passed = True
+ dc_found = []
+ dc_allowed_list = []
+ lowest_epoch = None
+ node_list = partition.split()
+
+ self.debug("Auditing partition: %s" % partition)
+ for node in node_list:
+ if self._cm.expected_status[node] != "up":
+ self._cm.log("Warn: Node %s appeared out of nowhere" % node)
+ self._cm.expected_status[node] = "up"
+ # not in itself a reason to fail the audit (not what we're
+ # checking for in this audit)
+
+ (_, out) = self._cm.rsh(node, self._cm["StatusCmd"] % node, verbose=1)
+ self._node_state[node] = out[0].strip()
+
+ (_, out) = self._cm.rsh(node, self._cm["EpochCmd"], verbose=1)
+ self._node_epoch[node] = out[0].strip()
+
+ (_, out) = self._cm.rsh(node, self._cm["QuorumCmd"], verbose=1)
+ self._node_quorum[node] = out[0].strip()
+
+ self.debug("Node %s: %s - %s - %s." % (node, self._node_state[node], self._node_epoch[node], self._node_quorum[node]))
+ self._node_state[node] = self._trim_string(self._node_state[node])
+ self._node_epoch[node] = self._trim2int(self._node_epoch[node])
+ self._node_quorum[node] = self._trim_string(self._node_quorum[node])
+
+ if not self._node_epoch[node]:
+                self._cm.log("Warn: Node %s disappeared: can't determine epoch" % node)
+ self._cm.expected_status[node] = "down"
+ # not in itself a reason to fail the audit (not what we're
+ # checking for in this audit)
+ elif lowest_epoch is None or self._node_epoch[node] < lowest_epoch:
+ lowest_epoch = self._node_epoch[node]
+
+ if not lowest_epoch:
+ self._cm.log("Lowest epoch not determined in %s" % partition)
+ passed = False
+
+ for node in node_list:
+ if self._cm.expected_status[node] != "up":
+ continue
+
+ if self._cm.is_node_dc(node, self._node_state[node]):
+ dc_found.append(node)
+ if self._node_epoch[node] == lowest_epoch:
+ self.debug("%s: OK" % node)
+ elif not self._node_epoch[node]:
+ self.debug("Check on %s ignored: no node epoch" % node)
+ elif not lowest_epoch:
+ self.debug("Check on %s ignored: no lowest epoch" % node)
+ else:
+ self._cm.log("DC %s is not the oldest node (%d vs. %d)"
+ % (node, self._node_epoch[node], lowest_epoch))
+ passed = False
+
+ if not dc_found:
+ self._cm.log("DC not found on any of the %d allowed nodes: %s (of %s)"
+ % (len(dc_allowed_list), str(dc_allowed_list), str(node_list)))
+
+ elif len(dc_found) > 1:
+ self._cm.log("%d DCs (%s) found in cluster partition: %s"
+ % (len(dc_found), str(dc_found), str(node_list)))
+ passed = False
+
+ if not passed:
+ for node in node_list:
+ if self._cm.expected_status[node] == "up":
+ self._cm.log("epoch %s : %s"
+ % (self._node_epoch[node], self._node_state[node]))
+
+ return passed
+
+ def is_applicable(self):
+ """ Return True if this audit is applicable in the current test configuration. """
+
+ # @TODO Due to long-ago refactoring, this name test would never match,
+ # so this audit (and those derived from it) would never run.
+ # Uncommenting the next lines fixes the name test, but that then
+ # exposes pre-existing bugs that need to be fixed.
+ #if self._cm["Name"] == "crm-corosync":
+ # return True
+ return False
+
+
+# pylint: disable=invalid-name
+def audit_list(cm):
+ """ Return a list of instances of applicable audits that can be performed
+ for the given ClusterManager.
+ """
+
+ result = []
+
+ for auditclass in [DiskAudit, FileAudit, LogAudit, ControllerStateAudit,
+ PartitionAudit, PrimitiveAudit, GroupAudit, CloneAudit,
+ ColocationAudit, CIBAudit]:
+ a = auditclass(cm)
+ if a.is_applicable():
+ result.append(a)
+
+ return result
diff --git a/python/pacemaker/_cts/cib.py b/python/pacemaker/_cts/cib.py
new file mode 100644
index 0000000..b8b5d5d
--- /dev/null
+++ b/python/pacemaker/_cts/cib.py
@@ -0,0 +1,425 @@
+""" CIB generator for Pacemaker's Cluster Test Suite (CTS) """
+
+__all__ = ["ConfigFactory"]
+__copyright__ = "Copyright 2008-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+import warnings
+import tempfile
+
+from pacemaker.buildoptions import BuildOptions
+from pacemaker._cts.cibxml import Alerts, Clone, Expression, FencingTopology, Group, Nodes, OpDefaults, Option, Resource, Rule
+from pacemaker._cts.network import next_ip
+
+
+class CIB:
+ """ A class for generating, representing, and installing a CIB file onto
+ cluster nodes
+ """
+
+ def __init__(self, cm, version, factory, tmpfile=None):
+ """ Create a new CIB instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ version -- The schema syntax version
+ factory -- A ConfigFactory instance
+ tmpfile -- Where to store the CIB, or None to use a new tempfile
+ """
+
+ # pylint: disable=invalid-name
+ self._cib = None
+ self._cm = cm
+ self._counter = 1
+ self._factory = factory
+ self._num_nodes = 0
+
+ self.version = version
+
+ if not tmpfile:
+ warnings.filterwarnings("ignore")
+
+ # pylint: disable=consider-using-with
+ f = tempfile.NamedTemporaryFile(delete=True)
+ f.close()
+ tmpfile = f.name
+
+ warnings.resetwarnings()
+
+ self._factory.tmpfile = tmpfile
+
+ def _show(self):
+ """ Query a cluster node for its generated CIB; log and return the result """
+
+ output = ""
+ (_, result) = self._factory.rsh(self._factory.target, "HOME=/root CIB_file=%s cibadmin -Ql" % self._factory.tmpfile, verbose=1)
+
+ for line in result:
+ output += line
+ self._factory.debug("Generated Config: %s" % line)
+
+ return output
+
+ def new_ip(self, name=None):
+ """ Generate an IP resource for the next available IP address, optionally
+ specifying the resource's name.
+ """
+
+ if self._cm.env["IPagent"] == "IPaddr2":
+ ip = next_ip(self._cm.env["IPBase"])
+ if not name:
+ if ":" in ip:
+ (_, _, suffix) = ip.rpartition(":")
+ name = "r%s" % suffix
+ else:
+ name = "r%s" % ip
+
+ r = Resource(self._factory, name, self._cm.env["IPagent"], "ocf")
+ r["ip"] = ip
+
+ if ":" in ip:
+ r["cidr_netmask"] = "64"
+ r["nic"] = "eth0"
+ else:
+ r["cidr_netmask"] = "32"
+
+ else:
+ if not name:
+ name = "r%s%d" % (self._cm.env["IPagent"], self._counter)
+ self._counter += 1
+
+ r = Resource(self._factory, name, self._cm.env["IPagent"], "ocf")
+
+ r.add_op("monitor", "5s")
+ return r
+
+ def get_node_id(self, node_name):
+ """ Check the cluster configuration for the node ID for the given node_name """
+
+ # We can't account for every possible configuration,
+ # so we only return a node ID if:
+ # * The node is specified in /etc/corosync/corosync.conf
+ # with "ring0_addr:" equal to node_name and "nodeid:"
+ # explicitly specified.
+ # In all other cases, we return 0.
+ node_id = 0
+
+ # awkward command: use } as record separator
+ # so each corosync.conf "object" is one record;
+ # match the "node {" record that has "ring0_addr: node_name";
+ # then print the substring of that record after "nodeid:"
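+        # For instance, given a hypothetical corosync.conf stanza such as
+        #   node {
+        #       ring0_addr: pcmk-1
+        #       nodeid: 1
+        #   }
+        # the command below prints "1" when node_name is "pcmk-1".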
+ awk = r"""awk -v RS="}" """ \
+ r"""'/^(\s*nodelist\s*{)?\s*node\s*{.*(ring0_addr|name):\s*%s(\s+|$)/""" \
+ r"""{gsub(/.*nodeid:\s*/,"");gsub(/\s+.*$/,"");print}' %s""" \
+ % (node_name, BuildOptions.COROSYNC_CONFIG_FILE)
+
+ (rc, output) = self._factory.rsh(self._factory.target, awk, verbose=1)
+
+ if rc == 0 and len(output) == 1:
+ try:
+ node_id = int(output[0])
+ except ValueError:
+ node_id = 0
+
+ return node_id
+
+ def install(self, target):
+ """ Generate a CIB file and install it to the given cluster node """
+
+ old = self._factory.tmpfile
+
+ # Force a rebuild
+ self._cib = None
+
+ self._factory.tmpfile = "%s/cib.xml" % BuildOptions.CIB_DIR
+ self.contents(target)
+ self._factory.rsh(self._factory.target, "chown %s %s" % (BuildOptions.DAEMON_USER, self._factory.tmpfile))
+
+ self._factory.tmpfile = old
+
+ def contents(self, target):
+ """ Generate a complete CIB file """
+
+ # fencing resource
+ if self._cib:
+ return self._cib
+
+ if target:
+ self._factory.target = target
+
+ self._factory.rsh(self._factory.target, "HOME=/root cibadmin --empty %s > %s" % (self.version, self._factory.tmpfile))
+ self._num_nodes = len(self._cm.env["nodes"])
+
+ no_quorum = "stop"
+ if self._num_nodes < 3:
+ no_quorum = "ignore"
+ self._factory.log("Cluster only has %d nodes, configuring: no-quorum-policy=ignore" % self._num_nodes)
+
+ # We don't need a nodes section unless we add attributes
+ stn = None
+
+ # Fencing resource
+ # Define first so that the shell doesn't reject every update
+ if self._cm.env["DoFencing"]:
+
+ # Define the "real" fencing device
+ st = Resource(self._factory, "Fencing", self._cm.env["stonith-type"], "stonith")
+
+ # Set a threshold for unreliable stonith devices such as the vmware one
+ st.add_meta("migration-threshold", "5")
+ st.add_op("monitor", "120s", timeout="120s")
+ st.add_op("stop", "0", timeout="60s")
+ st.add_op("start", "0", timeout="60s")
+
+ # For remote node tests, a cluster node is stopped and brought back up
+ # as a remote node with the name "remote-OLDNAME". To allow fencing
+ # devices to fence these nodes, create a list of all possible node names.
+ all_node_names = [prefix+n for n in self._cm.env["nodes"] for prefix in ('', 'remote-')]
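+            # e.g. (hypothetical nodes) ["pcmk-1", "pcmk-2"] expands to
+            # ["pcmk-1", "remote-pcmk-1", "pcmk-2", "remote-pcmk-2"]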
+
+ # Add all parameters specified by user
+ entries = self._cm.env["stonith-params"].split(',')
+ for entry in entries:
+ try:
+ (name, value) = entry.split('=', 1)
+ except ValueError:
+ print("Warning: skipping invalid fencing parameter: %s" % entry)
+ continue
+
+ # Allow user to specify "all" as the node list, and expand it here
+ if name in ["hostlist", "pcmk_host_list"] and value == "all":
+ value = ' '.join(all_node_names)
+
+ st[name] = value
+
+ st.commit()
+
+ # Test advanced fencing logic
+ stf_nodes = []
+ stt_nodes = []
+ attr_nodes = {}
+
+ # Create the levels
+ stl = FencingTopology(self._factory)
+ for node in self._cm.env["nodes"]:
+ # Remote node tests will rename the node
+ remote_node = "remote-%s" % node
+
+ # Randomly assign node to a fencing method
+ ftype = self._cm.env.random_gen.choice(["levels-and", "levels-or ", "broadcast "])
+
+ # For levels-and, randomly choose targeting by node name or attribute
+ by = ""
+
+ if ftype == "levels-and":
+ node_id = self.get_node_id(node)
+
+ if node_id == 0 or self._cm.env.random_gen.choice([True, False]):
+ by = " (by name)"
+ else:
+ attr_nodes[node] = node_id
+ by = " (by attribute)"
+
+ self._cm.log(" - Using %s fencing for node: %s%s" % (ftype, node, by))
+
+ if ftype == "levels-and":
+ # If targeting by name, add a topology level for this node
+ if node not in attr_nodes:
+ stl.level(1, node, "FencingPass,Fencing")
+
+ # Always target remote nodes by name, otherwise we would need to add
+ # an attribute to the remote node only during remote tests (we don't
+ # want nonexistent remote nodes showing up in the non-remote tests).
+ # That complexity is not worth the effort.
+ stl.level(1, remote_node, "FencingPass,Fencing")
+
+ # Add the node (and its remote equivalent) to the list of levels-and nodes.
+ stt_nodes.extend([node, remote_node])
+
+ elif ftype == "levels-or ":
+ for n in [node, remote_node]:
+ stl.level(1, n, "FencingFail")
+ stl.level(2, n, "Fencing")
+
+ stf_nodes.extend([node, remote_node])
+
+ # If any levels-and nodes were targeted by attribute,
+ # create the attributes and a level for the attribute.
+ if attr_nodes:
+ stn = Nodes(self._factory)
+
+ for (node_name, node_id) in attr_nodes.items():
+ stn.add_node(node_name, node_id, {"cts-fencing": "levels-and"})
+
+ stl.level(1, None, "FencingPass,Fencing", "cts-fencing", "levels-and")
+
+ # Create a Dummy agent that always passes for levels-and
+ if stt_nodes:
+ stt = Resource(self._factory, "FencingPass", "fence_dummy", "stonith")
+ stt["pcmk_host_list"] = " ".join(stt_nodes)
+ # Wait this many seconds before doing anything, handy for letting disks get flushed too
+ stt["random_sleep_range"] = "30"
+ stt["mode"] = "pass"
+ stt.commit()
+
+ # Create a Dummy agent that always fails for levels-or
+ if stf_nodes:
+ stf = Resource(self._factory, "FencingFail", "fence_dummy", "stonith")
+ stf["pcmk_host_list"] = " ".join(stf_nodes)
+ # Wait this many seconds before doing anything, handy for letting disks get flushed too
+ stf["random_sleep_range"] = "30"
+ stf["mode"] = "fail"
+ stf.commit()
+
+ # Now commit the levels themselves
+ stl.commit()
+
+ o = Option(self._factory)
+ o["stonith-enabled"] = self._cm.env["DoFencing"]
+ o["start-failure-is-fatal"] = "false"
+ o["pe-input-series-max"] = "5000"
+ o["shutdown-escalation"] = "5min"
+ o["batch-limit"] = "10"
+ o["dc-deadtime"] = "5s"
+ o["no-quorum-policy"] = no_quorum
+
+ o.commit()
+
+ o = OpDefaults(self._factory)
+ o["timeout"] = "90s"
+ o.commit()
+
+ # Commit the nodes section if we defined one
+ if stn is not None:
+ stn.commit()
+
+ # Add an alerts section if possible
+ if self._factory.rsh.exists_on_all(self._cm.env["notification-agent"], self._cm.env["nodes"]):
+ alerts = Alerts(self._factory)
+ alerts.add_alert(self._cm.env["notification-agent"],
+ self._cm.env["notification-recipient"])
+ alerts.commit()
+
+ # Add resources?
+ if self._cm.env["CIBResource"]:
+ self.add_resources()
+
+ # generate cib
+ self._cib = self._show()
+
+ if self._factory.tmpfile != "%s/cib.xml" % BuildOptions.CIB_DIR:
+ self._factory.rsh(self._factory.target, "rm -f %s" % self._factory.tmpfile)
+
+ return self._cib
+
+ def add_resources(self):
+ """ Add various resources and their constraints to the CIB """
+
+ # Per-node resources
+ for node in self._cm.env["nodes"]:
+ name = "rsc_%s" % node
+ r = self.new_ip(name)
+ r.prefer(node, "100")
+ r.commit()
+
+ # Migrator
+ # Make this slightly sticky (since we have no other location constraints) to avoid relocation during Reattach
+ m = Resource(self._factory, "migrator", "Dummy", "ocf", "pacemaker")
+ m["passwd"] = "whatever"
+ m.add_meta("resource-stickiness", "1")
+ m.add_meta("allow-migrate", "1")
+ m.add_op("monitor", "P10S")
+ m.commit()
+
+ # Ping the test exerciser
+ p = Resource(self._factory, "ping-1", "ping", "ocf", "pacemaker")
+ p.add_op("monitor", "60s")
+ p["host_list"] = self._cm.env["cts-exerciser"]
+ p["name"] = "connected"
+ p["debug"] = "true"
+
+ c = Clone(self._factory, "Connectivity", p)
+ c["globally-unique"] = "false"
+ c.commit()
+
+ # promotable clone resource
+ s = Resource(self._factory, "stateful-1", "Stateful", "ocf", "pacemaker")
+ s.add_op("monitor", "15s", timeout="60s")
+ s.add_op("monitor", "16s", timeout="60s", role="Promoted")
+ ms = Clone(self._factory, "promotable-1", s)
+ ms["promotable"] = "true"
+ ms["clone-max"] = self._num_nodes
+ ms["clone-node-max"] = 1
+ ms["promoted-max"] = 1
+ ms["promoted-node-max"] = 1
+
+ # Require connectivity to run the promotable clone
+ r = Rule(self._factory, "connected", "-INFINITY", op="or")
+ r.add_child(Expression(self._factory, "m1-connected-1", "connected", "lt", "1"))
+ r.add_child(Expression(self._factory, "m1-connected-2", "connected", "not_defined", None))
+ ms.prefer("connected", rule=r)
+
+ ms.commit()
+
+ # Group Resource
+ g = Group(self._factory, "group-1")
+ g.add_child(self.new_ip())
+
+ if self._cm.env["have_systemd"]:
+ sysd = Resource(self._factory, "petulant", "pacemaker-cts-dummyd@10", "service")
+ sysd.add_op("monitor", "P10S")
+ g.add_child(sysd)
+ else:
+ g.add_child(self.new_ip())
+
+ g.add_child(self.new_ip())
+
+ # Make group depend on the promotable clone
+ g.after("promotable-1", first="promote", then="start")
+ g.colocate("promotable-1", "INFINITY", withrole="Promoted")
+
+ g.commit()
+
+ # LSB resource
+ lsb = Resource(self._factory, "lsb-dummy", "LSBDummy", "lsb")
+ lsb.add_op("monitor", "5s")
+
+ # LSB with group
+ lsb.after("group-1")
+ lsb.colocate("group-1")
+
+ lsb.commit()
+
+
+class ConfigFactory:
+ """ Singleton to generate a CIB file for the environment's schema version """
+
+ def __init__(self, cm):
+ """ Create a new ConfigFactory instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ # pylint: disable=invalid-name
+ self._cm = cm
+ self.rsh = self._cm.rsh
+ if not self._cm.env["ListTests"]:
+ self.target = self._cm.env["nodes"][0]
+ self.tmpfile = None
+
+ def log(self, args):
+ """ Log a message """
+
+ self._cm.log("cib: %s" % args)
+
+ def debug(self, args):
+ """ Log a debug message """
+
+ self._cm.debug("cib: %s" % args)
+
+ def create_config(self, name="pacemaker-%s" % BuildOptions.CIB_SCHEMA_VERSION):
+ """ Return a CIB object for the given schema version """
+
+ return CIB(self._cm, name, self)
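+
+# A minimal usage sketch (assuming an existing ClusterManager instance "cm"):
+#   factory = ConfigFactory(cm)
+#   cib = factory.create_config()       # defaults to the build's schema version
+#   cib.install(cm.env["nodes"][0])     # generate the CIB and install it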
diff --git a/python/pacemaker/_cts/cibxml.py b/python/pacemaker/_cts/cibxml.py
new file mode 100644
index 0000000..52e3721
--- /dev/null
+++ b/python/pacemaker/_cts/cibxml.py
@@ -0,0 +1,734 @@
+""" CIB XML generator for Pacemaker's Cluster Test Suite (CTS) """
+
+__all__ = [
+ "Alerts",
+ "Clone",
+ "Expression",
+ "FencingTopology",
+ "Group",
+ "Nodes",
+ "OpDefaults",
+ "Option",
+ "Resource",
+ "Rule",
+]
+__copyright__ = "Copyright 2008-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+
+def key_val_string(**kwargs):
+ """ Given keyword arguments as key=value pairs, construct a single string
+ containing all those pairs separated by spaces. This is suitable for
+ using in an XML element as a list of its attributes.
+
+ Any pairs that have value=None will be skipped.
+
+ Note that a dictionary can be passed to this function instead of kwargs
+ by using a construction like:
+
+ key_val_string(**{"a": 1, "b": 2})
+ """
+
+ retval = ""
+
+ for (k, v) in kwargs.items():
+ if v is None:
+ continue
+
+ retval += ' %s="%s"' % (k, v)
+
+ return retval
+
+
+def element(element_name, **kwargs):
+ """ Create an XML element string with the given element_name and attributes.
+ This element does not support having any children, so it will be closed
+ on the same line. The attributes are processed by key_val_string.
+ """
+
+ return "<%s %s/>" % (element_name, key_val_string(**kwargs))
+
+
+def containing_element(element_name, inner, **kwargs):
+ """ Like element, but surrounds some child text passed by the inner
+ parameter.
+ """
+
+ attrs = key_val_string(**kwargs)
+ return "<%s %s>%s</%s>" % (element_name, attrs, inner, element_name)
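+
+# As a rough illustration (hypothetical values):
+#   element("nvpair", id="x-a", name="a", value="1")
+# returns something like '<nvpair id="x-a" name="a" value="1"/>', while
+# containing_element("rule", "<expression .../>", id="r1") wraps the inner
+# text in matching <rule id="r1"> ... </rule> tags.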
+
+
+class XmlBase:
+ """ A base class for deriving all kinds of XML sections in the CIB. This
+ class contains only the most basic operations common to all sections.
+ It is up to subclasses to provide most behavior.
+
+ Note that subclasses of this base class often have different sets of
+ arguments to their __init__ methods. In general this is not a great
+ practice, however it is so thoroughly used in these classes that trying
+ to straighten it out is likely to cause more bugs than just leaving it
+ alone for now.
+ """
+
+ def __init__(self, factory, tag, _id, **kwargs):
+ """ Create a new XmlBase instance
+
+ Arguments:
+
+ factory -- A ConfigFactory instance
+ tag -- The XML element's start and end tag
+ _id -- A unique name for the element
+ kwargs -- Any additional key/value pairs that should be added to
+ this element as attributes
+ """
+
+ self._children = []
+ self._factory = factory
+ self._kwargs = kwargs
+ self._tag = tag
+
+ self.name = _id
+
+ def __repr__(self):
+ """ Return a short string description of this XML section """
+
+ return "%s-%s" % (self._tag, self.name)
+
+ def add_child(self, child):
+ """ Add an XML section as a child of this one """
+
+ self._children.append(child)
+
+ def __setitem__(self, key, value):
+ """ Add a key/value pair to this element, resulting in it becoming an
+ XML attribute. If value is None, remove the key.
+ """
+
+ if value:
+ self._kwargs[key] = value
+ else:
+ self._kwargs.pop(key, None)
+
+ def show(self):
+ """ Return a string representation of this XML section, including all
+ of its children
+ """
+
+ text = '''<%s''' % self._tag
+ if self.name:
+ text += ''' id="%s"''' % self.name
+
+ text += key_val_string(**self._kwargs)
+
+ if not self._children:
+ text += '''/>'''
+ return text
+
+ text += '''>'''
+
+ for c in self._children:
+ text += c.show()
+
+ text += '''</%s>''' % self._tag
+ return text
+
+ def _run(self, operation, xml, section, options=""):
+ """ Update the CIB on the cluster to include this XML section, including
+ all of its children
+
+ Arguments:
+
+ operation -- Whether this update is a "create" or "modify" operation
+ xml -- The XML to update the CIB with, typically the result
+ of calling show
+ section -- Which section of the CIB this update applies to (see
+ the --scope argument to cibadmin for allowed values)
+ options -- Extra options to pass to cibadmin
+ """
+
+ if self.name:
+ label = self.name
+ else:
+ label = "<%s>" % self._tag
+
+ self._factory.debug("Writing out %s" % label)
+
+ fixed = "HOME=/root CIB_file=%s" % self._factory.tmpfile
+ fixed += " cibadmin --%s --scope %s %s --xml-text '%s'" % (operation, section, options, xml)
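+        # For example (hypothetical tmpfile), committing an Option section runs
+        # roughly:
+        #   HOME=/root CIB_file=/tmp/cib.xml cibadmin --modify --scope crm_config \
+        #       --allow-create --xml-text '<cluster_property_set .../>'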
+
+ (rc, _) = self._factory.rsh(self._factory.target, fixed)
+ if rc != 0:
+ raise RuntimeError("Configure call failed: %s" % fixed)
+
+
+class InstanceAttributes(XmlBase):
+ """ A class that creates an <instance_attributes> XML section with
+ key/value pairs
+ """
+
+ def __init__(self, factory, _id, attrs):
+ """ Create a new InstanceAttributes instance
+
+ Arguments:
+
+ factory -- A ConfigFactory instance
+ _id -- A unique name for the element
+ attrs -- Key/value pairs to add as nvpair child elements
+ """
+
+ XmlBase.__init__(self, factory, "instance_attributes", _id)
+
+ # Create an <nvpair> for each attribute
+ for (attr, value) in attrs.items():
+ self.add_child(XmlBase(factory, "nvpair", "%s-%s" % (_id, attr),
+ name=attr, value=value))
+
+
+class Node(XmlBase):
+ """ A class that creates a <node> XML section for a single node, complete
+ with node attributes
+ """
+
+ def __init__(self, factory, node_name, node_id, node_attrs):
+ """ Create a new Node instance
+
+ Arguments:
+
+ factory -- A ConfigFactory instance
+ node_name -- The value of the uname attribute for this node
+ node_id -- A unique name for the element
+ node_attrs -- Additional key/value pairs to set as instance
+ attributes for this node
+ """
+
+ XmlBase.__init__(self, factory, "node", node_id, uname=node_name)
+ self.add_child(InstanceAttributes(factory, "%s-1" % node_name, node_attrs))
+
+
+class Nodes(XmlBase):
+ """ A class that creates a <nodes> XML section containing multiple Node
+ instances as children
+ """
+
+ def __init__(self, factory):
+ """ Create a new Nodes instance
+
+ Arguments:
+
+ factory -- A ConfigFactory instance
+ """
+
+ XmlBase.__init__(self, factory, "nodes", None)
+
+ def add_node(self, node_name, node_id, node_attrs):
+ """ Add a child node element
+
+ Arguments:
+
+ node_name -- The value of the uname attribute for this node
+ node_id -- A unique name for the element
+ node_attrs -- Additional key/value pairs to set as instance
+ attributes for this node
+ """
+
+ self.add_child(Node(self._factory, node_name, node_id, node_attrs))
+
+ def commit(self):
+ """ Modify the CIB on the cluster to include this XML section """
+
+ self._run("modify", self.show(), "configuration", "--allow-create")
+
+
+class FencingTopology(XmlBase):
+ """ A class that creates a <fencing-topology> XML section describing how
+ fencing is configured in the cluster
+ """
+
+ def __init__(self, factory):
+ """ Create a new FencingTopology instance
+
+ Arguments:
+
+ factory -- A ConfigFactory instance
+ """
+
+ XmlBase.__init__(self, factory, "fencing-topology", None)
+
+ def level(self, index, target, devices, target_attr=None, target_value=None):
+ """ Generate a <fencing-level> XML element
+
+ index -- The order in which to attempt fencing-levels
+ (1 through 9). Levels are attempted in ascending
+ order until one succeeds.
+ target -- The name of a single node to which this level applies
+ devices -- A list of devices that must all be tried for this
+ level
+ target_attr -- The name of a node attribute that is set for nodes
+ to which this level applies
+ target_value -- The value of a node attribute that is set for nodes
+ to which this level applies
+ """
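+        # As an illustration (hypothetical target), level(1, "pcmk-1",
+        # "FencingPass,Fencing") adds roughly:
+        #   <fencing-level id="cts-pcmk-1.1" target="pcmk-1" index="1"
+        #                  devices="FencingPass,Fencing"/>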
+
+ if target:
+ xml_id = "cts-%s.%d" % (target, index)
+ self.add_child(XmlBase(self._factory, "fencing-level", xml_id, target=target, index=index, devices=devices))
+
+ else:
+ xml_id = "%s-%s.%d" % (target_attr, target_value, index)
+ child = XmlBase(self._factory, "fencing-level", xml_id, index=index, devices=devices)
+ child["target-attribute"] = target_attr
+ child["target-value"] = target_value
+ self.add_child(child)
+
+ def commit(self):
+ """ Create this XML section in the CIB """
+
+ self._run("create", self.show(), "configuration", "--allow-create")
+
+
+class Option(XmlBase):
+ """ A class that creates a <cluster_property_set> XML section of key/value
+ pairs for cluster-wide configuration settings
+ """
+
+ def __init__(self, factory, _id="cib-bootstrap-options"):
+ """ Create a new Option instance
+
+ Arguments:
+
+ factory -- A ConfigFactory instance
+ _id -- A unique name for the element
+ """
+
+ XmlBase.__init__(self, factory, "cluster_property_set", _id)
+
+ def __setitem__(self, key, value):
+ """ Add a child nvpair element containing the given key/value pair """
+
+ self.add_child(XmlBase(self._factory, "nvpair", "cts-%s" % key, name=key, value=value))
+
+ def commit(self):
+ """ Modify the CIB on the cluster to include this XML section """
+
+ self._run("modify", self.show(), "crm_config", "--allow-create")
+
+
+class OpDefaults(XmlBase):
+ """ A class that creates a <cts-op_defaults-meta> XML section of key/value
+ pairs for operation default settings
+ """
+
+ def __init__(self, factory):
+ """ Create a new OpDefaults instance
+
+ Arguments:
+
+ factory -- A ConfigFactory instance
+ """
+
+ XmlBase.__init__(self, factory, "op_defaults", None)
+ self.meta = XmlBase(self._factory, "meta_attributes", "cts-op_defaults-meta")
+ self.add_child(self.meta)
+
+ def __setitem__(self, key, value):
+ """ Add a child nvpair meta_attribute element containing the given
+ key/value pair
+ """
+
+ self.meta.add_child(XmlBase(self._factory, "nvpair", "cts-op_defaults-%s" % key, name=key, value=value))
+
+ def commit(self):
+ """ Modify the CIB on the cluster to include this XML section """
+
+ self._run("modify", self.show(), "configuration", "--allow-create")
+
+
+class Alerts(XmlBase):
+ """ A class that creates an <alerts> XML section """
+
+ def __init__(self, factory):
+ """ Create a new Alerts instance
+
+ Arguments:
+
+ factory -- A ConfigFactory instance
+ """
+
+ XmlBase.__init__(self, factory, "alerts", None)
+ self._alert_count = 0
+
+ def add_alert(self, path, recipient):
+ """ Create a new alert as a child of this XML section
+
+ Arguments:
+
+ path -- The path to a script to be called when a cluster
+ event occurs
+ recipient -- An environment variable to be passed to the script
+ """
+
+ self._alert_count += 1
+ alert = XmlBase(self._factory, "alert", "alert-%d" % self._alert_count,
+ path=path)
+ recipient1 = XmlBase(self._factory, "recipient",
+ "alert-%d-recipient-1" % self._alert_count,
+ value=recipient)
+ alert.add_child(recipient1)
+ self.add_child(alert)
+
+ def commit(self):
+ """ Modify the CIB on the cluster to include this XML section """
+
+ self._run("modify", self.show(), "configuration", "--allow-create")
+
+
+class Expression(XmlBase):
+ """ A class that creates an <expression> XML element as part of some
+ constraint rule
+ """
+
+ def __init__(self, factory, _id, attr, op, value=None):
+ """ Create a new Expression instance
+
+ Arguments:
+
+ factory -- A ConfigFactory instance
+ _id -- A unique name for the element
+ attr -- The attribute to be tested
+ op -- The comparison to perform ("lt", "eq", "defined", etc.)
+ value -- Value for comparison (can be None for "defined" and
+ "not_defined" operations)
+ """
+
+ XmlBase.__init__(self, factory, "expression", _id, attribute=attr, operation=op)
+ if value:
+ self["value"] = value
+
+
+class Rule(XmlBase):
+ """ A class that creates a <rule> XML section consisting of one or more
+ expressions, as part of some constraint
+ """
+
+ def __init__(self, factory, _id, score, op="and", expr=None):
+ """ Create a new Rule instance
+
+ Arguments:
+
+ factory -- A ConfigFactory instance
+ _id -- A unique name for the element
+ score -- If this rule is used in a location constraint and
+ evaluates to true, apply this score to the constraint
+ op -- If this rule contains more than one expression, use this
+ boolean op when evaluating
+ expr -- An Expression instance that can be added to this Rule
+ when it is created
+ """
+
+ XmlBase.__init__(self, factory, "rule", _id)
+
+ self["boolean-op"] = op
+ self["score"] = score
+
+ if expr:
+ self.add_child(expr)
+
+
+class Resource(XmlBase):
+ """ A base class that creates all kinds of <resource> XML sections fully
+ describing a single cluster resource. This defaults to primitive
+ resources, but subclasses can create other types.
+ """
+
+ def __init__(self, factory, _id, rtype, standard, provider=None):
+ """ Create a new Resource instance
+
+ Arguments:
+
+ factory -- A ConfigFactory instance
+ _id -- A unique name for the element
+ rtype -- The name of the resource agent
+ standard -- The standard the resource agent follows ("ocf",
+ "systemd", etc.)
+ provider -- The vendor providing the resource agent
+ """
+
+ XmlBase.__init__(self, factory, "native", _id)
+
+ self._provider = provider
+ self._rtype = rtype
+ self._standard = standard
+
+ self._meta = {}
+ self._op = []
+ self._param = {}
+
+ self._coloc = {}
+ self._needs = {}
+ self._scores = {}
+
+ if self._standard == "ocf" and not provider:
+ self._provider = "heartbeat"
+ elif self._standard == "lsb":
+ self._provider = None
+
+ def __setitem__(self, key, value):
+ """ Add a child nvpair element containing the given key/value pair as
+ an instance attribute
+ """
+
+ self._add_param(key, value)
+
+ def add_op(self, _id, interval, **kwargs):
+ """ Add an operation child XML element to this resource
+
+ Arguments:
+
+ _id -- A unique name for the element. Also, the action to
+ perform ("monitor", "start", "stop", etc.)
+        interval -- How frequently to perform the operation (e.g. "60s")
+ kwargs -- Any additional key/value pairs that should be added to
+ this element as attributes
+ """
+
+ self._op.append(XmlBase(self._factory, "op", "%s-%s" % (_id, interval),
+ name=_id, interval=interval, **kwargs))
+
+ def _add_param(self, name, value):
+ """ Add a child nvpair element containing the given key/value pair as
+ an instance attribute
+ """
+
+ self._param[name] = value
+
+ def add_meta(self, name, value):
+ """ Add a child nvpair element containing the given key/value pair as
+ a meta attribute
+ """
+
+ self._meta[name] = value
+
+ def prefer(self, node, score="INFINITY", rule=None):
+ """ Add a location constraint where this resource prefers some node
+
+ Arguments:
+
+ node -- The name of the node to prefer
+ score -- Apply this score to the location constraint
+ rule -- A Rule instance to use in creating this constraint, instead
+ of creating a new rule
+ """
+
+ if not rule:
+ rule = Rule(self._factory, "prefer-%s-r" % node, score,
+ expr=Expression(self._factory, "prefer-%s-e" % node, "#uname", "eq", node))
+
+ self._scores[node] = rule
+
+ def after(self, resource, kind="Mandatory", first="start", then="start", **kwargs):
+ """ Create an ordering constraint between this resource and some other
+
+ Arguments:
+
+ resource -- The name of the dependent resource
+        kind     -- How to enforce the constraint ("Mandatory", "Optional",
+                    "Serialize")
+ first -- The action that this resource must complete before the
+ then-action can be initiated for the dependent resource
+ ("start", "stop", "promote", "demote")
+ then -- The action that the dependent resource can execute only
+ after the first-action has completed (same values as
+ first)
+ kwargs -- Any additional key/value pairs that should be added to
+ this element as attributes
+ """
+
+ kargs = kwargs.copy()
+ kargs["kind"] = kind
+
+ if then:
+ kargs["first-action"] = "start"
+ kargs["then-action"] = then
+
+ if first:
+ kargs["first-action"] = first
+
+ self._needs[resource] = kargs
+
+ def colocate(self, resource, score="INFINITY", role=None, withrole=None, **kwargs):
+ """ Create a colocation constraint between this resource and some other
+
+ Arguments:
+
+ resource -- The name of the resource that should be located relative
+ this one
+ score -- Apply this score to the colocation constraint
+ role -- Apply this colocation constraint only to promotable clones
+                    in this role ("Started", "Promoted", "Unpromoted")
+ withrole -- Apply this colocation constraint only to with-rsc promotable
+ clones in this role
+ kwargs -- Any additional key/value pairs that should be added to
+ this element as attributes
+ """
+
+ kargs = kwargs.copy()
+ kargs["score"] = score
+
+ if role:
+ kargs["rsc-role"] = role
+
+ if withrole:
+ kargs["with-rsc-role"] = withrole
+
+ self._coloc[resource] = kargs
+
+ def _constraints(self):
+ """ Generate a <constraints> XML section containing all previously added
+ ordering and colocation constraints
+ """
+
+ text = "<constraints>"
+
+ for (k, v) in self._scores.items():
+ attrs = {"id": "prefer-%s" % k, "rsc": self.name}
+ text += containing_element("rsc_location", v.show(), **attrs)
+
+ for (k, kargs) in self._needs.items():
+ attrs = {"id": "%s-after-%s" % (self.name, k), "first": k, "then": self.name}
+ text += element("rsc_order", **attrs, **kargs)
+
+ for (k, kargs) in self._coloc.items():
+ attrs = {"id": "%s-with-%s" % (self.name, k), "rsc": self.name, "with-rsc": k}
+            text += element("rsc_colocation", **attrs, **kargs)
+
+ text += "</constraints>"
+ return text
+
+ def show(self):
+ """ Return a string representation of this XML section, including all
+ of its children
+ """
+
+ text = '''<primitive id="%s" class="%s" type="%s"''' % (self.name, self._standard, self._rtype)
+
+ if self._provider:
+ text += ''' provider="%s"''' % self._provider
+
+ text += '''>'''
+
+ if self._meta:
+ nvpairs = ""
+ for (p, v) in self._meta.items():
+ attrs = {"id": "%s-%s" % (self.name, p), "name": p, "value": v}
+ nvpairs += element("nvpair", **attrs)
+
+ text += containing_element("meta_attributes", nvpairs,
+ id="%s-meta" % self.name)
+
+ if self._param:
+ nvpairs = ""
+ for (p, v) in self._param.items():
+ attrs = {"id": "%s-%s" % (self.name, p), "name": p, "value": v}
+ nvpairs += element("nvpair", **attrs)
+
+ text += containing_element("instance_attributes", nvpairs,
+ id="%s-params" % self.name)
+
+ if self._op:
+ text += '''<operations>'''
+
+ for o in self._op:
+ key = o.name
+ o.name = "%s-%s" % (self.name, key)
+ text += o.show()
+ o.name = key
+
+ text += '''</operations>'''
+
+ text += '''</primitive>'''
+ return text
+
+ def commit(self):
+ """ Modify the CIB on the cluster to include this XML section """
+
+ self._run("create", self.show(), "resources")
+ self._run("modify", self._constraints(), "constraints")
+
+
+class Group(Resource):
+ """ A specialized Resource subclass that creates a <group> XML section
+ describing a single group resource consisting of multiple child
+ primitive resources
+ """
+
+ def __init__(self, factory, _id):
+ """ Create a new Group instance
+
+ Arguments:
+
+ factory -- A ConfigFactory instance
+ _id -- A unique name for the element
+ """
+
+ Resource.__init__(self, factory, _id, None, None)
+ self.tag = "group"
+
+ def __setitem__(self, key, value):
+ self.add_meta(key, value)
+
+ def show(self):
+ """ Return a string representation of this XML section, including all
+ of its children
+ """
+
+ text = '''<%s id="%s">''' % (self.tag, self.name)
+
+ if len(self._meta) > 0:
+ nvpairs = ""
+ for (p, v) in self._meta.items():
+ attrs = {"id": "%s-%s" % (self.name, p), "name": p, "value": v}
+ nvpairs += element("nvpair", **attrs)
+
+ text += containing_element("meta_attributes", nvpairs,
+ id="%s-meta" % self.name)
+
+ for c in self._children:
+ text += c.show()
+
+ text += '''</%s>''' % self.tag
+ return text
+
+
+class Clone(Group):
+ """ A specialized Group subclass that creates a <clone> XML section
+ describing a clone resource containing multiple instances of a
+ single primitive resource
+ """
+
+ def __init__(self, factory, _id, child=None):
+ """ Create a new Clone instance
+
+ Arguments:
+
+ factory -- A ConfigFactory instance
+ _id -- A unique name for the element
+ child -- A Resource instance that can be added to this Clone
+ when it is created. Alternately, use add_child later.
+ Note that a Clone may only have one child.
+ """
+
+ Group.__init__(self, factory, _id)
+ self.tag = "clone"
+
+ if child:
+ self.add_child(child)
+
+ def add_child(self, child):
+ """ Add the given resource as a child of this Clone. Note that a
+ Clone resource only supports one child at a time.
+ """
+
+ if not self._children:
+ self._children.append(child)
+ else:
+ self._factory.log("Clones can only have a single child. Ignoring %s" % child.name)
diff --git a/python/pacemaker/_cts/clustermanager.py b/python/pacemaker/_cts/clustermanager.py
new file mode 100644
index 0000000..652108f
--- /dev/null
+++ b/python/pacemaker/_cts/clustermanager.py
@@ -0,0 +1,916 @@
+""" ClusterManager class for Pacemaker's Cluster Test Suite (CTS) """
+
+__all__ = ["ClusterManager"]
+__copyright__ = """Copyright 2000-2023 the Pacemaker project contributors.
+Certain portions by Huang Zhen <zhenhltc@cn.ibm.com> are copyright 2004
+International Business Machines. The version control history for this file
+may have further details."""
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+import os
+import re
+import time
+
+from collections import UserDict
+
+from pacemaker.buildoptions import BuildOptions
+from pacemaker._cts.CTS import NodeStatus
+from pacemaker._cts.audits import AuditResource
+from pacemaker._cts.cib import ConfigFactory
+from pacemaker._cts.environment import EnvFactory
+from pacemaker._cts.logging import LogFactory
+from pacemaker._cts.patterns import PatternSelector
+from pacemaker._cts.remote import RemoteFactory
+from pacemaker._cts.watcher import LogWatcher
+
+# pylint doesn't understand that self._rsh is callable (it stores the
+# singleton instance of RemoteExec, as returned by the getInstance method
+# of RemoteFactory). It's possible we could fix this with type annotations,
+# but those were introduced with python 3.5 and we only support python 3.4.
+# I think we could also fix this by getting rid of the getInstance methods,
+# but that's a project for another day. For now, just disable the warning.
+# pylint: disable=not-callable
+
+# ClusterManager has a lot of methods.
+# pylint: disable=too-many-public-methods
+
+class ClusterManager(UserDict):
+ """ An abstract base class for managing the cluster. This class implements
+ high-level operations on the cluster and/or its cluster managers.
+ Actual cluster-specific management classes should be subclassed from this
+ one.
+
+ Among other things, this class tracks the state every node is expected to
+ be in.
+ """
+
+ def _final_conditions(self):
+ """ Check all keys to make sure they have a non-None value """
+
+ for (key, val) in self._data.items():
+ if val is None:
+ raise ValueError("Improper derivation: self[%s] must be overridden by subclass." % key)
+
+ def __init__(self):
+ """ Create a new ClusterManager instance. This class can be treated
+            kind of like a dictionary due to the presence of certain dict
+            functions like __getitem__ and __setitem__. This is because it
+            contains a lot of name/value pairs. However, it is not actually
+            a dictionary, so do not rely on standard dictionary behavior.
+ """
+
+ # Eventually, ClusterManager should not be a UserDict subclass. Until
+ # that point...
+ # pylint: disable=super-init-not-called
+ self.__instance_errors_to_ignore = []
+
+ self._cib_installed = False
+ self._data = {}
+ self._logger = LogFactory()
+
+ self.env = EnvFactory().getInstance()
+ self.expected_status = {}
+ self.name = self.env["Name"]
+ # pylint: disable=invalid-name
+ self.ns = NodeStatus(self.env)
+ self.our_node = os.uname()[1].lower()
+ self.partitions_expected = 1
+ self.rsh = RemoteFactory().getInstance()
+ self.templates = PatternSelector(self.env["Name"])
+
+ self._final_conditions()
+
+ self._cib_factory = ConfigFactory(self)
+ self._cib = self._cib_factory.create_config(self.env["Schema"])
+ self._cib_sync = {}
+
+ def __getitem__(self, key):
+ if key == "Name":
+ return self.name
+
+ print("FIXME: Getting %s from %r" % (key, self))
+ if key in self._data:
+ return self._data[key]
+
+ return self.templates.get_patterns(key)
+
+ def __setitem__(self, key, value):
+ print("FIXME: Setting %s=%s on %r" % (key, value, self))
+ self._data[key] = value
+
+ def clear_instance_errors_to_ignore(self):
+ """ Reset instance-specific errors to ignore on each iteration """
+
+ self.__instance_errors_to_ignore = []
+
+ @property
+ def instance_errors_to_ignore(self):
+ """ Return a list of known errors that should be ignored for a specific
+ test instance
+ """
+
+ return self.__instance_errors_to_ignore
+
+ @property
+ def errors_to_ignore(self):
+ """ Return a list of known error messages that should be ignored """
+
+ return self.templates.get_patterns("BadNewsIgnore")
+
+ def log(self, args):
+ """ Log a message """
+
+ self._logger.log(args)
+
+ def debug(self, args):
+ """ Log a debug message """
+
+ self._logger.debug(args)
+
+ def upcount(self):
+ """ How many nodes are up? """
+
+ count = 0
+
+ for node in self.env["nodes"]:
+ if self.expected_status[node] == "up":
+ count += 1
+
+ return count
+
+ def install_support(self, command="install"):
+ """ Install or uninstall the CTS support files - various init scripts and data,
+ daemons, fencing agents, etc.
+ """
+
+ for node in self.env["nodes"]:
+ self.rsh(node, "%s/cts-support %s" % (BuildOptions.DAEMON_DIR, command))
+
+ def prepare_fencing_watcher(self):
+ """ Return a LogWatcher object that watches for fencing log messages """
+
+ # If we don't have quorum now but get it as a result of starting this node,
+ # then a bunch of nodes might get fenced
+ if self.has_quorum(None):
+ self.debug("Have quorum")
+ return None
+
+ if not self.templates["Pat:Fencing_start"]:
+ print("No start pattern")
+ return None
+
+ if not self.templates["Pat:Fencing_ok"]:
+ print("No ok pattern")
+ return None
+
+ stonith = None
+ stonith_pats = []
+ for peer in self.env["nodes"]:
+ if self.expected_status[peer] == "up":
+ continue
+
+ stonith_pats.extend([
+ self.templates["Pat:Fencing_ok"] % peer,
+ self.templates["Pat:Fencing_start"] % peer,
+ ])
+
+ stonith = LogWatcher(self.env["LogFileName"], stonith_pats, self.env["nodes"],
+ self.env["LogWatcher"], "StartupFencing", 0)
+ stonith.set_watch()
+ return stonith
+
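+    # Typical flow (see start_cm below): create the watcher with
+    # prepare_fencing_watcher() before issuing the start command, then hand it
+    # to fencing_cleanup() afterwards so any nodes fenced as a side effect of
+    # the start are waited for and their expected status updated.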
+ def fencing_cleanup(self, node, stonith):
+ """ Wait for a previously fenced node to return to the cluster """
+
+ peer_list = []
+ peer_state = {}
+
+ self.debug("Looking for nodes that were fenced as a result of %s starting" % node)
+
+ # If we just started a node, we may now have quorum (and permission to fence)
+ if not stonith:
+ self.debug("Nothing to do")
+ return peer_list
+
+ q = self.has_quorum(None)
+ if not q and len(self.env["nodes"]) > 2:
+ # We didn't gain quorum - we shouldn't have shot anyone
+ self.debug("Quorum: %s Len: %d" % (q, len(self.env["nodes"])))
+ return peer_list
+
+ for n in self.env["nodes"]:
+ peer_state[n] = "unknown"
+
+ # Now see if any states need to be updated
+ self.debug("looking for: %r" % stonith.regexes)
+ shot = stonith.look(0)
+
+ while shot:
+ self.debug("Found: %r" % shot)
+ del stonith.regexes[stonith.whichmatch]
+
+            # Extract the name of the fenced node; reset peer first so a value
+            # left over from a previous log message is not reused
+            peer = None
+ for n in self.env["nodes"]:
+ if re.search(self.templates["Pat:Fencing_ok"] % n, shot):
+ peer = n
+ peer_state[peer] = "complete"
+ self.__instance_errors_to_ignore.append(self.templates["Pat:Fencing_ok"] % peer)
+
+ elif peer_state[n] != "complete" and re.search(self.templates["Pat:Fencing_start"] % n, shot):
+ # TODO: Correctly detect multiple fencing operations for the same host
+ peer = n
+ peer_state[peer] = "in-progress"
+ self.__instance_errors_to_ignore.append(self.templates["Pat:Fencing_start"] % peer)
+
+ if not peer:
+ self._logger.log("ERROR: Unknown stonith match: %r" % shot)
+
+ elif not peer in peer_list:
+ self.debug("Found peer: %s" % peer)
+ peer_list.append(peer)
+
+ # Get the next one
+ shot = stonith.look(60)
+
+ for peer in peer_list:
+ self.debug(" Peer %s was fenced as a result of %s starting: %s" % (peer, node, peer_state[peer]))
+ if self.env["at-boot"]:
+ self.expected_status[peer] = "up"
+ else:
+ self.expected_status[peer] = "down"
+
+ if peer_state[peer] == "in-progress":
+ # Wait for any in-progress operations to complete
+ shot = stonith.look(60)
+
+ while stonith.regexes and shot:
+ self.debug("Found: %r" % shot)
+ del stonith.regexes[stonith.whichmatch]
+ shot = stonith.look(60)
+
+ # Now make sure the node is alive too
+ self.ns.wait_for_node(peer, self.env["DeadTime"])
+
+ # Poll until it comes up
+ if self.env["at-boot"]:
+ if not self.stat_cm(peer):
+ time.sleep(self.env["StartTime"])
+
+ if not self.stat_cm(peer):
+ self._logger.log("ERROR: Peer %s failed to restart after being fenced" % peer)
+ return None
+
+ return peer_list
+
+ def start_cm(self, node, verbose=False):
+ """ Start up the cluster manager on a given node """
+
+ if verbose:
+ self._logger.log("Starting %s on node %s" % (self.templates["Name"], node))
+ else:
+ self.debug("Starting %s on node %s" % (self.templates["Name"], node))
+
+ if not node in self.expected_status:
+ self.expected_status[node] = "down"
+
+ if self.expected_status[node] != "down":
+ return True
+
+ # Technically we should always be able to notice ourselves starting
+ patterns = [
+ self.templates["Pat:Local_started"] % node,
+ ]
+
+ if self.upcount() == 0:
+ patterns.append(self.templates["Pat:DC_started"] % node)
+ else:
+ patterns.append(self.templates["Pat:NonDC_started"] % node)
+
+ watch = LogWatcher(self.env["LogFileName"], patterns, self.env["nodes"], self.env["LogWatcher"],
+ "StartaCM", self.env["StartTime"] + 10)
+
+ self.install_config(node)
+
+ self.expected_status[node] = "any"
+
+ if self.stat_cm(node) and self.cluster_stable(self.env["DeadTime"]):
+ self._logger.log("%s was already started" % node)
+ return True
+
+ stonith = self.prepare_fencing_watcher()
+ watch.set_watch()
+
+ (rc, _) = self.rsh(node, self.templates["StartCmd"])
+ if rc != 0:
+ self._logger.log("Warn: Start command failed on node %s" % node)
+ self.fencing_cleanup(node, stonith)
+ return False
+
+ self.expected_status[node] = "up"
+ watch_result = watch.look_for_all()
+
+ if watch.unmatched:
+ for regex in watch.unmatched:
+ self._logger.log("Warn: Startup pattern not found: %s" % regex)
+
+ if watch_result and self.cluster_stable(self.env["DeadTime"]):
+ self.fencing_cleanup(node, stonith)
+ return True
+
+ if self.stat_cm(node) and self.cluster_stable(self.env["DeadTime"]):
+ self.fencing_cleanup(node, stonith)
+ return True
+
+ self._logger.log("Warn: Start failed for node %s" % node)
+ return False
+
+ def start_cm_async(self, node, verbose=False):
+ """ Start up the cluster manager on a given node without blocking """
+
+ if verbose:
+ self._logger.log("Starting %s on node %s" % (self["Name"], node))
+ else:
+ self.debug("Starting %s on node %s" % (self["Name"], node))
+
+ self.install_config(node)
+ self.rsh(node, self.templates["StartCmd"], synchronous=False)
+ self.expected_status[node] = "up"
+
+ def stop_cm(self, node, verbose=False, force=False):
+ """ Stop the cluster manager on a given node """
+
+ if verbose:
+ self._logger.log("Stopping %s on node %s" % (self["Name"], node))
+ else:
+ self.debug("Stopping %s on node %s" % (self["Name"], node))
+
+ if self.expected_status[node] != "up" and not force:
+ return True
+
+ (rc, _) = self.rsh(node, self.templates["StopCmd"])
+ if rc == 0:
+ # Make sure we can continue even if corosync leaks
+ self.expected_status[node] = "down"
+ self.cluster_stable(self.env["DeadTime"])
+ return True
+
+ self._logger.log("ERROR: Could not stop %s on node %s" % (self["Name"], node))
+ return False
+
+ def stop_cm_async(self, node):
+ """ Stop the cluster manager on a given node without blocking """
+
+ self.debug("Stopping %s on node %s" % (self["Name"], node))
+
+ self.rsh(node, self.templates["StopCmd"], synchronous=False)
+ self.expected_status[node] = "down"
+
+ def startall(self, nodelist=None, verbose=False, quick=False):
+ """ Start the cluster manager on every node in the cluster, or on every
+ node in nodelist if not None
+ """
+
+ if not nodelist:
+ nodelist = self.env["nodes"]
+
+ for node in nodelist:
+ if self.expected_status[node] == "down":
+ self.ns.wait_for_all_nodes(nodelist, 300)
+
+ if not quick:
+ # This is used for "basic sanity checks", so only start one node ...
+ return self.start_cm(nodelist[0], verbose=verbose)
+
+ # Approximation of SimulStartList for --boot
+ watchpats = [
+ self.templates["Pat:DC_IDLE"],
+ ]
+ for node in nodelist:
+ watchpats.extend([
+ self.templates["Pat:InfraUp"] % node,
+ self.templates["Pat:PacemakerUp"] % node,
+ self.templates["Pat:Local_started"] % node,
+ self.templates["Pat:They_up"] % (nodelist[0], node),
+ ])
+
+ # Start all the nodes - at about the same time...
+ watch = LogWatcher(self.env["LogFileName"], watchpats, self.env["nodes"],
+ self.env["LogWatcher"], "fast-start", self.env["DeadTime"] + 10)
+ watch.set_watch()
+
+ if not self.start_cm(nodelist[0], verbose=verbose):
+ return False
+
+ for node in nodelist:
+ self.start_cm_async(node, verbose=verbose)
+
+ watch.look_for_all()
+ if watch.unmatched:
+ for regex in watch.unmatched:
+ self._logger.log("Warn: Startup pattern not found: %s" % regex)
+
+ if not self.cluster_stable():
+ self._logger.log("Cluster did not stabilize")
+ return False
+
+ return True
+
+ def stopall(self, nodelist=None, verbose=False, force=False):
+ """ Stop the cluster manager on every node in the cluster, or on every
+ node in nodelist if not None
+ """
+
+ ret = True
+
+ if not nodelist:
+ nodelist = self.env["nodes"]
+
+ for node in self.env["nodes"]:
+ if self.expected_status[node] == "up" or force:
+ if not self.stop_cm(node, verbose=verbose, force=force):
+ ret = False
+
+ return ret
+
+ def statall(self, nodelist=None):
+ """ Return the status of the cluster manager on every node in the cluster,
+ or on every node in nodelist if not None
+ """
+
+ result = {}
+
+ if not nodelist:
+ nodelist = self.env["nodes"]
+
+ for node in nodelist:
+ if self.stat_cm(node):
+ result[node] = "up"
+ else:
+ result[node] = "down"
+
+ return result
+
+ def isolate_node(self, target, nodes=None):
+ """ Break communication between the target node and all other nodes in the
+ cluster, or nodes if not None
+ """
+
+ if not nodes:
+ nodes = self.env["nodes"]
+
+ for node in nodes:
+ if node == target:
+ continue
+
+ (rc, _) = self.rsh(target, self.templates["BreakCommCmd"] % node)
+ if rc != 0:
+ self._logger.log("Could not break the communication between %s and %s: %d" % (target, node, rc))
+ return False
+
+ self.debug("Communication cut between %s and %s" % (target, node))
+
+ return True
+
+ def unisolate_node(self, target, nodes=None):
+ """ Re-establish communication between the target node and all other nodes
+ in the cluster, or nodes if not None
+ """
+
+ if not nodes:
+ nodes = self.env["nodes"]
+
+ for node in nodes:
+ if node == target:
+ continue
+
+ # Limit the amount of time we have asynchronous connectivity for
+ # Restore both sides as simultaneously as possible
+ self.rsh(target, self.templates["FixCommCmd"] % node, synchronous=False)
+ self.rsh(node, self.templates["FixCommCmd"] % target, synchronous=False)
+ self.debug("Communication restored between %s and %s" % (target, node))
+
+ def oprofile_start(self, node=None):
+ """ Start profiling on the given node, or all nodes in the cluster """
+
+ if not node:
+ for n in self.env["oprofile"]:
+ self.oprofile_start(n)
+
+ elif node in self.env["oprofile"]:
+ self.debug("Enabling oprofile on %s" % node)
+ self.rsh(node, "opcontrol --init")
+ self.rsh(node, "opcontrol --setup --no-vmlinux --separate=lib --callgraph=20 --image=all")
+ self.rsh(node, "opcontrol --start")
+ self.rsh(node, "opcontrol --reset")
+
+ def oprofile_save(self, test, node=None):
+ """ Save profiling data and restart profiling on the given node, or all
+ nodes in the cluster if None
+ """
+
+ if not node:
+ for n in self.env["oprofile"]:
+ self.oprofile_save(test, n)
+
+ elif node in self.env["oprofile"]:
+ self.rsh(node, "opcontrol --dump")
+ self.rsh(node, "opcontrol --save=cts.%d" % test)
+ # Read back with: opreport -l session:cts.0 image:<directory>/c*
+ self.oprofile_stop(node)
+ self.oprofile_start(node)
+
+ def oprofile_stop(self, node=None):
+        """ Stop profiling on the given node, or all nodes in the cluster. This
+ does not save profiling data, so call oprofile_save first if needed.
+ """
+
+ if not node:
+ for n in self.env["oprofile"]:
+ self.oprofile_stop(n)
+
+ elif node in self.env["oprofile"]:
+ self.debug("Stopping oprofile on %s" % node)
+ self.rsh(node, "opcontrol --reset")
+ self.rsh(node, "opcontrol --shutdown 2>&1 > /dev/null")
+
+ def install_config(self, node):
+ """ Remove and re-install the CIB on the first node in the cluster """
+
+ if not self.ns.wait_for_node(node):
+ self.log("Node %s is not up." % node)
+ return
+
+ if node in self._cib_sync or not self.env["ClobberCIB"]:
+ return
+
+ self._cib_sync[node] = True
+ self.rsh(node, "rm -f %s/cib*" % BuildOptions.CIB_DIR)
+
+ # Only install the CIB on the first node, all the other ones will pick it up from there
+ if self._cib_installed:
+ return
+
+ self._cib_installed = True
+ if self.env["CIBfilename"] is None:
+ self.log("Installing Generated CIB on node %s" % node)
+ self._cib.install(node)
+
+ else:
+ self.log("Installing CIB (%s) on node %s" % (self.env["CIBfilename"], node))
+
+ rc = self.rsh.copy(self.env["CIBfilename"], "root@" + (self.templates["CIBfile"] % node))
+
+ if rc != 0:
+ raise ValueError("Can not scp file to %s %d" % (node, rc))
+
+ self.rsh(node, "chown %s %s/cib.xml" % (BuildOptions.DAEMON_USER, BuildOptions.CIB_DIR))
+
+ def prepare(self):
+ """ Finish initialization by clearing out the expected status and recording
+ the current status of every node in the cluster
+ """
+
+ self.partitions_expected = 1
+ for node in self.env["nodes"]:
+ self.expected_status[node] = ""
+
+ if self.env["experimental-tests"]:
+ self.unisolate_node(node)
+
+ self.stat_cm(node)
+
+ def test_node_cm(self, node):
+ """ Check the status of a given node. Returns 0 if the node is
+ down, 1 if the node is up but unstable, and 2 if the node is
+ up and stable
+ """
+
+ watchpats = [
+ "Current ping state: (S_IDLE|S_NOT_DC)",
+ self.templates["Pat:NonDC_started"] % node,
+ self.templates["Pat:DC_started"] % node,
+ ]
+
+ idle_watch = LogWatcher(self.env["LogFileName"], watchpats, [node],
+ self.env["LogWatcher"], "ClusterIdle")
+ idle_watch.set_watch()
+
+ (_, out) = self.rsh(node, self.templates["StatusCmd"] % node, verbose=1)
+
+ if not out:
+ out = ""
+ else:
+ out = out[0].strip()
+
+ self.debug("Node %s status: '%s'" % (node, out))
+
+ if out.find('ok') < 0:
+ if self.expected_status[node] == "up":
+ self.log("Node status for %s is %s but we think it should be %s"
+ % (node, "down", self.expected_status[node]))
+
+ self.expected_status[node] = "down"
+ return 0
+
+ if self.expected_status[node] == "down":
+ self.log("Node status for %s is %s but we think it should be %s: %s"
+ % (node, "up", self.expected_status[node], out))
+
+ self.expected_status[node] = "up"
+
+ # check the output first - because syslog-ng loses messages
+ if out.find('S_NOT_DC') != -1:
+ # Up and stable
+ return 2
+
+ if out.find('S_IDLE') != -1:
+ # Up and stable
+ return 2
+
+ # fall back to syslog-ng and wait
+ if not idle_watch.look():
+ # just up
+ self.debug("Warn: Node %s is unstable: %s" % (node, out))
+ return 1
+
+ # Up and stable
+ return 2
+
+ def stat_cm(self, node):
+ """ Report the status of the cluster manager on a given node """
+
+ return self.test_node_cm(node) > 0
+
+ # Being up and being stable is not the same question...
+ def node_stable(self, node):
+ """ Return whether or not the given node is stable """
+
+ if self.test_node_cm(node) == 2:
+ return True
+
+ self.log("Warn: Node %s not stable" % node)
+ return False
+
+ def partition_stable(self, nodes, timeout=None):
+ """ Return whether or not all nodes in the given partition are stable """
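+        # Note: "nodes" is a single whitespace-separated string of node names
+        # (as produced by find_partitions() below), not a Python list.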
+
+ watchpats = [
+ "Current ping state: S_IDLE",
+ self.templates["Pat:DC_IDLE"],
+ ]
+
+ self.debug("Waiting for cluster stability...")
+
+ if timeout is None:
+ timeout = self.env["DeadTime"]
+
+ if len(nodes) < 3:
+ self.debug("Cluster is inactive")
+ return True
+
+ idle_watch = LogWatcher(self.env["LogFileName"], watchpats, nodes.split(),
+ self.env["LogWatcher"], "ClusterStable", timeout)
+ idle_watch.set_watch()
+
+ for node in nodes.split():
+ # have each node dump its current state
+ self.rsh(node, self.templates["StatusCmd"] % node, verbose=1)
+
+ ret = idle_watch.look()
+
+ while ret:
+ self.debug(ret)
+
+ for node in nodes.split():
+ if re.search(node, ret):
+ return True
+
+ ret = idle_watch.look()
+
+ self.debug("Warn: Partition %r not IDLE after %ds" % (nodes, timeout))
+ return False
+
+ def cluster_stable(self, timeout=None, double_check=False):
+ """ Return whether or not all nodes in the cluster are stable """
+
+ partitions = self.find_partitions()
+
+ for partition in partitions:
+ if not self.partition_stable(partition, timeout):
+ return False
+
+ if not double_check:
+ return True
+
+ # Make sure we are really stable and that all resources,
+ # including those that depend on transient node attributes,
+ # are started if they were going to be
+ time.sleep(5)
+ for partition in partitions:
+ if not self.partition_stable(partition, timeout):
+ return False
+
+ return True
+
+ def is_node_dc(self, node, status_line=None):
+ """ Return whether or not the given node is the cluster DC by checking
+ the given status_line, or by querying the cluster if None
+ """
+
+ if not status_line:
+ (_, out) = self.rsh(node, self.templates["StatusCmd"] % node, verbose=1)
+
+ if out:
+ status_line = out[0].strip()
+
+ if not status_line:
+ return False
+
+ if status_line.find('S_IDLE') != -1:
+ return True
+
+ if status_line.find('S_INTEGRATION') != -1:
+ return True
+
+ if status_line.find('S_FINALIZE_JOIN') != -1:
+ return True
+
+ if status_line.find('S_POLICY_ENGINE') != -1:
+ return True
+
+ if status_line.find('S_TRANSITION_ENGINE') != -1:
+ return True
+
+ return False
+
+ def active_resources(self, node):
+ """ Return a list of primitive resources active on the given node """
+
+ (_, output) = self.rsh(node, "crm_resource -c", verbose=1)
+ resources = []
+ for line in output:
+ if not re.search("^Resource", line):
+ continue
+
+ tmp = AuditResource(self, line)
+ if tmp.type == "primitive" and tmp.host == node:
+ resources.append(tmp.id)
+
+ return resources
+
+ def resource_location(self, rid):
+ """ Return a list of nodes on which the given resource is running """
+
+ resource_nodes = []
+ for node in self.env["nodes"]:
+ if self.expected_status[node] != "up":
+ continue
+
+ cmd = self.templates["RscRunning"] % rid
+ (rc, lines) = self.rsh(node, cmd)
+
+ if rc == 127:
+ self.log("Command '%s' failed. Binary or pacemaker-cts package not installed?" % cmd)
+ for line in lines:
+ self.log("Output: %s " % line)
+
+ elif rc == 0:
+ resource_nodes.append(node)
+
+ return resource_nodes
+
+ def find_partitions(self):
+ """ Return a list of all partitions in the cluster. Each element of the
+ list is itself a list of all active nodes in that partition.
+ """
+
+ ccm_partitions = []
+
+ for node in self.env["nodes"]:
+ if self.expected_status[node] != "up":
+ self.debug("Node %s is down... skipping" % node)
+ continue
+
+ (_, out) = self.rsh(node, self.templates["PartitionCmd"], verbose=1)
+
+ if not out:
+ self.log("no partition details for %s" % node)
+ continue
+
+ partition = out[0].strip()
+
+ if len(partition) <= 2:
+ self.log("bad partition details for %s" % node)
+ continue
+
+ nodes = partition.split()
+ nodes.sort()
+ partition = ' '.join(nodes)
+
+ found = 0
+ for a_partition in ccm_partitions:
+ if partition == a_partition:
+ found = 1
+
+ if found == 0:
+ self.debug("Adding partition from %s: %s" % (node, partition))
+ ccm_partitions.append(partition)
+ else:
+ self.debug("Partition '%s' from %s is consistent with existing entries" % (partition, node))
+
+ self.debug("Found partitions: %r" % ccm_partitions)
+ return ccm_partitions
+
+ def has_quorum(self, node_list):
+ """ Return whether or not the cluster has quorum """
+
+ # If we are auditing a partition, then one side will
+ # have quorum and the other not.
+ # So the caller needs to tell us which we are checking
+ # If no value for node_list is specified... assume all nodes
+ if not node_list:
+ node_list = self.env["nodes"]
+
+ for node in node_list:
+ if self.expected_status[node] != "up":
+ continue
+
+ (_, quorum) = self.rsh(node, self.templates["QuorumCmd"], verbose=1)
+ quorum = quorum[0].strip()
+
+ if quorum.find("1") != -1:
+ return True
+
+ if quorum.find("0") != -1:
+ return False
+
+ self.debug("WARN: Unexpected quorum test result from %s:%s" % (node, quorum))
+
+ return False
+
+ @property
+ def components(self):
+        """ A list of Process objects representing the cluster's components, each
+            carrying the patterns to watch for and to ignore. This must be
+            provided by all subclasses.
+        """
+
+ raise NotImplementedError
+
+ def in_standby_mode(self, node):
+ """ Return whether or not the node is in Standby """
+
+ (_, out) = self.rsh(node, self.templates["StandbyQueryCmd"] % node, verbose=1)
+
+ if not out:
+ return False
+
+ out = out[0].strip()
+ self.debug("Standby result: %s" % out)
+ return out == "on"
+
+ def set_standby_mode(self, node, status):
+ """ Set node to Standby if status is True, or Active if status is False.
+ Return whether the node is now in the requested status.
+ """
+
+ current_status = self.in_standby_mode(node)
+
+ if current_status == status:
+ return True
+
+ if status:
+ cmd = self.templates["StandbyCmd"] % (node, "on")
+ else:
+ cmd = self.templates["StandbyCmd"] % (node, "off")
+
+ (rc, _) = self.rsh(node, cmd)
+ return rc == 0
+
+ def add_dummy_rsc(self, node, rid):
+ """ Add a dummy resource with the given ID to the given node """
+
+ rsc_xml = """ '<resources>
+ <primitive class=\"ocf\" id=\"%s\" provider=\"pacemaker\" type=\"Dummy\">
+ <operations>
+               <op id=\"%s-interval-10s\" interval=\"10s\" name=\"monitor\"/>
+ </operations>
+ </primitive>
+ </resources>'""" % (rid, rid)
+ constraint_xml = """ '<constraints>
+ <rsc_location id=\"location-%s-%s\" node=\"%s\" rsc=\"%s\" score=\"INFINITY\"/>
+ </constraints>'
+ """ % (rid, node, node, rid)
+
+ self.rsh(node, self.templates['CibAddXml'] % rsc_xml)
+ self.rsh(node, self.templates['CibAddXml'] % constraint_xml)
+
+ def remove_dummy_rsc(self, node, rid):
+ """ Remove the previously added dummy resource given by rid on the
+ given node
+ """
+
+ constraint = "\"//rsc_location[@rsc='%s']\"" % rid
+ rsc = "\"//primitive[@id='%s']\"" % rid
+
+ self.rsh(node, self.templates['CibDelXpath'] % constraint)
+ self.rsh(node, self.templates['CibDelXpath'] % rsc)
diff --git a/python/pacemaker/_cts/cmcorosync.py b/python/pacemaker/_cts/cmcorosync.py
new file mode 100644
index 0000000..cac059b
--- /dev/null
+++ b/python/pacemaker/_cts/cmcorosync.py
@@ -0,0 +1,80 @@
+""" Corosync-specific class for Pacemaker's Cluster Test Suite (CTS) """
+
+__all__ = ["Corosync2"]
+__copyright__ = "Copyright 2007-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker._cts.CTS import Process
+from pacemaker._cts.clustermanager import ClusterManager
+from pacemaker._cts.patterns import PatternSelector
+
+# Throughout this file, pylint has trouble understanding that EnvFactory
+# is a singleton instance that can be treated as a subscriptable object.
+# Various warnings are disabled because of this. See also a comment about
+# self._rsh in environment.py.
+# pylint: disable=unsubscriptable-object
+
+class Corosync2(ClusterManager):
+ """ A subclass of ClusterManager specialized to handle corosync2 and later
+ based clusters
+ """
+
+ def __init__(self):
+ """ Create a new Corosync2 instance """
+
+ ClusterManager.__init__(self)
+
+ self._fullcomplist = {}
+ self.templates = PatternSelector(self.name)
+
+ @property
+ def components(self):
+        """ A list of Process objects representing this cluster's components,
+            each carrying the patterns to watch for and to ignore.
+        """
+
+ complist = []
+
+ if not self._fullcomplist:
+ common_ignore = self.templates.get_component("common-ignore")
+
+ daemons = [
+ "pacemaker-based",
+ "pacemaker-controld",
+ "pacemaker-attrd",
+ "pacemaker-execd",
+ "pacemaker-fenced"
+ ]
+ for c in daemons:
+ badnews = self.templates.get_component("%s-ignore" % c) + common_ignore
+ proc = Process(self, c, pats=self.templates.get_component(c),
+ badnews_ignore=badnews)
+ self._fullcomplist[c] = proc
+
+ # the scheduler uses dc_pats instead of pats
+ badnews = self.templates.get_component("pacemaker-schedulerd-ignore") + common_ignore
+ proc = Process(self, "pacemaker-schedulerd",
+ dc_pats=self.templates.get_component("pacemaker-schedulerd"),
+ badnews_ignore=badnews)
+ self._fullcomplist["pacemaker-schedulerd"] = proc
+
+ # add (or replace) extra components
+ badnews = self.templates.get_component("corosync-ignore") + common_ignore
+ proc = Process(self, "corosync", pats=self.templates.get_component("corosync"),
+ badnews_ignore=badnews)
+ self._fullcomplist["corosync"] = proc
+
+ # Processes running under valgrind can't be shot with "killall -9 processname",
+ # so don't include them in the returned list
+ vgrind = self.env["valgrind-procs"].split()
+ for (key, val) in self._fullcomplist.items():
+ if self.env["valgrind-tests"] and key in vgrind:
+ self.log("Filtering %s from the component list as it is being profiled by valgrind" % key)
+ continue
+
+ if key == "pacemaker-fenced" and not self.env["DoFencing"]:
+ continue
+
+ complist.append(val)
+
+ return complist
diff --git a/python/pacemaker/_cts/environment.py b/python/pacemaker/_cts/environment.py
index e4d70e6..732ab24 100644
--- a/python/pacemaker/_cts/environment.py
+++ b/python/pacemaker/_cts/environment.py
@@ -11,6 +11,7 @@ import socket
import sys
import time
+from pacemaker.buildoptions import BuildOptions
from pacemaker._cts.logging import LogFactory
from pacemaker._cts.remote import RemoteFactory
from pacemaker._cts.watcher import LogKind
@@ -31,7 +32,7 @@ class Environment:
def __init__(self, args):
""" Create a new Environment instance. This class can be treated kind
of like a dictionary due to the presence of typical dict functions
- like has_key, __getitem__, and __setitem__. However, it is not a
+ like __contains__, __getitem__, and __setitem__. However, it is not a
dictionary so do not rely on standard dictionary behavior.
Arguments:
@@ -100,7 +101,7 @@ class Environment:
return list(self.data.keys())
- def has_key(self, key):
+ def __contains__(self, key):
""" Does the given environment key exist? """
if key == "nodes":
@@ -120,10 +121,7 @@ class Environment:
if key == "Name":
return self._get_stack_short()
- if key in self.data:
- return self.data[key]
-
- return None
+ return self.data.get(key)
def __setitem__(self, key, value):
""" Set the given environment key to the given value, overriding any
@@ -161,6 +159,14 @@ class Environment:
return self.random_gen.choice(self["nodes"])
+ def get(self, key, default=None):
+ """ Return the value for key if key is in the environment, else default """
+
+ if key == "nodes":
+ return self._nodes
+
+ return self.data.get(key, default)
+
def _set_stack(self, name):
""" Normalize the given cluster stack name """
@@ -279,7 +285,7 @@ class Environment:
# pylint: disable=no-member
if int(self["IPBase"].split('.')[3]) >= 240:
self._logger.log("Could not determine an offset for IPaddr resources. Upper bound is too high: %s %s"
- % (self["IPBase"], self["IPBase"].split('.')[3]))
+ % (self["IPBase"], self["IPBase"].split('.')[3]))
self["IPBase"] = " fe80::1234:56:7890:1000"
self._logger.log("Defaulting to '%s', use --test-ip-base to override" % self["IPBase"])
@@ -294,7 +300,7 @@ class Environment:
# it as an int in __init__ and treat it as an int everywhere.
# pylint: disable=bad-string-format-type
self._logger.log("Limiting the number of nodes configured=%d (max=%d)"
- %(len(self["nodes"]), self["node-limit"]))
+ % (len(self["nodes"]), self["node-limit"]))
while len(self["nodes"]) > self["node-limit"]:
self["nodes"].pop(len(self["nodes"])-1)
@@ -398,15 +404,9 @@ class Environment:
grp4.add_argument("--boot",
action="store_true",
help="")
- grp4.add_argument("--bsc",
- action="store_true",
- help="")
grp4.add_argument("--cib-filename",
metavar="PATH",
help="Install the given CIB file to the cluster")
- grp4.add_argument("--container-tests",
- action="store_true",
- help="Include pacemaker_remote tests that run in lxc container resources")
grp4.add_argument("--experimental-tests",
action="store_true",
help="Include experimental tests")
@@ -438,7 +438,7 @@ class Environment:
help="Use QARSH to access nodes instead of SSH")
grp4.add_argument("--schema",
metavar="SCHEMA",
- default="pacemaker-3.0",
+ default="pacemaker-%s" % BuildOptions.CIB_SCHEMA_VERSION,
help="Create a CIB conforming to the given schema")
grp4.add_argument("--seed",
metavar="SEED",
@@ -491,7 +491,6 @@ class Environment:
self["at-boot"] = args.at_boot in ["1", "yes"]
self["benchmark"] = args.benchmark
self["continue"] = args.always_continue
- self["container-tests"] = args.container_tests
self["experimental-tests"] = args.experimental_tests
self["iterations"] = args.iterations
self["loop-minutes"] = args.loop_minutes
@@ -542,10 +541,6 @@ class Environment:
if args.boot:
self["scenario"] = "boot"
- if args.bsc:
- self["DoBSC"] = True
- self["scenario"] = "basic-sanity"
-
if args.cib_filename:
self["CIBfilename"] = args.cib_filename
else:
diff --git a/python/pacemaker/_cts/input.py b/python/pacemaker/_cts/input.py
new file mode 100644
index 0000000..7e734f6
--- /dev/null
+++ b/python/pacemaker/_cts/input.py
@@ -0,0 +1,18 @@
+""" User input related utilities for CTS """
+
+__all__ = ["should_continue"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+def should_continue(env):
+ """ On failure, prompt the user to see if we should continue """
+
+ if env["continue"]:
+ return True
+
+ try:
+ answer = input("Continue? [yN]")
+ except EOFError:
+ answer = "n"
+
+ return answer in ["y", "Y"]
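+
+# Illustrative use only; the scenario code calls this after a failed teardown or
+# after too many "BadNews" log matches, for example:
+#
+#   if not should_continue(env):
+#       raise ValueError("Too many failures; stopping")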
diff --git a/python/pacemaker/_cts/logging.py b/python/pacemaker/_cts/logging.py
index d9f3012..6c7bfb0 100644
--- a/python/pacemaker/_cts/logging.py
+++ b/python/pacemaker/_cts/logging.py
@@ -21,7 +21,7 @@ class Logger:
self._logfile = filename
if tag:
- self._source = tag + ": "
+ self._source = "%s: " % tag
else:
self._source = ""
diff --git a/python/pacemaker/_cts/network.py b/python/pacemaker/_cts/network.py
new file mode 100644
index 0000000..33e401f
--- /dev/null
+++ b/python/pacemaker/_cts/network.py
@@ -0,0 +1,59 @@
+""" Network related utilities for CTS """
+
+__all__ = ["next_ip"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+# pylint: disable=global-statement
+CURRENT_IP = None
+
+def next_ip(ip_base=None, reset=False):
+ """ Return the next available IP address.
+
+ Arguments:
+
+ ip_base -- The initial IP address to start from. The first call to next_ip
+ will return the next IP address from this base. Each subsequent
+ call will return the next address from the previous call, so you
+ can just omit this argument for subsequent calls.
+ reset -- Force next_ip to start from ip_base again. This requires also
+ passing the ip_base argument. (Mostly useful for unit testing,
+ but may be useful elsewhere).
+
+ This function only increments the last portion of the IP address. Once it
+ has hit the upper limit, ValueError will be raised.
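+
+    For example (illustrative addresses only):
+
+        next_ip("192.168.1.1")             # -> "192.168.1.2"
+        next_ip()                          # -> "192.168.1.3"
+        next_ip("fe80::1234", reset=True)  # -> "fe80::1235"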
+ """
+
+ global CURRENT_IP
+
+ if CURRENT_IP is None or reset:
+ CURRENT_IP = ip_base
+
+ new_ip = None
+
+ # Split the existing IP address up into a tuple of:
+ # (everything except the last part of the addr, the separator, the last part of the addr).
+ # For instance, "192.168.1.2" becomes ("192.168.1", ".", "2"). Then,
+ # increment the last part of the address and paste everything back
+ # together.
+ if ":" in CURRENT_IP:
+ # This is an IPv6 address
+ fields = CURRENT_IP.rpartition(":")
+ new_ip = int(fields[2], 16) + 1
+
+ if new_ip > 0xffff:
+ raise ValueError("No more available IP addresses")
+
+ # hex() puts "0x" at the front of the string, so strip it off.
+ new_ip = hex(new_ip)[2:]
+
+ else:
+ # This is an IPv4 address
+ fields = CURRENT_IP.rpartition(".")
+ new_ip = int(fields[2]) + 1
+
+ if new_ip > 255:
+ raise ValueError("No more available IP addresses")
+
+ CURRENT_IP = "%s%s%s" % (fields[0], fields[1], new_ip)
+ return CURRENT_IP
diff --git a/python/pacemaker/_cts/patterns.py b/python/pacemaker/_cts/patterns.py
index 880477a..0fb1c2b 100644
--- a/python/pacemaker/_cts/patterns.py
+++ b/python/pacemaker/_cts/patterns.py
@@ -220,7 +220,7 @@ class Corosync2Patterns(BasePatterns):
r"pending LRM operations at shutdown",
r"Lost connection to the CIB manager",
r"pacemaker-controld.*:\s*Action A_RECOVER .* not supported",
- r"pacemaker-controld.*:\s*Performing A_EXIT_1 - forcefully exiting ",
+ r"pacemaker-controld.*:\s*Exiting now due to errors",
r".*:\s*Requesting fencing \([^)]+\) targeting node ",
r"(Blackbox dump requested|Problem detected)",
]
@@ -238,7 +238,7 @@ class Corosync2Patterns(BasePatterns):
r"error:.*Connection to cib_(shm|rw).* (failed|closed)",
r"error:.*cib_(shm|rw) IPC provider disconnected while waiting",
r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
- r"crit: Fencing daemon connection failed",
+ r"error: Lost fencer connection",
# This is overbroad, but we don't have a way to say that only
# certain transition errors are acceptable (if the fencer respawns,
# fence devices may appear multiply active). We have to rely on
@@ -253,7 +253,7 @@ class Corosync2Patterns(BasePatterns):
# it's possible for another daemon to lose that connection and
# exit before losing the cluster connection.
r"pacemakerd.*:\s*warning:.*Lost connection to cluster layer",
- r"pacemaker-attrd.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
+ r"pacemaker-attrd.*:\s*(crit|error):.*Lost connection to (Corosync process group|the CIB manager)",
r"pacemaker-based.*:\s*(crit|error):.*Lost connection to cluster layer",
r"pacemaker-controld.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
r"pacemaker-fenced.*:\s*(crit|error):.*Lost connection to (cluster layer|the CIB manager)",
@@ -290,7 +290,7 @@ class Corosync2Patterns(BasePatterns):
]
self._components["pacemaker-execd"] = [
- r"pacemaker-controld.*Connection to executor failed",
+ r"pacemaker-controld.*Lost connection to local executor",
r"pacemaker-controld.*I_ERROR.*lrm_connection_destroy",
r"pacemaker-controld.*State transition .* S_RECOVERY",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
@@ -317,7 +317,7 @@ class Corosync2Patterns(BasePatterns):
r"State transition .* S_RECOVERY",
r"pacemakerd.* Respawning pacemaker-controld subdaemon after unexpected exit",
r"pacemaker-controld\[[0-9]+\] exited with status 1 \(",
- r"Connection to the scheduler failed",
+ r"pacemaker-controld.*Lost connection to the scheduler",
r"pacemaker-controld.*I_ERROR.*save_cib_contents",
r"pacemaker-controld.*: Input I_TERMINATE .*from do_recover",
r"pacemaker-controld.*Could not recover from internal error",
@@ -329,13 +329,13 @@ class Corosync2Patterns(BasePatterns):
self._components["pacemaker-fenced"] = [
r"error:.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
- r"Fencing daemon connection failed",
+ r"Lost fencer connection",
r"pacemaker-controld.*Fencer successfully connected",
]
self._components["pacemaker-fenced-ignore"] = [
r"(error|warning):.*Connection to (fencer|stonith-ng).* (closed|failed|lost)",
- r"crit:.*Fencing daemon connection failed",
+ r"error:.*Lost fencer connection",
r"error:.*Fencer connection failed \(will retry\)",
r"pacemaker-controld.*:\s+Result of .* operation for Fencing.*Error \(Lost connection to fencer\)",
# This is overbroad, but we don't have a way to say that only
diff --git a/python/pacemaker/_cts/process.py b/python/pacemaker/_cts/process.py
index 2940b71..757360c 100644
--- a/python/pacemaker/_cts/process.py
+++ b/python/pacemaker/_cts/process.py
@@ -63,7 +63,7 @@ def pipe_communicate(pipes, check_stderr=False, stdin=None):
output = pipe_outputs[0].decode(sys.stdout.encoding)
if check_stderr:
- output = output + pipe_outputs[1].decode(sys.stderr.encoding)
+ output += pipe_outputs[1].decode(sys.stderr.encoding)
return output
diff --git a/python/pacemaker/_cts/remote.py b/python/pacemaker/_cts/remote.py
index 99d2ed7..4b6b8f6 100644
--- a/python/pacemaker/_cts/remote.py
+++ b/python/pacemaker/_cts/remote.py
@@ -7,7 +7,7 @@ __license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT AN
import re
import os
-from subprocess import Popen,PIPE
+from subprocess import Popen, PIPE
from threading import Thread
from pacemaker._cts.logging import LogFactory
@@ -71,7 +71,7 @@ class AsyncCmd(Thread):
self._proc.wait()
if self._delegate:
- self._logger.debug("cmd: pid %d returned %d to %s" % (self._proc.pid, self._proc.returncode, repr(self._delegate)))
+ self._logger.debug("cmd: pid %d returned %d to %r" % (self._proc.pid, self._proc.returncode, self._delegate))
else:
self._logger.debug("cmd: pid %d returned %d" % (self._proc.pid, self._proc.returncode))
@@ -126,7 +126,7 @@ class RemoteExec:
sysname = args[0]
command = args[1]
- if sysname is None or sysname.lower() == self._our_node or sysname == "localhost":
+ if sysname is None or sysname.lower() in [self._our_node, "localhost"]:
ret = command
else:
ret = "%s %s '%s'" % (self._command, sysname, self._fixcmd(command))
@@ -188,7 +188,7 @@ class RemoteExec:
result = None
# pylint: disable=consider-using-with
proc = Popen(self._cmd([node, command]),
- stdout = PIPE, stderr = PIPE, close_fds = True, shell = True)
+ stdout=PIPE, stderr=PIPE, close_fds=True, shell=True)
if not synchronous and proc.pid > 0 and not self._silent:
aproc = AsyncCmd(node, command, proc=proc)
diff --git a/python/pacemaker/_cts/scenarios.py b/python/pacemaker/_cts/scenarios.py
new file mode 100644
index 0000000..769b2d0
--- /dev/null
+++ b/python/pacemaker/_cts/scenarios.py
@@ -0,0 +1,422 @@
+""" Test scenario classes for Pacemaker's Cluster Test Suite (CTS) """
+
+__all__ = [
+ "AllOnce",
+ "Boot",
+ "BootCluster",
+ "LeaveBooted",
+ "RandomTests",
+ "Sequence",
+]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+import re
+import time
+
+from pacemaker._cts.audits import ClusterAudit
+from pacemaker._cts.input import should_continue
+from pacemaker._cts.tests.ctstest import CTSTest
+from pacemaker._cts.watcher import LogWatcher
+
+class ScenarioComponent:
+ """ The base class for all scenario components. A scenario component is
+ one single step in a scenario. Each component is basically just a setup
+ and teardown method.
+ """
+
+ def __init__(self, cm, env):
+ """ Create a new ScenarioComponent instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ env -- An Environment instance
+ """
+
+ # pylint: disable=invalid-name
+ self._cm = cm
+ self._env = env
+
+ def is_applicable(self):
+ """ Return True if this component is applicable in the given Environment.
+ This method must be provided by all subclasses.
+ """
+
+ raise NotImplementedError
+
+ def setup(self):
+ """ Set up the component, returning True on success. This method must be
+ provided by all subclasses.
+ """
+
+ raise NotImplementedError
+
+ def teardown(self):
+ """ Tear down the given component. This method must be provided by all
+ subclasses.
+ """
+
+ raise NotImplementedError
+
+
+class Scenario:
+    """ The base class for scenarios. A scenario is an ordered list of
+ ScenarioComponent objects. A scenario proceeds by setting up all its
+ components in sequence, running a list of tests and audits, and then
+ tearing down its components in reverse.
+ """
+
+ def __init__(self, cm, components, audits, tests):
+ """ Create a new Scenario instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ components -- A list of ScenarioComponents comprising this Scenario
+ audits -- A list of ClusterAudits that will be performed as
+ part of this Scenario
+ tests -- A list of CTSTests that will be run
+ """
+
+ # pylint: disable=invalid-name
+
+ self.stats = {
+ "success": 0,
+ "failure": 0,
+ "BadNews": 0,
+ "skipped": 0
+ }
+ self.tests = tests
+
+ self._audits = audits
+ self._bad_news = None
+ self._cm = cm
+ self._components = components
+
+ for comp in components:
+ if not issubclass(comp.__class__, ScenarioComponent):
+ raise ValueError("Init value must be subclass of ScenarioComponent")
+
+ for audit in audits:
+ if not issubclass(audit.__class__, ClusterAudit):
+ raise ValueError("Init value must be subclass of ClusterAudit")
+
+ for test in tests:
+ if not issubclass(test.__class__, CTSTest):
+ raise ValueError("Init value must be a subclass of CTSTest")
+
+ def is_applicable(self):
+ """ Return True if all ScenarioComponents are applicable """
+
+ for comp in self._components:
+ if not comp.is_applicable():
+ return False
+
+ return True
+
+ def setup(self):
+ """ Set up the scenario, returning True on success. If setup fails at
+ some point, tear down those components that did successfully set up.
+ """
+
+ self._cm.prepare()
+ self.audit() # Also detects remote/local log config
+ self._cm.ns.wait_for_all_nodes(self._cm.env["nodes"])
+
+ self.audit()
+ self._cm.install_support()
+
+ self._bad_news = LogWatcher(self._cm.env["LogFileName"],
+ self._cm.templates.get_patterns("BadNews"),
+ self._cm.env["nodes"],
+ self._cm.env["LogWatcher"],
+ "BadNews", 0)
+ self._bad_news.set_watch() # Call after we've figured out what type of log watching to do in LogAudit
+
+ j = 0
+ while j < len(self._components):
+ if not self._components[j].setup():
+ # OOPS! We failed. Tear partial setups down.
+ self.audit()
+ self._cm.log("Tearing down partial setup")
+ self.teardown(j)
+ return False
+
+ j += 1
+
+ self.audit()
+ return True
+
+ def teardown(self, n_components=None):
+ """ Tear down the scenario in the reverse order it was set up. If
+ n_components is not None, only tear down that many components.
+ """
+
+ if not n_components:
+ n_components = len(self._components)-1
+
+ j = n_components
+
+ while j >= 0:
+ self._components[j].teardown()
+ j -= 1
+
+ self.audit()
+ self._cm.install_support("uninstall")
+
+ def incr(self, name):
+ """ Increment the given stats key """
+
+ if not name in self.stats:
+ self.stats[name] = 0
+
+ self.stats[name] += 1
+
+ def run(self, iterations):
+ """ Run all tests in the scenario the given number of times """
+
+ self._cm.oprofile_start()
+
+ try:
+ self._run_loop(iterations)
+ self._cm.oprofile_stop()
+ except:
+ self._cm.oprofile_stop()
+ raise
+
+ def _run_loop(self, iterations):
+ """ Do the hard part of the run method - actually run all the tests the
+ given number of times.
+ """
+
+ raise NotImplementedError
+
+ def run_test(self, test, testcount):
+ """ Run the given test. testcount is the number of tests (including
+ this one) that have been run across all iterations.
+ """
+
+ nodechoice = self._cm.env.random_node()
+
+ ret = True
+ did_run = False
+
+ self._cm.clear_instance_errors_to_ignore()
+ choice = "(%s)" % nodechoice
+ self._cm.log("Running test {:<22} {:<15} [{:>3}]".format(test.name, choice, testcount))
+
+ starttime = test.set_timer()
+
+ if not test.setup(nodechoice):
+ self._cm.log("Setup failed")
+ ret = False
+ else:
+ did_run = True
+ ret = test(nodechoice)
+
+ if not test.teardown(nodechoice):
+ self._cm.log("Teardown failed")
+
+ if not should_continue(self._cm.env):
+ raise ValueError("Teardown of %s on %s failed" % (test.name, nodechoice))
+
+ ret = False
+
+ stoptime = time.time()
+ self._cm.oprofile_save(testcount)
+
+ elapsed_time = stoptime - starttime
+ test_time = stoptime - test.get_timer()
+
+ if "min_time" not in test.stats:
+ test.stats["elapsed_time"] = elapsed_time
+ test.stats["min_time"] = test_time
+ test.stats["max_time"] = test_time
+ else:
+ test.stats["elapsed_time"] += elapsed_time
+
+ if test_time < test.stats["min_time"]:
+ test.stats["min_time"] = test_time
+
+ if test_time > test.stats["max_time"]:
+ test.stats["max_time"] = test_time
+
+ if ret:
+ self.incr("success")
+ test.log_timer()
+ else:
+ self.incr("failure")
+ self._cm.statall()
+ did_run = True # Force the test count to be incremented anyway so test extraction works
+
+ self.audit(test.errors_to_ignore)
+ return did_run
+
+ def summarize(self):
+ """ Output scenario results """
+
+ self._cm.log("****************")
+ self._cm.log("Overall Results:%r" % self.stats)
+ self._cm.log("****************")
+
+ stat_filter = {
+ "calls": 0,
+ "failure": 0,
+ "skipped": 0,
+ "auditfail": 0,
+ }
+
+ self._cm.log("Test Summary")
+ for test in self.tests:
+ for key in stat_filter:
+ stat_filter[key] = test.stats[key]
+
+ name = "Test %s:" % test.name
+ self._cm.log("{:<25} {!r}".format(name, stat_filter))
+
+ self._cm.debug("Detailed Results")
+ for test in self.tests:
+ name = "Test %s:" % test.name
+            self._cm.debug("{:<25} {!r}".format(name, test.stats))
+
+ self._cm.log("<<<<<<<<<<<<<<<< TESTS COMPLETED")
+
+ def audit(self, local_ignore=None):
+ """ Perform all scenario audits and log results. If there are too many
+ failures, prompt the user to confirm that the scenario should continue
+ running.
+ """
+
+ errcount = 0
+
+ ignorelist = ["CTS:"]
+
+ if local_ignore:
+ ignorelist.extend(local_ignore)
+
+ ignorelist.extend(self._cm.errors_to_ignore)
+ ignorelist.extend(self._cm.instance_errors_to_ignore)
+
+ # This makes sure everything is stabilized before starting...
+ failed = 0
+ for audit in self._audits:
+ if not audit():
+ self._cm.log("Audit %s FAILED." % audit.name)
+ failed += 1
+ else:
+ self._cm.debug("Audit %s passed." % audit.name)
+
+ while errcount < 1000:
+ match = None
+ if self._bad_news:
+ match = self._bad_news.look(0)
+
+ if match:
+ add_err = True
+
+ for ignore in ignorelist:
+ if add_err and re.search(ignore, match):
+ add_err = False
+
+ if add_err:
+ self._cm.log("BadNews: %s" % match)
+ self.incr("BadNews")
+ errcount += 1
+ else:
+ break
+ else:
+ print("Big problems")
+ if not should_continue(self._cm.env):
+ self._cm.log("Shutting down.")
+ self.summarize()
+ self.teardown()
+ raise ValueError("Looks like we hit a BadNews jackpot!")
+
+ if self._bad_news:
+ self._bad_news.end()
+
+ return failed
+
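+# Illustrative only -- the cts-lab driver assembles and runs a Scenario roughly
+# like this (the exact wiring lives outside this module):
+#
+#   scenario = RandomTests(cm, [BootCluster(cm, env)], audits, tests)
+#   if scenario.is_applicable() and scenario.setup():
+#       scenario.run(env["iterations"])
+#       scenario.summarize()
+#       scenario.teardown()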
+
+class AllOnce(Scenario):
+ """ Every Test Once """
+
+ def _run_loop(self, iterations):
+ testcount = 1
+
+ for test in self.tests:
+ self.run_test(test, testcount)
+ testcount += 1
+
+
+class RandomTests(Scenario):
+ """ Random Test Execution """
+
+ def _run_loop(self, iterations):
+ testcount = 1
+
+ while testcount <= iterations:
+ test = self._cm.env.random_gen.choice(self.tests)
+ self.run_test(test, testcount)
+ testcount += 1
+
+
+class Sequence(Scenario):
+ """ Named Tests in Sequence """
+
+ def _run_loop(self, iterations):
+ testcount = 1
+
+ while testcount <= iterations:
+ for test in self.tests:
+ self.run_test(test, testcount)
+ testcount += 1
+
+
+class Boot(Scenario):
+ """ Start the Cluster """
+
+ def _run_loop(self, iterations):
+ return
+
+
+class BootCluster(ScenarioComponent):
+ """ The BootCluster component simply starts the cluster manager on all
+        nodes, waiting for each node to come up first, since a node might have
+        been rebooted or crashed beforehand.
+ """
+
+ def is_applicable(self):
+ """ BootCluster is always applicable """
+
+ return True
+
+ def setup(self):
+ """ Set up the component, returning True on success """
+
+ self._cm.prepare()
+
+ # Clear out the cobwebs ;-)
+ self._cm.stopall(verbose=True, force=True)
+
+ # Now start the Cluster Manager on all the nodes.
+ self._cm.log("Starting Cluster Manager on all nodes.")
+ return self._cm.startall(verbose=True, quick=True)
+
+ def teardown(self):
+ """ Tear down the component """
+
+ self._cm.log("Stopping Cluster Manager on all nodes")
+ self._cm.stopall(verbose=True, force=False)
+
+
+class LeaveBooted(BootCluster):
+ """ The LeaveBooted component leaves all nodes up when the scenario
+ is complete.
+ """
+
+ def teardown(self):
+ """ Tear down the component """
+
+ self._cm.log("Leaving Cluster running on all nodes")
diff --git a/python/pacemaker/_cts/test.py b/python/pacemaker/_cts/test.py
index fb809a9..577ebb3 100644
--- a/python/pacemaker/_cts/test.py
+++ b/python/pacemaker/_cts/test.py
@@ -147,7 +147,7 @@ class Test:
this requires all subclasses to set self._daemon_location before
accessing this property or an exception will be raised.
"""
- return os.path.join(self.logdir, self._daemon_location + ".log")
+ return os.path.join(self.logdir, "%s.log" % self._daemon_location)
###
### PRIVATE METHODS
@@ -287,6 +287,17 @@ class Test:
self._patterns.append(Pattern(pattern, negative=negative, regex=regex))
+ def _signal_dict(self):
+ """ Return a dictionary mapping signal numbers to their names """
+
+ # FIXME: When we support python >= 3.5, this function can be replaced with:
+ # signal.Signals(self.daemon_process.returncode).name
+ return {
+ getattr(signal, _signame): _signame
+ for _signame in dir(signal)
+ if _signame.startswith("SIG") and not _signame.startswith("SIG_")
+ }
+
def clean_environment(self):
""" Clean up the host after executing a test """
@@ -295,13 +306,11 @@ class Test:
self._daemon_process.terminate()
self._daemon_process.wait()
else:
- return_code = {
- getattr(signal, _signame): _signame
- for _signame in dir(signal)
- if _signame.startswith('SIG') and not _signame.startswith("SIG_")
- }.get(-self._daemon_process.returncode, "RET=%d" % (self._daemon_process.returncode))
+ rc = self._daemon_process.returncode
+ signame = self._signal_dict().get(-rc, "RET=%s" % rc)
msg = "FAILURE - '%s' failed. %s abnormally exited during test (%s)."
- self._result_txt = msg % (self.name, self._daemon_location, return_code)
+
+ self._result_txt = msg % (self.name, self._daemon_location, signame)
self.exitcode = ExitStatus.ERROR
self._daemon_process = None
@@ -311,7 +320,7 @@ class Test:
# makes fenced output any kind of 8 bit value - while still interesting
# for debugging and we'd still like the regression-test to go over the
# full set of test-cases
- with open(self.logpath, 'rt', encoding = "ISO-8859-1") as logfile:
+ with open(self.logpath, 'rt', encoding="ISO-8859-1") as logfile:
for line in logfile.readlines():
self._daemon_output += line
@@ -361,7 +370,7 @@ class Test:
if self.verbose:
print("Step %d SUCCESS" % (i))
- i = i + 1
+ i += 1
self.clean_environment()
@@ -427,7 +436,7 @@ class Test:
if args['validate']:
if args['check_rng']:
- rng_file = rng_directory() + "/api/api-result.rng"
+ rng_file = "%s/api/api-result.rng" % rng_directory()
else:
rng_file = None
@@ -478,7 +487,7 @@ class Test:
if not self.force_wait and logfile is None \
and os.path.exists(self.logpath):
- logfile = io.open(self.logpath, 'rt', encoding = "ISO-8859-1")
+ logfile = io.open(self.logpath, 'rt', encoding="ISO-8859-1")
if not self.force_wait and logfile is not None:
for line in logfile.readlines():
@@ -562,10 +571,10 @@ class Tests:
continue
if test.exitcode != ExitStatus.OK:
- failures = failures + 1
+ failures += 1
test.print_result(" ")
else:
- success = success + 1
+ success += 1
if failures == 0:
print(" None")
diff --git a/python/pacemaker/_cts/tests/Makefile.am b/python/pacemaker/_cts/tests/Makefile.am
new file mode 100644
index 0000000..0dba74b
--- /dev/null
+++ b/python/pacemaker/_cts/tests/Makefile.am
@@ -0,0 +1,14 @@
+#
+# Copyright 2023 the Pacemaker project contributors
+#
+# The version control history for this file may have further details.
+#
+# This source code is licensed under the GNU General Public License version 2
+# or later (GPLv2+) WITHOUT ANY WARRANTY.
+#
+
+MAINTAINERCLEANFILES = Makefile.in
+
+pkgpythondir = $(pythondir)/$(PACKAGE)/_cts/tests
+
+pkgpython_PYTHON = $(wildcard *.py)
diff --git a/python/pacemaker/_cts/tests/__init__.py b/python/pacemaker/_cts/tests/__init__.py
new file mode 100644
index 0000000..63b34aa
--- /dev/null
+++ b/python/pacemaker/_cts/tests/__init__.py
@@ -0,0 +1,87 @@
+"""
+Test classes for the `pacemaker._cts` package.
+"""
+
+__copyright__ = "Copyright 2023 the Pacemaker project contributors"
+__license__ = "GNU Lesser General Public License version 2.1 or later (LGPLv2.1+)"
+
+from pacemaker._cts.tests.componentfail import ComponentFail
+from pacemaker._cts.tests.ctstest import CTSTest
+from pacemaker._cts.tests.fliptest import FlipTest
+from pacemaker._cts.tests.maintenancemode import MaintenanceMode
+from pacemaker._cts.tests.nearquorumpointtest import NearQuorumPointTest
+from pacemaker._cts.tests.partialstart import PartialStart
+from pacemaker._cts.tests.reattach import Reattach
+from pacemaker._cts.tests.restartonebyone import RestartOnebyOne
+from pacemaker._cts.tests.resourcerecover import ResourceRecover
+from pacemaker._cts.tests.restarttest import RestartTest
+from pacemaker._cts.tests.resynccib import ResyncCIB
+from pacemaker._cts.tests.remotebasic import RemoteBasic
+from pacemaker._cts.tests.remotedriver import RemoteDriver
+from pacemaker._cts.tests.remotemigrate import RemoteMigrate
+from pacemaker._cts.tests.remoterscfailure import RemoteRscFailure
+from pacemaker._cts.tests.remotestonithd import RemoteStonithd
+from pacemaker._cts.tests.simulstart import SimulStart
+from pacemaker._cts.tests.simulstop import SimulStop
+from pacemaker._cts.tests.simulstartlite import SimulStartLite
+from pacemaker._cts.tests.simulstoplite import SimulStopLite
+from pacemaker._cts.tests.splitbraintest import SplitBrainTest
+from pacemaker._cts.tests.standbytest import StandbyTest
+from pacemaker._cts.tests.starttest import StartTest
+from pacemaker._cts.tests.startonebyone import StartOnebyOne
+from pacemaker._cts.tests.stonithdtest import StonithdTest
+from pacemaker._cts.tests.stoponebyone import StopOnebyOne
+from pacemaker._cts.tests.stoptest import StopTest
+
+def test_list(cm, audits):
+ """ Return a list of test class objects that are enabled and whose
+    """ Return a list of instantiated test objects that are enabled and whose
+        is_applicable methods return True. These are the tests that
+ """
+
+ # cm is a reasonable name here.
+ # pylint: disable=invalid-name
+
+ # A list of all enabled test classes, in the order that they should
+ # be run (if we're doing --once). There are various other ways of
+ # specifying which tests should be run, in which case the order here
+ # will not matter.
+ #
+ # Note that just because a test is listed here doesn't mean it will
+ # definitely be run - is_applicable is still taken into consideration.
+ # Also note that there are other tests that are excluded from this
+ # list for various reasons.
+ enabled_test_classes = [
+ FlipTest,
+ RestartTest,
+ StonithdTest,
+ StartOnebyOne,
+ SimulStart,
+ SimulStop,
+ StopOnebyOne,
+ RestartOnebyOne,
+ PartialStart,
+ StandbyTest,
+ MaintenanceMode,
+ ResourceRecover,
+ ComponentFail,
+ SplitBrainTest,
+ Reattach,
+ ResyncCIB,
+ NearQuorumPointTest,
+ RemoteBasic,
+ RemoteStonithd,
+ RemoteMigrate,
+ RemoteRscFailure,
+ ]
+
+ result = []
+
+ for testclass in enabled_test_classes:
+ bound_test = testclass(cm)
+
+ if bound_test.is_applicable():
+ bound_test.audits = audits
+ result.append(bound_test)
+
+ return result
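+
+# Illustrative only: the lab driver builds a ClusterManager ("cm") and a list of
+# audits, then hands the bound tests to a Scenario, e.g.:
+#
+#   tests = test_list(cm, audits)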
diff --git a/python/pacemaker/_cts/tests/componentfail.py b/python/pacemaker/_cts/tests/componentfail.py
new file mode 100644
index 0000000..f3d3622
--- /dev/null
+++ b/python/pacemaker/_cts/tests/componentfail.py
@@ -0,0 +1,167 @@
+""" Kill a pacemaker daemon and test how the cluster recovers """
+
+__all__ = ["ComponentFail"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+import re
+
+from pacemaker._cts.audits import AuditResource
+from pacemaker._cts.tests.ctstest import CTSTest
+from pacemaker._cts.tests.simulstartlite import SimulStartLite
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally. This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._rsh is callable.
+# pylint: disable=not-callable
+# pylint doesn't understand that self._env is subscriptable.
+# pylint: disable=unsubscriptable-object
+
+
+class ComponentFail(CTSTest):
+ """ A concrete test that kills a random pacemaker daemon and waits for the
+ cluster to recover
+ """
+
+ def __init__(self, cm):
+ """ Create a new ComponentFail instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ CTSTest.__init__(self, cm)
+
+ self.is_unsafe = True
+ self.name = "ComponentFail"
+
+ self._complist = cm.components
+ self._okerrpatterns = []
+ self._patterns = []
+ self._startall = SimulStartLite(cm)
+
+ def __call__(self, node):
+ """ Perform this test """
+
+ self.incr("calls")
+ self._patterns = []
+ self._okerrpatterns = []
+
+ # start all nodes
+ ret = self._startall(None)
+ if not ret:
+ return self.failure("Setup failed")
+
+ if not self._cm.cluster_stable(self._env["StableTime"]):
+ return self.failure("Setup failed - unstable")
+
+ node_is_dc = self._cm.is_node_dc(node, None)
+
+ # select a component to kill
+ chosen = self._env.random_gen.choice(self._complist)
+ while chosen.dc_only and not node_is_dc:
+ chosen = self._env.random_gen.choice(self._complist)
+
+ self.debug("...component %s (dc=%s)" % (chosen.name, node_is_dc))
+ self.incr(chosen.name)
+
+ if chosen.name != "corosync":
+ self._patterns.extend([
+ self.templates["Pat:ChildKilled"] % (node, chosen.name),
+ self.templates["Pat:ChildRespawn"] % (node, chosen.name),
+ ])
+
+ self._patterns.extend(chosen.pats)
+ if node_is_dc:
+ self._patterns.extend(chosen.dc_pats)
+
+ # @TODO this should be a flag in the Component
+ if chosen.name in ["corosync", "pacemaker-based", "pacemaker-fenced"]:
+ # Ignore actions for fence devices if fencer will respawn
+ # (their registration will be lost, and probes will fail)
+ self._okerrpatterns = [
+ self.templates["Pat:Fencing_active"],
+ ]
+ (_, lines) = self._rsh(node, "crm_resource -c", verbose=1)
+
+ for line in lines:
+ if re.search("^Resource", line):
+ r = AuditResource(self._cm, line)
+
+ if r.rclass == "stonith":
+ self._okerrpatterns.extend([
+ self.templates["Pat:Fencing_recover"] % r.id,
+ self.templates["Pat:Fencing_probe"] % r.id,
+ ])
+
+ # watch a copy of the patterns gathered so far; the badnews_ignore entries added to self._patterns below should not be waited on
+ tmp_pats = self._patterns.copy()
+ self._patterns.extend(chosen.badnews_ignore)
+
+ # Look for STONITH ops; depending on Env["at-boot"], we might need to change the node's status
+ stonith_pats = [
+ self.templates["Pat:Fencing_ok"] % node
+ ]
+ stonith = self.create_watch(stonith_pats, 0)
+ stonith.set_watch()
+
+ # set the watch for stable
+ watch = self.create_watch(
+ tmp_pats, self._env["DeadTime"] + self._env["StableTime"] + self._env["StartTime"])
+
+ watch.set_watch()
+
+ # kill the component
+ chosen.kill(node)
+
+ self.debug("Waiting for the cluster to recover")
+ self._cm.cluster_stable()
+
+ self.debug("Waiting for any fenced node to come back up")
+ self._cm.ns.wait_for_all_nodes(self._env["nodes"], 600)
+
+ self.debug("Waiting for the cluster to re-stabilize with all nodes")
+ self._cm.cluster_stable(self._env["StartTime"])
+
+ self.debug("Checking if %s was shot" % node)
+ shot = stonith.look(60)
+
+ if shot:
+ self.debug("Found: %r" % shot)
+ self._okerrpatterns.append(self.templates["Pat:Fencing_start"] % node)
+
+ if not self._env["at-boot"]:
+ self._cm.expected_status[node] = "down"
+
+ # If fencing occurred, chances are many (if not all) of the expected logs
+ # will not be sent - or will be lost when the node reboots
+ return self.success()
+
+ # check for logs indicating a graceful recovery
+ matched = watch.look_for_all(allow_multiple_matches=True)
+ if watch.unmatched:
+ self._logger.log("Patterns not found: %r" % watch.unmatched)
+
+ self.debug("Waiting for the cluster to re-stabilize with all nodes")
+ is_stable = self._cm.cluster_stable(self._env["StartTime"])
+
+ if not matched:
+ return self.failure("Didn't find all expected %s patterns" % chosen.name)
+
+ if not is_stable:
+ return self.failure("Cluster did not become stable after killing %s" % chosen.name)
+
+ return self.success()
+
+ @property
+ def errors_to_ignore(self):
+ """ Return list of errors which should be ignored """
+
+ # Note that okerrpatterns refers to the last time we ran this test
+ # The good news is that this works fine for us...
+ self._okerrpatterns.extend(self._patterns)
+ return self._okerrpatterns
diff --git a/python/pacemaker/_cts/tests/ctstest.py b/python/pacemaker/_cts/tests/ctstest.py
new file mode 100644
index 0000000..8669e48
--- /dev/null
+++ b/python/pacemaker/_cts/tests/ctstest.py
@@ -0,0 +1,252 @@
+""" Base classes for CTS tests """
+
+__all__ = ["CTSTest"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+import re
+
+from pacemaker._cts.environment import EnvFactory
+from pacemaker._cts.logging import LogFactory
+from pacemaker._cts.patterns import PatternSelector
+from pacemaker._cts.remote import RemoteFactory
+from pacemaker._cts.timer import Timer
+from pacemaker._cts.watcher import LogWatcher
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally. This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._rsh is callable.
+# pylint: disable=not-callable
+
+
+class CTSTest:
+ """ The base class for all cluster tests. This implements a basic set of
+ properties and behaviors like setup, tear down, time keeping, and
+ statistics tracking. It is up to specific tests to implement their own
+ specialized behavior on top of this class.
+ """
+
+ def __init__(self, cm):
+ """ Create a new CTSTest instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ # pylint: disable=invalid-name
+
+ self.audits = []
+ self.name = None
+ self.templates = PatternSelector(cm["Name"])
+
+ self.stats = {
+ "auditfail": 0,
+ "calls": 0,
+ "failure": 0,
+ "skipped": 0,
+ "success": 0
+ }
+
+ self._cm = cm
+ self._env = EnvFactory().getInstance()
+ self._r_o2cb = None
+ self._r_ocfs2 = []
+ self._rsh = RemoteFactory().getInstance()
+ self._logger = LogFactory()
+ self._timers = {}
+
+ self.benchmark = True # whether this test should be included when benchmarking
+ self.failed = False
+ self.is_experimental = False
+ self.is_loop = False
+ self.is_unsafe = False
+ self.is_valgrind = False
+ self.passed = True
+
+ def log(self, args):
+ """ Log a message """
+
+ self._logger.log(args)
+
+ def debug(self, args):
+ """ Log a debug message """
+
+ self._logger.debug(args)
+
+ def get_timer(self, key="test"):
+ """ Get the start time of the given timer """
+
+ try:
+ return self._timers[key].start_time
+ except KeyError:
+ return 0
+
+ def set_timer(self, key="test"):
+ """ Set the start time of the given timer to now, and return
+ that time
+ """
+
+ if key not in self._timers:
+ self._timers[key] = Timer(self._logger, self.name, key)
+
+ self._timers[key].start()
+ return self._timers[key].start_time
+
+ def log_timer(self, key="test"):
+ """ Log the elapsed time of the given timer """
+
+ if key not in self._timers:
+ return
+
+ elapsed = self._timers[key].elapsed
+ self.debug("%s:%s runtime: %.2f" % (self.name, key, elapsed))
+ del self._timers[key]
+
+ def incr(self, name):
+ """ Increment the given stats key """
+
+ if name not in self.stats:
+ self.stats[name] = 0
+
+ self.stats[name] += 1
+
+ # Reset the test passed boolean
+ if name == "calls":
+ self.passed = True
+
+ def failure(self, reason="none"):
+ """ Increment the failure count, with an optional failure reason """
+
+ self.passed = False
+ self.incr("failure")
+ self._logger.log(("Test %s" % self.name).ljust(35) + " FAILED: %s" % reason)
+
+ return False
+
+ def success(self):
+ """ Increment the success count """
+
+ self.incr("success")
+ return True
+
+ def skipped(self):
+ """ Increment the skipped count """
+
+ self.incr("skipped")
+ return True
+
+ def __call__(self, node):
+ """ Perform this test """
+
+ raise NotImplementedError
+
+ def audit(self):
+ """ Perform all the relevant audits (see ClusterAudit), returning
+ whether or not they all passed.
+ """
+
+ passed = True
+
+ for audit in self.audits:
+ if not audit():
+ self._logger.log("Internal %s Audit %s FAILED." % (self.name, audit.name))
+ self.incr("auditfail")
+ passed = False
+
+ return passed
+
+ def setup(self, node):
+ """ Setup this test """
+
+ # node is used in subclasses
+ # pylint: disable=unused-argument
+
+ return self.success()
+
+ def teardown(self, node):
+ """ Tear down this test """
+
+ # node is used in subclasses
+ # pylint: disable=unused-argument
+
+ return self.success()
+
+ def create_watch(self, patterns, timeout, name=None):
+ """ Create a new LogWatcher object with the given patterns, timeout,
+ and optional name. This object can be used to search log files
+ for matching patterns during this test's run.
+ """
+ if not name:
+ name = self.name
+
+ return LogWatcher(self._env["LogFileName"], patterns, self._env["nodes"], self._env["LogWatcher"], name, timeout)
+
+ def local_badnews(self, prefix, watch, local_ignore=None):
+ """ Use the given watch object to search through log files for messages
+ starting with the given prefix. If no prefix is given, use
+ "LocalBadNews:" by default. The optional local_ignore list should
+ be a list of regexes that, if found in a line, will cause that line
+ to be ignored.
+
+ Return the number of matches found.
+ """
+ errcount = 0
+ if not prefix:
+ prefix = "LocalBadNews:"
+
+ ignorelist = [" CTS: ", prefix]
+
+ if local_ignore:
+ ignorelist += local_ignore
+
+ while errcount < 100:
+ match = watch.look(0)
+ if match:
+ add_err = True
+
+ for ignore in ignorelist:
+ if add_err and re.search(ignore, match):
+ add_err = False
+
+ if add_err:
+ self._logger.log("%s %s" % (prefix, match))
+ errcount += 1
+ else:
+ break
+ else:
+ self._logger.log("Too many errors!")
+
+ watch.end()
+ return errcount
+
+ def is_applicable(self):
+ """ Return True if this test is applicable in the current test configuration.
+ This method must be implemented by all subclasses.
+ """
+
+ if self.is_loop and not self._env["loop-tests"]:
+ return False
+
+ if self.is_unsafe and not self._env["unsafe-tests"]:
+ return False
+
+ if self.is_valgrind and not self._env["valgrind-tests"]:
+ return False
+
+ if self.is_experimental and not self._env["experimental-tests"]:
+ return False
+
+ if self._env["benchmark"] and not self.benchmark:
+ return False
+
+ return True
+
+ @property
+ def errors_to_ignore(self):
+ """ Return list of errors which should be ignored """
+
+ return []
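+
+# Illustrative sketch, not part of the upstream module: a minimal concrete
+# test built on CTSTest typically increments its "calls" counter, sets a log
+# watch, performs some cluster action, and reports success or failure. The
+# log pattern and the action placeholder below are assumptions used only for
+# illustration.
+#
+#     class ExampleTest(CTSTest):
+#         def __init__(self, cm):
+#             CTSTest.__init__(self, cm)
+#             self.name = "ExampleTest"
+#
+#         def __call__(self, node):
+#             self.incr("calls")
+#             watch = self.create_watch([r"pacemakerd.*Starting Pacemaker"], 60)
+#             watch.set_watch()
+#             # ... perform some cluster action against `node` here ...
+#             if not watch.look_for_all():
+#                 return self.failure("Patterns not found: %r" % watch.unmatched)
+#             return self.success()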
diff --git a/python/pacemaker/_cts/tests/fliptest.py b/python/pacemaker/_cts/tests/fliptest.py
new file mode 100644
index 0000000..5e77936
--- /dev/null
+++ b/python/pacemaker/_cts/tests/fliptest.py
@@ -0,0 +1,61 @@
+""" Stop running nodes, and start stopped nodes """
+
+__all__ = ["FlipTest"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+import time
+
+from pacemaker._cts.tests.ctstest import CTSTest
+from pacemaker._cts.tests.starttest import StartTest
+from pacemaker._cts.tests.stoptest import StopTest
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally. This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._env is subscriptable.
+# pylint: disable=unsubscriptable-object
+
+
+class FlipTest(CTSTest):
+ """ A concrete test that stops running nodes and starts stopped nodes """
+
+ def __init__(self, cm):
+ """ Create a new FlipTest instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ CTSTest.__init__(self, cm)
+ self.name = "Flip"
+
+ self._start = StartTest(cm)
+ self._stop = StopTest(cm)
+
+ def __call__(self, node):
+ """ Perform this test """
+
+ self.incr("calls")
+
+ if self._cm.expected_status[node] == "up":
+ self.incr("stopped")
+ ret = self._stop(node)
+ kind = "up->down"
+ # Give the cluster time to recognize it's gone...
+ time.sleep(self._env["StableTime"])
+ elif self._cm.expected_status[node] == "down":
+ self.incr("started")
+ ret = self._start(node)
+ kind = "down->up"
+ else:
+ return self.skipped()
+
+ self.incr(kind)
+ if ret:
+ return self.success()
+
+ return self.failure("%s failure" % kind)
diff --git a/python/pacemaker/_cts/tests/maintenancemode.py b/python/pacemaker/_cts/tests/maintenancemode.py
new file mode 100644
index 0000000..3c57c07
--- /dev/null
+++ b/python/pacemaker/_cts/tests/maintenancemode.py
@@ -0,0 +1,238 @@
+""" Toggle nodes in and out of maintenance mode """
+
+__all__ = ["MaintenanceMode"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+import re
+
+from pacemaker._cts.audits import AuditResource
+from pacemaker._cts.tests.ctstest import CTSTest
+from pacemaker._cts.tests.simulstartlite import SimulStartLite
+from pacemaker._cts.tests.starttest import StartTest
+from pacemaker._cts.timer import Timer
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally. This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._rsh is callable.
+# pylint: disable=not-callable
+
+
+class MaintenanceMode(CTSTest):
+ """ A concrete test that toggles nodes in and out of maintenance mode """
+
+ def __init__(self, cm):
+ """ Create a new MaintenanceMode instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ CTSTest.__init__(self, cm)
+
+ self.benchmark = True
+ self.name = "MaintenanceMode"
+
+ self._action = "asyncmon"
+ self._rid = "maintenanceDummy"
+ self._start = StartTest(cm)
+ self._startall = SimulStartLite(cm)
+
+ def _toggle_maintenance_mode(self, node, enabled):
+ """ Toggle maintenance mode on the given node """
+
+ pats = [
+ self.templates["Pat:DC_IDLE"]
+ ]
+
+ if enabled:
+ action = "On"
+ else:
+ action = "Off"
+
+ # fail the resource right after turning maintenance mode on, and
+ # verify it is not recovered until maintenance mode is turned off
+ if enabled:
+ pats.append(self.templates["Pat:RscOpFail"] % (self._action, self._rid))
+ else:
+ pats.extend([
+ self.templates["Pat:RscOpOK"] % ("stop", self._rid),
+ self.templates["Pat:RscOpOK"] % ("start", self._rid)
+ ])
+
+ watch = self.create_watch(pats, 60)
+ watch.set_watch()
+
+ self.debug("Turning maintenance mode %s" % action)
+ self._rsh(node, self.templates["MaintenanceMode%s" % action])
+
+ if enabled:
+ self._rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self._rid, node))
+
+ with Timer(self._logger, self.name, "recover%s" % action):
+ watch.look_for_all()
+
+ if watch.unmatched:
+ self.debug("Failed to find patterns when turning maintenance mode %s" % action)
+ return repr(watch.unmatched)
+
+ return ""
+
+ def _insert_maintenance_dummy(self, node):
+ """ Create a dummy resource on the given node """
+
+ pats = [
+ ("%s.*" % node) + (self.templates["Pat:RscOpOK"] % ("start", self._rid))
+ ]
+
+ watch = self.create_watch(pats, 60)
+ watch.set_watch()
+
+ self._cm.add_dummy_rsc(node, self._rid)
+
+ with Timer(self._logger, self.name, "addDummy"):
+ watch.look_for_all()
+
+ if watch.unmatched:
+ self.debug("Failed to find patterns when adding maintenance dummy resource")
+ return repr(watch.unmatched)
+
+ return ""
+
+ def _remove_maintenance_dummy(self, node):
+ """ Remove the previously created dummy resource on the given node """
+
+ pats = [
+ self.templates["Pat:RscOpOK"] % ("stop", self._rid)
+ ]
+
+ watch = self.create_watch(pats, 60)
+ watch.set_watch()
+ self._cm.remove_dummy_rsc(node, self._rid)
+
+ with Timer(self._logger, self.name, "removeDummy"):
+ watch.look_for_all()
+
+ if watch.unmatched:
+ self.debug("Failed to find patterns when removing maintenance dummy resource")
+ return repr(watch.unmatched)
+
+ return ""
+
+ def _managed_rscs(self, node):
+ """ Return a list of all resources managed by the cluster """
+
+ rscs = []
+ (_, lines) = self._rsh(node, "crm_resource -c", verbose=1)
+
+ for line in lines:
+ if re.search("^Resource", line):
+ tmp = AuditResource(self._cm, line)
+
+ if tmp.managed:
+ rscs.append(tmp.id)
+
+ return rscs
+
+ def _verify_resources(self, node, rscs, managed):
+ """ Verify that all resources in rscList are managed if they are expected
+ to be, or unmanaged if they are expected to be.
+ """
+
+ managed_rscs = rscs
+ managed_str = "managed"
+
+ if not managed:
+ managed_str = "unmanaged"
+
+ (_, lines) = self._rsh(node, "crm_resource -c", verbose=1)
+ for line in lines:
+ if re.search("^Resource", line):
+ tmp = AuditResource(self._cm, line)
+
+ if managed and not tmp.managed:
+ continue
+
+ if not managed and tmp.managed:
+ continue
+
+ if managed_rscs.count(tmp.id):
+ managed_rscs.remove(tmp.id)
+
+ if not managed_rscs:
+ self.debug("Found all %s resources on %s" % (managed_str, node))
+ return True
+
+ self._logger.log("Could not find all %s resources on %s. %s" % (managed_str, node, managed_rscs))
+ return False
+
+ def __call__(self, node):
+ """ Perform this test """
+
+ self.incr("calls")
+ verify_managed = False
+ verify_unmanaged = False
+ fail_pat = ""
+
+ if not self._startall(None):
+ return self.failure("Setup failed")
+
+ # get a list of all the managed resources. We use this list
+ # after enabling maintenance mode to verify all managed resources
+ # become un-managed. After maintenance mode is turned off, we use
+ # this list to verify all the resources become managed again.
+ managed_rscs = self._managed_rscs(node)
+ if not managed_rscs:
+ self._logger.log("No managed resources on %s" % node)
+ return self.skipped()
+
+ # insert a fake resource we can fail during maintenance mode
+ # so we can verify recovery does not take place until after maintenance
+ # mode is disabled.
+ fail_pat += self._insert_maintenance_dummy(node)
+
+ # toggle maintenance mode ON, then fail dummy resource.
+ fail_pat += self._toggle_maintenance_mode(node, True)
+
+ # verify all the resources are now unmanaged
+ if self._verify_resources(node, managed_rscs, False):
+ verify_unmanaged = True
+
+ # Toggle maintenance mode OFF, verify dummy is recovered.
+ fail_pat += self._toggle_maintenance_mode(node, False)
+
+ # verify all the resources are now managed again
+ if self._verify_resources(node, managed_rscs, True):
+ verify_managed = True
+
+ # Remove our maintenance dummy resource.
+ fail_pat += self._remove_maintenance_dummy(node)
+
+ self._cm.cluster_stable()
+
+ if fail_pat != "":
+ return self.failure("Unmatched patterns: %s" % fail_pat)
+
+ if not verify_unmanaged:
+ return self.failure("Failed to verify resources became unmanaged during maintenance mode")
+
+ if not verify_managed:
+ return self.failure("Failed to verify resources switched back to managed after disabling maintenance mode")
+
+ return self.success()
+
+ @property
+ def errors_to_ignore(self):
+ """ Return list of errors which should be ignored """
+
+ return [
+ r"Updating failcount for %s" % self._rid,
+ r"schedulerd.*: Recover\s+%s\s+\(.*\)" % self._rid,
+ r"Unknown operation: fail",
+ self.templates["Pat:RscOpOK"] % (self._action, self._rid),
+ r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self._rid, self._action, 0)
+ ]
diff --git a/python/pacemaker/_cts/tests/nearquorumpointtest.py b/python/pacemaker/_cts/tests/nearquorumpointtest.py
new file mode 100644
index 0000000..c5b70b7
--- /dev/null
+++ b/python/pacemaker/_cts/tests/nearquorumpointtest.py
@@ -0,0 +1,125 @@
+""" Randomly start and stop nodes to bring the cluster close to the quorum point """
+
+__all__ = ["NearQuorumPointTest"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker._cts.tests.ctstest import CTSTest
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally. This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._rsh is callable.
+# pylint: disable=not-callable
+# pylint doesn't understand that self._env is subscriptable.
+# pylint: disable=unsubscriptable-object
+
+
+class NearQuorumPointTest(CTSTest):
+ """ A concrete test that randomly starts and stops nodes to bring the
+ cluster close to the quorum point
+ """
+
+ def __init__(self, cm):
+ """ Create a new NearQuorumPointTest instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ CTSTest.__init__(self, cm)
+
+ self.name = "NearQuorumPoint"
+
+ def __call__(self, dummy):
+ """ Perform this test """
+
+ self.incr("calls")
+ startset = []
+ stopset = []
+
+ stonith = self._cm.prepare_fencing_watcher()
+ # decide what to do with each node
+ for node in self._env["nodes"]:
+ action = self._env.random_gen.choice(["start", "stop"])
+
+ if action == "start":
+ startset.append(node)
+ elif action == "stop":
+ stopset.append(node)
+
+ self.debug("start nodes:%r" % startset)
+ self.debug("stop nodes:%r" % stopset)
+
+ # add search patterns
+ watchpats = []
+ for node in stopset:
+ if self._cm.expected_status[node] == "up":
+ watchpats.append(self.templates["Pat:We_stopped"] % node)
+
+ for node in startset:
+ if self._cm.expected_status[node] == "down":
+ watchpats.append(self.templates["Pat:Local_started"] % node)
+ else:
+ for stopping in stopset:
+ if self._cm.expected_status[stopping] == "up":
+ watchpats.append(self.templates["Pat:They_stopped"] % (node, stopping))
+
+ if not watchpats:
+ return self.skipped()
+
+ if startset:
+ watchpats.append(self.templates["Pat:DC_IDLE"])
+
+ watch = self.create_watch(watchpats, self._env["DeadTime"] + 10)
+
+ watch.set_watch()
+
+ # begin actions
+ for node in stopset:
+ if self._cm.expected_status[node] == "up":
+ self._cm.stop_cm_async(node)
+
+ for node in startset:
+ if self._cm.expected_status[node] == "down":
+ self._cm.start_cm_async(node)
+
+ # get the result
+ if watch.look_for_all():
+ self._cm.cluster_stable()
+ self._cm.fencing_cleanup("NearQuorumPoint", stonith)
+ return self.success()
+
+ self._logger.log("Warn: Patterns not found: %r" % watch.unmatched)
+
+ #get the "bad" nodes
+ upnodes = []
+ for node in stopset:
+ if self._cm.stat_cm(node):
+ upnodes.append(node)
+
+ downnodes = []
+ for node in startset:
+ if not self._cm.stat_cm(node):
+ downnodes.append(node)
+
+ self._cm.fencing_cleanup("NearQuorumPoint", stonith)
+ if not upnodes and not downnodes:
+ self._cm.cluster_stable()
+
+ # Make sure they're completely down with no residue
+ for node in stopset:
+ self._rsh(node, self.templates["StopCmd"])
+
+ return self.success()
+
+ if upnodes:
+ self._logger.log("Warn: Unstoppable nodes: %r" % upnodes)
+
+ if downnodes:
+ self._logger.log("Warn: Unstartable nodes: %r" % downnodes)
+
+ return self.failure()
diff --git a/python/pacemaker/_cts/tests/partialstart.py b/python/pacemaker/_cts/tests/partialstart.py
new file mode 100644
index 0000000..1b074e6
--- /dev/null
+++ b/python/pacemaker/_cts/tests/partialstart.py
@@ -0,0 +1,75 @@
+""" Start a node and then tell it to stop before it is fully running """
+
+__all__ = ["PartialStart"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker._cts.tests.ctstest import CTSTest
+from pacemaker._cts.tests.simulstartlite import SimulStartLite
+from pacemaker._cts.tests.simulstoplite import SimulStopLite
+from pacemaker._cts.tests.stoptest import StopTest
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally. This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._env is subscriptable.
+# pylint: disable=unsubscriptable-object
+
+
+class PartialStart(CTSTest):
+ """ A concrete test that interrupts a node before it's finished starting up """
+
+ def __init__(self, cm):
+ """ Create a new PartialStart instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ CTSTest.__init__(self, cm)
+
+ self.name = "PartialStart"
+
+ self._startall = SimulStartLite(cm)
+ self._stop = StopTest(cm)
+ self._stopall = SimulStopLite(cm)
+
+ def __call__(self, node):
+ """ Perform this test """
+
+ self.incr("calls")
+
+ ret = self._stopall(None)
+ if not ret:
+ return self.failure("Setup failed")
+
+ watchpats = [
+ "pacemaker-controld.*Connecting to .* cluster infrastructure"
+ ]
+ watch = self.create_watch(watchpats, self._env["DeadTime"] + 10)
+ watch.set_watch()
+
+ self._cm.start_cm_async(node)
+ ret = watch.look_for_all()
+ if not ret:
+ self._logger.log("Patterns not found: %r" % watch.unmatched)
+ return self.failure("Setup of %s failed" % node)
+
+ ret = self._stop(node)
+ if not ret:
+ return self.failure("%s did not stop in time" % node)
+
+ return self.success()
+
+ @property
+ def errors_to_ignore(self):
+ """ Return list of errors which should be ignored """
+
+ # We might do some fencing in the 2-node case if we make it up far enough
+ return [
+ r"Executing reboot fencing operation",
+ r"Requesting fencing \([^)]+\) targeting node "
+ ]
diff --git a/python/pacemaker/_cts/tests/reattach.py b/python/pacemaker/_cts/tests/reattach.py
new file mode 100644
index 0000000..4452bc0
--- /dev/null
+++ b/python/pacemaker/_cts/tests/reattach.py
@@ -0,0 +1,221 @@
+""" Restart the cluster and verify resources remain running """
+
+__all__ = ["Reattach"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+import re
+import time
+
+from pacemaker.exitstatus import ExitStatus
+from pacemaker._cts.audits import AuditResource
+from pacemaker._cts.tests.ctstest import CTSTest
+from pacemaker._cts.tests.simulstartlite import SimulStartLite
+from pacemaker._cts.tests.simulstoplite import SimulStopLite
+from pacemaker._cts.tests.starttest import StartTest
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally. This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._rsh is callable.
+# pylint: disable=not-callable
+
+
+class Reattach(CTSTest):
+ """ A concrete test that restarts the cluster and verifies that resources
+ remain running throughout
+ """
+
+ def __init__(self, cm):
+ """ Create a new Reattach instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ CTSTest.__init__(self, cm)
+
+ self.name = "Reattach"
+
+ self._startall = SimulStartLite(cm)
+ self._stopall = SimulStopLite(cm)
+
+ def _is_managed(self, node):
+ """ Are resources managed by the cluster? """
+
+ (_, is_managed) = self._rsh(node, "crm_attribute -t rsc_defaults -n is-managed -q -G -d true", verbose=1)
+ is_managed = is_managed[0].strip()
+ return is_managed == "true"
+
+ def _set_unmanaged(self, node):
+ """ Disable resource management """
+
+ self.debug("Disable resource management")
+ self._rsh(node, "crm_attribute -t rsc_defaults -n is-managed -v false")
+
+ def _set_managed(self, node):
+ """ Enable resource management """
+
+ self.debug("Re-enable resource management")
+ self._rsh(node, "crm_attribute -t rsc_defaults -n is-managed -D")
+
+ def _disable_incompatible_rscs(self, node):
+ """ Disable resources that are incompatible with this test
+
+ Starts and stops of stonith-class resources are implemented internally
+ by Pacemaker, which means that they must stop when Pacemaker is
+ stopped, even if unmanaged. Disable them before running the Reattach
+ test so they don't affect resource placement.
+
+ OCFS2 resources must be disabled too for some reason.
+
+ Set target-role to "Stopped" for any of these resources in the CIB.
+ """
+
+ self.debug("Disable incompatible (stonith/OCFS2) resources")
+ xml = """'<meta_attributes id="cts-lab-Reattach-meta">
+ <nvpair id="cts-lab-Reattach-target-role" name="target-role" value="Stopped"/>
+ <rule id="cts-lab-Reattach-rule" boolean-op="or" score="INFINITY">
+ <rsc_expression id="cts-lab-Reattach-stonith" class="stonith"/>
+ <rsc_expression id="cts-lab-Reattach-o2cb" type="o2cb"/>
+ </rule>
+ </meta_attributes>' --scope rsc_defaults"""
+ return self._rsh(node, self._cm.templates['CibAddXml'] % xml)
+
+ def _enable_incompatible_rscs(self, node):
+ """ Re-enable resources that were incompatible with this test """
+
+ self.debug("Re-enable incompatible (stonith/OCFS2) resources")
+ xml = """<meta_attributes id="cts-lab-Reattach-meta">"""
+ return self._rsh(node, """cibadmin --delete --xml-text '%s'""" % xml)
+
+ def _reprobe(self, node):
+ """ Reprobe all resources
+
+ The placement of some resources (such as promotable-1 in the
+ lab-generated CIB) is affected by constraints using node-attribute-based
+ rules. An earlier test may have erased the relevant node attribute, so
+ do a reprobe, which should add the attribute back.
+ """
+
+ return self._rsh(node, """crm_resource --refresh""")
+
+ def setup(self, node):
+ """ Setup this test """
+
+ if not self._startall(None):
+ return self.failure("Startall failed")
+
+ (rc, _) = self._disable_incompatible_rscs(node)
+ if rc != ExitStatus.OK:
+ return self.failure("Couldn't modify CIB to stop incompatible resources")
+
+ (rc, _) = self._reprobe(node)
+ if rc != ExitStatus.OK:
+ return self.failure("Couldn't reprobe resources")
+
+ if not self._cm.cluster_stable(double_check=True):
+ return self.failure("Cluster did not stabilize after setup")
+
+ return self.success()
+
+ def teardown(self, node):
+ """ Tear down this test """
+
+ # Make sure 'node' is up
+ start = StartTest(self._cm)
+ start(node)
+
+ if not self._is_managed(node):
+ self._set_managed(node)
+
+ (rc, _) = self._enable_incompatible_rscs(node)
+ if rc != ExitStatus.OK:
+ return self.failure("Couldn't modify CIB to re-enable incompatible resources")
+
+ if not self._cm.cluster_stable():
+ return self.failure("Cluster did not stabilize after teardown")
+ if not self._is_managed(node):
+ return self.failure("Could not re-enable resource management")
+
+ return self.success()
+
+ def __call__(self, node):
+ """ Perform this test """
+
+ self.incr("calls")
+
+ # Conveniently, the scheduler will display this message when disabling
+ # management, even if fencing is not enabled, so we can rely on it.
+ managed = self.create_watch(["No fencing will be done"], 60)
+ managed.set_watch()
+
+ self._set_unmanaged(node)
+
+ if not managed.look_for_all():
+ self._logger.log("Patterns not found: %r" % managed.unmatched)
+ return self.failure("Resource management not disabled")
+
+ pats = [
+ self.templates["Pat:RscOpOK"] % ("start", ".*"),
+ self.templates["Pat:RscOpOK"] % ("stop", ".*"),
+ self.templates["Pat:RscOpOK"] % ("promote", ".*"),
+ self.templates["Pat:RscOpOK"] % ("demote", ".*"),
+ self.templates["Pat:RscOpOK"] % ("migrate", ".*")
+ ]
+
+ watch = self.create_watch(pats, 60, "ShutdownActivity")
+ watch.set_watch()
+
+ self.debug("Shutting down the cluster")
+ ret = self._stopall(None)
+ if not ret:
+ self._set_managed(node)
+ return self.failure("Couldn't shut down the cluster")
+
+ self.debug("Bringing the cluster back up")
+ ret = self._startall(None)
+ time.sleep(5) # allow ping to update the CIB
+ if not ret:
+ self._set_managed(node)
+ return self.failure("Couldn't restart the cluster")
+
+ if self.local_badnews("ResourceActivity:", watch):
+ self._set_managed(node)
+ return self.failure("Resources stopped or started during cluster restart")
+
+ watch = self.create_watch(pats, 60, "StartupActivity")
+ watch.set_watch()
+
+ # Re-enable resource management (and verify it happened).
+ self._set_managed(node)
+ self._cm.cluster_stable()
+ if not self._is_managed(node):
+ return self.failure("Could not re-enable resource management")
+
+ # Ignore actions for STONITH resources
+ ignore = []
+ (_, lines) = self._rsh(node, "crm_resource -c", verbose=1)
+ for line in lines:
+ if re.search("^Resource", line):
+ r = AuditResource(self._cm, line)
+
+ if r.rclass == "stonith":
+ self.debug("Ignoring start actions for %s" % r.id)
+ ignore.append(self.templates["Pat:RscOpOK"] % ("start", r.id))
+
+ if self.local_badnews("ResourceActivity:", watch, ignore):
+ return self.failure("Resources stopped or started after resource management was re-enabled")
+
+ return ret
+
+ @property
+ def errors_to_ignore(self):
+ """ Return list of errors which should be ignored """
+
+ return [
+ r"resource( was|s were) active at shutdown"
+ ]
diff --git a/python/pacemaker/_cts/tests/remotebasic.py b/python/pacemaker/_cts/tests/remotebasic.py
new file mode 100644
index 0000000..2f25aaf
--- /dev/null
+++ b/python/pacemaker/_cts/tests/remotebasic.py
@@ -0,0 +1,39 @@
+""" Start and stop a remote node """
+
+__all__ = ["RemoteBasic"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker._cts.tests.remotedriver import RemoteDriver
+
+
+class RemoteBasic(RemoteDriver):
+ """ A concrete test that starts and stops a remote node """
+
+ def __init__(self, cm):
+ """ Create a new RemoteBasic instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ RemoteDriver.__init__(self, cm)
+
+ self.name = "RemoteBasic"
+
+ def __call__(self, node):
+ """ Perform this test """
+
+ if not self.start_new_test(node):
+ return self.failure(self.fail_string)
+
+ self.test_attributes(node)
+ self.cleanup_metal(node)
+
+ self.debug("Waiting for the cluster to recover")
+ self._cm.cluster_stable()
+ if self.failed:
+ return self.failure(self.fail_string)
+
+ return self.success()
diff --git a/python/pacemaker/_cts/tests/remotedriver.py b/python/pacemaker/_cts/tests/remotedriver.py
new file mode 100644
index 0000000..c5b0292
--- /dev/null
+++ b/python/pacemaker/_cts/tests/remotedriver.py
@@ -0,0 +1,556 @@
+""" Base classes for CTS tests """
+
+__all__ = ["RemoteDriver"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+import os
+import time
+import subprocess
+import tempfile
+
+from pacemaker._cts.tests.ctstest import CTSTest
+from pacemaker._cts.tests.simulstartlite import SimulStartLite
+from pacemaker._cts.tests.starttest import StartTest
+from pacemaker._cts.tests.stoptest import StopTest
+from pacemaker._cts.timer import Timer
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally. This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._rsh is callable.
+# pylint: disable=not-callable
+
+
+class RemoteDriver(CTSTest):
+ """ A specialized base class for cluster tests that run on Pacemaker
+ Remote nodes. This builds on top of CTSTest to provide methods
+ for starting and stopping services and resources, and managing
+ remote nodes. This is still just an abstract class -- specific
+ tests need to implement their own specialized behavior.
+ """
+
+ def __init__(self, cm):
+ """ Create a new RemoteDriver instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ CTSTest.__init__(self, cm)
+ self.name = "RemoteDriver"
+
+ self._corosync_enabled = False
+ self._pacemaker_enabled = False
+ self._remote_node = None
+ self._remote_rsc = "remote-rsc"
+ self._start = StartTest(cm)
+ self._startall = SimulStartLite(cm)
+ self._stop = StopTest(cm)
+
+ self.reset()
+
+ def reset(self):
+ """ Reset the state of this test back to what it was before the test
+ was run
+ """
+
+ self.failed = False
+ self.fail_string = ""
+
+ self._pcmk_started = False
+ self._remote_node_added = False
+ self._remote_rsc_added = False
+ self._remote_use_reconnect_interval = self._env.random_gen.choice([True, False])
+
+ def fail(self, msg):
+ """ Mark test as failed """
+
+ self.failed = True
+
+ # Always log the failure.
+ self._logger.log(msg)
+
+ # Use first failure as test status, as it's likely to be most useful.
+ if not self.fail_string:
+ self.fail_string = msg
+
+ def _get_other_node(self, node):
+ """ Get the first cluster node out of the environment that is not the
+ given node. Typically, this is used to find some node that will
+ still be active that we can run cluster commands on.
+ """
+
+ for othernode in self._env["nodes"]:
+ if othernode == node:
+ # We don't want to try to use the CIB on the node we just shut down.
+ # Find a cluster node that is not our soon-to-be remote node.
+ continue
+
+ return othernode
+
+ def _del_rsc(self, node, rsc):
+ """ Delete the given named resource from the cluster. The given `node`
+ is the cluster node on which we should *not* run the delete command.
+ """
+
+ othernode = self._get_other_node(node)
+ (rc, _) = self._rsh(othernode, "crm_resource -D -r %s -t primitive" % rsc)
+ if rc != 0:
+ self.fail("Removal of resource '%s' failed" % rsc)
+
+ def _add_rsc(self, node, rsc_xml):
+ """ Add a resource given in XML format to the cluster. The given `node`
+ is the cluster node on which we should *not* run the add command.
+ """
+
+ othernode = self._get_other_node(node)
+ (rc, _) = self._rsh(othernode, "cibadmin -C -o resources -X '%s'" % rsc_xml)
+ if rc != 0:
+ self.fail("resource creation failed")
+
+ def _add_primitive_rsc(self, node):
+ """ Add a primitive heartbeat resource for the remote node to the
+ cluster. The given `node` is the cluster node on which we should
+ *not* run the add command.
+ """
+
+ rsc_xml = """
+<primitive class="ocf" id="%(node)s" provider="heartbeat" type="Dummy">
+ <meta_attributes id="%(node)s-meta_attributes"/>
+ <operations>
+ <op id="%(node)s-monitor-interval-20s" interval="20s" name="monitor"/>
+ </operations>
+</primitive>""" % {
+ "node": self._remote_rsc
+}
+
+ self._add_rsc(node, rsc_xml)
+ if not self.failed:
+ self._remote_rsc_added = True
+
+ def _add_connection_rsc(self, node):
+ """ Add a primitive connection resource for the remote node to the
+ cluster. The given `node` is the cluster node on which we should
+ *not* run the add command.
+ """
+
+ rsc_xml = """
+<primitive class="ocf" id="%(node)s" provider="pacemaker" type="remote">
+ <instance_attributes id="%(node)s-instance_attributes">
+ <nvpair id="%(node)s-instance_attributes-server" name="server" value="%(server)s"/>
+""" % {
+ "node": self._remote_node, "server": node
+}
+
+ if self._remote_use_reconnect_interval:
+ # Set reconnect interval on resource
+ rsc_xml += """
+ <nvpair id="%s-instance_attributes-reconnect_interval" name="reconnect_interval" value="60s"/>
+""" % self._remote_node
+
+ rsc_xml += """
+ </instance_attributes>
+ <operations>
+ <op id="%(node)s-start" name="start" interval="0" timeout="120s"/>
+ <op id="%(node)s-monitor-20s" name="monitor" interval="20s" timeout="45s"/>
+ </operations>
+</primitive>
+""" % {
+ "node": self._remote_node
+}
+
+ self._add_rsc(node, rsc_xml)
+ if not self.failed:
+ self._remote_node_added = True
+
+ def _disable_services(self, node):
+ """ Disable the corosync and pacemaker services on the given node """
+
+ self._corosync_enabled = self._env.service_is_enabled(node, "corosync")
+ if self._corosync_enabled:
+ self._env.disable_service(node, "corosync")
+
+ self._pacemaker_enabled = self._env.service_is_enabled(node, "pacemaker")
+ if self._pacemaker_enabled:
+ self._env.disable_service(node, "pacemaker")
+
+ def _enable_services(self, node):
+ """ Enable the corosync and pacemaker services on the given node """
+
+ if self._corosync_enabled:
+ self._env.enable_service(node, "corosync")
+
+ if self._pacemaker_enabled:
+ self._env.enable_service(node, "pacemaker")
+
+ def _stop_pcmk_remote(self, node):
+ """ Stop the Pacemaker Remote service on the given node """
+
+ for _ in range(10):
+ (rc, _) = self._rsh(node, "service pacemaker_remote stop")
+ if rc != 0:
+ time.sleep(6)
+ else:
+ break
+
+ def _start_pcmk_remote(self, node):
+ """ Start the Pacemaker Remote service on the given node """
+
+ for _ in range(10):
+ (rc, _) = self._rsh(node, "service pacemaker_remote start")
+ if rc != 0:
+ time.sleep(6)
+ else:
+ self._pcmk_started = True
+ break
+
+ def _freeze_pcmk_remote(self, node):
+ """ Simulate a Pacemaker Remote daemon failure """
+
+ self._rsh(node, "killall -STOP pacemaker-remoted")
+
+ def _resume_pcmk_remote(self, node):
+ """ Simulate the Pacemaker Remote daemon recovering """
+
+ self._rsh(node, "killall -CONT pacemaker-remoted")
+
+ def _start_metal(self, node):
+ """ Setup a Pacemaker Remote configuration. Remove any existing
+ connection resources or nodes. Start the pacemaker_remote service.
+ Create a connection resource.
+ """
+
+ # Cluster nodes are reused as remote nodes in remote tests. If cluster
+ # services were enabled at boot and the remote node later got fenced, the
+ # rebooted host would rejoin as a cluster node instead of the expected
+ # remote node, and pacemaker_remote would be unable to start. Once that
+ # happens, the test may no longer be able to recover gracefully.
+ #
+ # Temporarily disable any enabled cluster services.
+ self._disable_services(node)
+
+ # make sure the resource doesn't already exist for some reason
+ self._rsh(node, "crm_resource -D -r %s -t primitive" % self._remote_rsc)
+ self._rsh(node, "crm_resource -D -r %s -t primitive" % self._remote_node)
+
+ if not self._stop(node):
+ self.fail("Failed to shutdown cluster node %s" % node)
+ return
+
+ self._start_pcmk_remote(node)
+
+ if not self._pcmk_started:
+ self.fail("Failed to start pacemaker_remote on node %s" % node)
+ return
+
+ # Convert node to bare metal now that it has shut down the cluster stack
+ pats = []
+ watch = self.create_watch(pats, 120)
+ watch.set_watch()
+
+ pats.extend([
+ self.templates["Pat:RscOpOK"] % ("start", self._remote_node),
+ self.templates["Pat:DC_IDLE"]
+ ])
+
+ self._add_connection_rsc(node)
+
+ with Timer(self._logger, self.name, "remoteMetalInit"):
+ watch.look_for_all()
+
+ if watch.unmatched:
+ self.fail("Unmatched patterns: %s" % watch.unmatched)
+
+ def migrate_connection(self, node):
+ """ Move the remote connection resource from the node it's currently
+ running on to any other available node
+ """
+
+ if self.failed:
+ return
+
+ pats = [
+ self.templates["Pat:RscOpOK"] % ("migrate_to", self._remote_node),
+ self.templates["Pat:RscOpOK"] % ("migrate_from", self._remote_node),
+ self.templates["Pat:DC_IDLE"]
+ ]
+
+ watch = self.create_watch(pats, 120)
+ watch.set_watch()
+
+ (rc, _) = self._rsh(node, "crm_resource -M -r %s" % self._remote_node, verbose=1)
+ if rc != 0:
+ self.fail("failed to move remote node connection resource")
+ return
+
+ with Timer(self._logger, self.name, "remoteMetalMigrate"):
+ watch.look_for_all()
+
+ if watch.unmatched:
+ self.fail("Unmatched patterns: %s" % watch.unmatched)
+
+ def fail_rsc(self, node):
+ """ Cause the dummy resource running on a Pacemaker Remote node to fail
+ and verify that the failure is logged correctly
+ """
+
+ if self.failed:
+ return
+
+ watchpats = [
+ self.templates["Pat:RscRemoteOpOK"] % ("stop", self._remote_rsc, self._remote_node),
+ self.templates["Pat:RscRemoteOpOK"] % ("start", self._remote_rsc, self._remote_node),
+ self.templates["Pat:DC_IDLE"]
+ ]
+
+ watch = self.create_watch(watchpats, 120)
+ watch.set_watch()
+
+ self.debug("causing dummy rsc to fail.")
+
+ self._rsh(node, "rm -f /var/run/resource-agents/Dummy*")
+
+ with Timer(self._logger, self.name, "remoteRscFail"):
+ watch.look_for_all()
+
+ if watch.unmatched:
+ self.fail("Unmatched patterns during rsc fail: %s" % watch.unmatched)
+
+ def fail_connection(self, node):
+ """ Cause the remote connection resource to fail and verify that the
+ node is fenced and the connection resource is restarted on another
+ node.
+ """
+
+ if self.failed:
+ return
+
+ watchpats = [
+ self.templates["Pat:Fencing_ok"] % self._remote_node,
+ self.templates["Pat:NodeFenced"] % self._remote_node
+ ]
+
+ watch = self.create_watch(watchpats, 120)
+ watch.set_watch()
+
+ # freeze the pcmk remote daemon. this will result in fencing
+ self.debug("Force stopped active remote node")
+ self._freeze_pcmk_remote(node)
+
+ self.debug("Waiting for remote node to be fenced.")
+
+ with Timer(self._logger, self.name, "remoteMetalFence"):
+ watch.look_for_all()
+
+ if watch.unmatched:
+ self.fail("Unmatched patterns: %s" % watch.unmatched)
+ return
+
+ self.debug("Waiting for the remote node to come back up")
+ self._cm.ns.wait_for_node(node, 120)
+
+ pats = []
+
+ watch = self.create_watch(pats, 240)
+ watch.set_watch()
+
+ pats.append(self.templates["Pat:RscOpOK"] % ("start", self._remote_node))
+
+ if self._remote_rsc_added:
+ pats.append(self.templates["Pat:RscRemoteOpOK"] % ("start", self._remote_rsc, self._remote_node))
+
+ # start the remote node again and watch it integrate back into the cluster.
+ self._start_pcmk_remote(node)
+ if not self._pcmk_started:
+ self.fail("Failed to start pacemaker_remote on node %s" % node)
+ return
+
+ self.debug("Waiting for remote node to rejoin cluster after being fenced.")
+
+ with Timer(self._logger, self.name, "remoteMetalRestart"):
+ watch.look_for_all()
+
+ if watch.unmatched:
+ self.fail("Unmatched patterns: %s" % watch.unmatched)
+
+ def _add_dummy_rsc(self, node):
+ """ Add a dummy resource that runs on the Pacemaker Remote node """
+
+ if self.failed:
+ return
+
+ # verify we can put a resource on the remote node
+ pats = []
+ watch = self.create_watch(pats, 120)
+ watch.set_watch()
+
+ pats.extend([
+ self.templates["Pat:RscRemoteOpOK"] % ("start", self._remote_rsc, self._remote_node),
+ self.templates["Pat:DC_IDLE"]
+ ])
+
+ # Add a resource that must live on remote-node
+ self._add_primitive_rsc(node)
+
+ # force that rsc to prefer the remote node.
+ (rc, _) = self._cm.rsh(node, "crm_resource -M -r %s -N %s -f" % (self._remote_rsc, self._remote_node), verbose=1)
+ if rc != 0:
+ self.fail("Failed to place remote resource on remote node.")
+ return
+
+ with Timer(self._logger, self.name, "remoteMetalRsc"):
+ watch.look_for_all()
+
+ if watch.unmatched:
+ self.fail("Unmatched patterns: %s" % watch.unmatched)
+
+ def test_attributes(self, node):
+ """ Verify that attributes can be set on the Pacemaker Remote node """
+
+ if self.failed:
+ return
+
+ # This verifies permanent attributes can be set on a remote-node. It also
+ # verifies the remote-node can edit its own cib node section remotely.
+ (rc, line) = self._cm.rsh(node, "crm_attribute -l forever -n testattr -v testval -N %s" % self._remote_node, verbose=1)
+ if rc != 0:
+ self.fail("Failed to set remote-node attribute. rc:%s output:%s" % (rc, line))
+ return
+
+ (rc, _) = self._cm.rsh(node, "crm_attribute -l forever -n testattr -q -N %s" % self._remote_node, verbose=1)
+ if rc != 0:
+ self.fail("Failed to get remote-node attribute")
+ return
+
+ (rc, _) = self._cm.rsh(node, "crm_attribute -l forever -n testattr -D -N %s" % self._remote_node, verbose=1)
+ if rc != 0:
+ self.fail("Failed to delete remote-node attribute")
+
+ def cleanup_metal(self, node):
+ """ Clean up the Pacemaker Remote node configuration previously created by
+ _setup_metal. Stop and remove dummy resources and connection resources.
+ Stop the pacemaker_remote service. Remove the remote node itself.
+ """
+
+ self._enable_services(node)
+
+ if not self._pcmk_started:
+ return
+
+ pats = []
+
+ watch = self.create_watch(pats, 120)
+ watch.set_watch()
+
+ if self._remote_rsc_added:
+ pats.append(self.templates["Pat:RscOpOK"] % ("stop", self._remote_rsc))
+
+ if self._remote_node_added:
+ pats.append(self.templates["Pat:RscOpOK"] % ("stop", self._remote_node))
+
+ with Timer(self._logger, self.name, "remoteMetalCleanup"):
+ self._resume_pcmk_remote(node)
+
+ if self._remote_rsc_added:
+ # Remove dummy resource added for remote node tests
+ self.debug("Cleaning up dummy rsc put on remote node")
+ self._rsh(self._get_other_node(node), "crm_resource -U -r %s" % self._remote_rsc)
+ self._del_rsc(node, self._remote_rsc)
+
+ if self._remote_node_added:
+ # Remove remote node's connection resource
+ self.debug("Cleaning up remote node connection resource")
+ self._rsh(self._get_other_node(node), "crm_resource -U -r %s" % self._remote_node)
+ self._del_rsc(node, self._remote_node)
+
+ watch.look_for_all()
+
+ if watch.unmatched:
+ self.fail("Unmatched patterns: %s" % watch.unmatched)
+
+ self._stop_pcmk_remote(node)
+
+ self.debug("Waiting for the cluster to recover")
+ self._cm.cluster_stable()
+
+ if self._remote_node_added:
+ # Remove remote node itself
+ self.debug("Cleaning up node entry for remote node")
+ self._rsh(self._get_other_node(node), "crm_node --force --remove %s" % self._remote_node)
+
+ def _setup_env(self, node):
+ """ Setup the environment to allow Pacemaker Remote to function. This
+ involves generating a key and copying it to all nodes in the cluster.
+ """
+
+ self._remote_node = "remote-%s" % node
+
+ # We assume that if every node already has a key, it is
+ # the right one. If any node is missing the remote key,
+ # we regenerate it everywhere.
+ if self._rsh.exists_on_all("/etc/pacemaker/authkey", self._env["nodes"]):
+ return
+
+ # create key locally
+ (handle, keyfile) = tempfile.mkstemp(".cts")
+ os.close(handle)
+ subprocess.check_call(["dd", "if=/dev/urandom", "of=%s" % keyfile, "bs=4096", "count=1"],
+ stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+
+ # sync key throughout the cluster
+ for n in self._env["nodes"]:
+ self._rsh(n, "mkdir -p --mode=0750 /etc/pacemaker")
+ self._rsh.copy(keyfile, "root@%s:/etc/pacemaker/authkey" % n)
+ self._rsh(n, "chgrp haclient /etc/pacemaker /etc/pacemaker/authkey")
+ self._rsh(n, "chmod 0640 /etc/pacemaker/authkey")
+
+ os.unlink(keyfile)
+
+ def is_applicable(self):
+ """ Return True if this test is applicable in the current test configuration. """
+
+ if not CTSTest.is_applicable(self):
+ return False
+
+ for node in self._env["nodes"]:
+ (rc, _) = self._rsh(node, "which pacemaker-remoted >/dev/null 2>&1")
+ if rc != 0:
+ return False
+
+ return True
+
+ def start_new_test(self, node):
+ """ Prepare a remote test for running by setting up its environment
+ and resources
+ """
+
+ self.incr("calls")
+ self.reset()
+
+ ret = self._startall(None)
+ if not ret:
+ return self.failure("setup failed: could not start all nodes")
+
+ self._setup_env(node)
+ self._start_metal(node)
+ self._add_dummy_rsc(node)
+ return True
+
+ def __call__(self, node):
+ """ Perform this test """
+
+ raise NotImplementedError
+
+ @property
+ def errors_to_ignore(self):
+ """ Return list of errors which should be ignored """
+
+ return [
+ r"""is running on remote.*which isn't allowed""",
+ r"""Connection terminated""",
+ r"""Could not send remote"""
+ ]
diff --git a/python/pacemaker/_cts/tests/remotemigrate.py b/python/pacemaker/_cts/tests/remotemigrate.py
new file mode 100644
index 0000000..e22e98f
--- /dev/null
+++ b/python/pacemaker/_cts/tests/remotemigrate.py
@@ -0,0 +1,63 @@
+""" Move a connection resource from one node to another """
+
+__all__ = ["RemoteMigrate"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker._cts.tests.remotedriver import RemoteDriver
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally. This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._env is subscriptable.
+# pylint: disable=unsubscriptable-object
+
+
+class RemoteMigrate(RemoteDriver):
+ """ A concrete test that moves a connection resource from one node to another """
+
+ def __init__(self, cm):
+ """ Create a new RemoteMigrate instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ RemoteDriver.__init__(self, cm)
+
+ self.name = "RemoteMigrate"
+
+ def __call__(self, node):
+ """ Perform this test """
+
+ # This code is very similar to __call__ in remotestonithd.py, but I don't think
+ # it's worth turning into a library function nor making one a subclass of the
+ # other. I think that's more confusing than leaving the duplication.
+ # pylint: disable=duplicate-code
+
+ if not self.start_new_test(node):
+ return self.failure(self.fail_string)
+
+ self.migrate_connection(node)
+ self.cleanup_metal(node)
+
+ self.debug("Waiting for the cluster to recover")
+ self._cm.cluster_stable()
+ if self.failed:
+ return self.failure(self.fail_string)
+
+ return self.success()
+
+ def is_applicable(self):
+ """ Return True if this test is applicable in the current test configuration. """
+
+ if not RemoteDriver.is_applicable(self):
+ return False
+
+ # This test requires at least three nodes: one to convert to a
+ # remote node, one to host the connection originally, and one
+ # to migrate the connection to.
+ return len(self._env["nodes"]) >= 3
diff --git a/python/pacemaker/_cts/tests/remoterscfailure.py b/python/pacemaker/_cts/tests/remoterscfailure.py
new file mode 100644
index 0000000..6f221de
--- /dev/null
+++ b/python/pacemaker/_cts/tests/remoterscfailure.py
@@ -0,0 +1,73 @@
+""" Cause the Pacemaker Remote connection resource to fail """
+
+__all__ = ["RemoteRscFailure"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker._cts.tests.remotedriver import RemoteDriver
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally. This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._env is subscriptable.
+# pylint: disable=unsubscriptable-object
+
+
+class RemoteRscFailure(RemoteDriver):
+ """ A concrete test that causes the Pacemaker Remote connection resource
+ to fail
+ """
+
+ def __init__(self, cm):
+ """ Create a new RemoteRscFailure instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ RemoteDriver.__init__(self, cm)
+ self.name = "RemoteRscFailure"
+
+ def __call__(self, node):
+ """ Perform this test """
+
+ if not self.start_new_test(node):
+ return self.failure(self.fail_string)
+
+ # This is an important step. We are migrating the connection
+ # before failing the resource. This verifies that the migration
+ # has properly maintained control over the remote-node.
+ self.migrate_connection(node)
+
+ self.fail_rsc(node)
+ self.cleanup_metal(node)
+
+ self.debug("Waiting for the cluster to recover")
+ self._cm.cluster_stable()
+ if self.failed:
+ return self.failure(self.fail_string)
+
+ return self.success()
+
+ @property
+ def errors_to_ignore(self):
+ """ Return list of errors which should be ignored """
+
+ return [
+ r"schedulerd.*: Recover\s+remote-rsc\s+\(.*\)",
+ r"Dummy.*: No process state file found"
+ ] + super().errors_to_ignore
+
+ def is_applicable(self):
+ """ Return True if this test is applicable in the current test configuration. """
+
+ if not RemoteDriver.is_applicable(self):
+ return False
+
+ # This test requires at least three nodes: one to convert to a
+ # remote node, one to host the connection originally, and one
+ # to migrate the connection to.
+ return len(self._env["nodes"]) >= 3
diff --git a/python/pacemaker/_cts/tests/remotestonithd.py b/python/pacemaker/_cts/tests/remotestonithd.py
new file mode 100644
index 0000000..f684992
--- /dev/null
+++ b/python/pacemaker/_cts/tests/remotestonithd.py
@@ -0,0 +1,62 @@
+""" Fail the connection resource and fence the remote node """
+
+__all__ = ["RemoteStonithd"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker._cts.tests.remotedriver import RemoteDriver
+
+
+class RemoteStonithd(RemoteDriver):
+ """ A concrete test that fails the connection resource and fences the
+ remote node
+ """
+
+ def __init__(self, cm):
+ """ Create a new RemoteStonithd instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ RemoteDriver.__init__(self, cm)
+
+ self.name = "RemoteStonithd"
+
+ def __call__(self, node):
+ """ Perform this test """
+
+ if not self.start_new_test(node):
+ return self.failure(self.fail_string)
+
+ self.fail_connection(node)
+ self.cleanup_metal(node)
+
+ self.debug("Waiting for the cluster to recover")
+ self._cm.cluster_stable()
+ if self.failed:
+ return self.failure(self.fail_string)
+
+ return self.success()
+
+ def is_applicable(self):
+ """ Return True if this test is applicable in the current test configuration. """
+
+ if not RemoteDriver.is_applicable(self):
+ return False
+
+ return self._env.get("DoFencing", True)
+
+ @property
+ def errors_to_ignore(self):
+ """ Return list of errors which should be ignored """
+
+ return [
+ r"Lost connection to Pacemaker Remote node",
+ r"Software caused connection abort",
+ r"pacemaker-controld.*:\s+error.*: Operation remote-.*_monitor",
+ r"pacemaker-controld.*:\s+error.*: Result of monitor operation for remote-.*",
+ r"schedulerd.*:\s+Recover\s+remote-.*\s+\(.*\)",
+ r"error: Result of monitor operation for .* on remote-.*: Internal communication failure"
+ ] + super().errors_to_ignore
diff --git a/python/pacemaker/_cts/tests/resourcerecover.py b/python/pacemaker/_cts/tests/resourcerecover.py
new file mode 100644
index 0000000..252eb1f
--- /dev/null
+++ b/python/pacemaker/_cts/tests/resourcerecover.py
@@ -0,0 +1,175 @@
+""" Fail a random resource and verify its fail count increases """
+
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker._cts.audits import AuditResource
+from pacemaker._cts.tests.ctstest import CTSTest
+from pacemaker._cts.tests.simulstartlite import SimulStartLite
+from pacemaker._cts.tests.starttest import StartTest
+from pacemaker._cts.timer import Timer
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally. This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._rsh is callable.
+# pylint: disable=not-callable
+
+
+class ResourceRecover(CTSTest):
+ """ A concrete test that fails a random resource """
+
+ def __init__(self, cm):
+ """ Create a new ResourceRecover instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ CTSTest.__init__(self, cm)
+
+ self.benchmark = True
+ self.name = "ResourceRecover"
+
+ self._action = "asyncmon"
+ self._interval = 0
+ self._rid = None
+ self._rid_alt = None
+ self._start = StartTest(cm)
+ self._startall = SimulStartLite(cm)
+
+ def __call__(self, node):
+ """ Perform this test """
+
+ self.incr("calls")
+
+ if not self._startall(None):
+ return self.failure("Setup failed")
+
+ # List all resources active on the node (skip test if none)
+ resourcelist = self._cm.active_resources(node)
+ if not resourcelist:
+ self._logger.log("No active resources on %s" % node)
+ return self.skipped()
+
+ # Choose one resource at random
+ rsc = self._choose_resource(node, resourcelist)
+ if rsc is None:
+ return self.failure("Could not get details of resource '%s'" % self._rid)
+
+ if rsc.id == rsc.clone_id:
+ self.debug("Failing %s" % rsc.id)
+ else:
+ self.debug("Failing %s (also known as %s)" % (rsc.id, rsc.clone_id))
+
+ # Log patterns to watch for (failure, plus restart if managed)
+ pats = [
+ self.templates["Pat:CloneOpFail"] % (self._action, rsc.id, rsc.clone_id)
+ ]
+
+ if rsc.managed:
+ pats.append(self.templates["Pat:RscOpOK"] % ("stop", self._rid))
+
+ if rsc.unique:
+ pats.append(self.templates["Pat:RscOpOK"] % ("start", self._rid))
+ else:
+ # Anonymous clones may get restarted with a different clone number
+ pats.append(self.templates["Pat:RscOpOK"] % ("start", ".*"))
+
+ # Fail resource. (Ideally, we'd fail it twice, to ensure the fail count
+ # is incrementing properly, but it might restart on a different node.
+ # We'd have to temporarily ban it from all other nodes and ensure the
+ # migration-threshold hasn't been reached.)
+ if self._fail_resource(rsc, node, pats) is None:
+ # self.failure() already called
+ return None
+
+ return self.success()
+
+ def _choose_resource(self, node, resourcelist):
+ """ Choose a random resource to target """
+
+ self._rid = self._env.random_gen.choice(resourcelist)
+ self._rid_alt = self._rid
+ (_, lines) = self._rsh(node, "crm_resource -c", verbose=1)
+
+ for line in lines:
+ if line.startswith("Resource: "):
+ rsc = AuditResource(self._cm, line)
+
+ if rsc.id == self._rid:
+ # Handle anonymous clones that get renamed
+ self._rid = rsc.clone_id
+ return rsc
+
+ return None
+
+ def _get_failcount(self, node):
+ """ Check the fail count of targeted resource on given node """
+
+ cmd = "crm_failcount --quiet --query --resource %s --operation %s --interval %d --node %s"
+ (rc, lines) = self._rsh(node, cmd % (self._rid, self._action, self._interval, node),
+ verbose=1)
+
+ if rc != 0 or len(lines) != 1:
+ lines = [l.strip() for l in lines]
+ self._logger.log("crm_failcount on %s failed (%d): %s" % (node, rc, " // ".join(lines)))
+ return -1
+
+ try:
+ failcount = int(lines[0])
+ except (IndexError, ValueError):
+ self._logger.log("crm_failcount output on %s unparseable: %s" % (node, " ".join(lines)))
+ return -1
+
+ return failcount
+
+ def _fail_resource(self, rsc, node, pats):
+ """ Fail the targeted resource, and verify as expected """
+
+ orig_failcount = self._get_failcount(node)
+
+ watch = self.create_watch(pats, 60)
+ watch.set_watch()
+
+ self._rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self._rid, node))
+
+ with Timer(self._logger, self.name, "recover"):
+ watch.look_for_all()
+
+ self._cm.cluster_stable()
+ recovered = self._cm.resource_location(self._rid)
+
+ if watch.unmatched:
+ return self.failure("Patterns not found: %r" % watch.unmatched)
+
+ if rsc.unique and len(recovered) > 1:
+ return self.failure("%s is now active on more than one node: %r" % (self._rid, recovered))
+
+ if recovered:
+ self.debug("%s is running on: %r" % (self._rid, recovered))
+
+ elif rsc.managed:
+ return self.failure("%s was not recovered and is inactive" % self._rid)
+
+ new_failcount = self._get_failcount(node)
+ if new_failcount != orig_failcount + 1:
+ return self.failure("%s fail count is %d not %d"
+ % (self._rid, new_failcount, orig_failcount + 1))
+
+ return 0 # Anything but None is success
+
+ @property
+ def errors_to_ignore(self):
+ """ Return list of errors which should be ignored """
+
+ return [
+ r"Updating failcount for %s" % self._rid,
+ r"schedulerd.*: Recover\s+(%s|%s)\s+\(.*\)" % (self._rid, self._rid_alt),
+ r"Unknown operation: fail",
+ self.templates["Pat:RscOpOK"] % (self._action, self._rid),
+ r"(ERROR|error).*: Action %s_%s_%d .* initiated outside of a transition" % (self._rid, self._action, self._interval)
+ ]
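
As a side note, the fail-count query used by _get_failcount() above can be exercised on its own. The following is a rough standalone sketch (a hypothetical helper, not part of this patch) that mirrors the same crm_failcount invocation and parsing, assuming passwordless ssh access to the node:

import subprocess

def get_failcount(node, rsc, action="asyncmon", interval=0):
    # Mirrors ResourceRecover._get_failcount(): query the fail count and
    # return -1 if the command fails or prints anything unexpected.
    cmd = ["ssh", node, "crm_failcount", "--quiet", "--query",
           "--resource", rsc, "--operation", action,
           "--interval", str(interval), "--node", node]
    proc = subprocess.run(cmd, capture_output=True, text=True, check=False)
    lines = proc.stdout.splitlines()
    if proc.returncode != 0 or len(lines) != 1:
        return -1
    try:
        return int(lines[0].strip())
    except ValueError:
        return -1
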
diff --git a/python/pacemaker/_cts/tests/restartonebyone.py b/python/pacemaker/_cts/tests/restartonebyone.py
new file mode 100644
index 0000000..23b3a68
--- /dev/null
+++ b/python/pacemaker/_cts/tests/restartonebyone.py
@@ -0,0 +1,58 @@
+""" Restart all nodes in order """
+
+__all__ = ["RestartOnebyOne"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker._cts.tests.ctstest import CTSTest
+from pacemaker._cts.tests.restarttest import RestartTest
+from pacemaker._cts.tests.simulstartlite import SimulStartLite
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally. This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._env is subscriptable.
+# pylint: disable=unsubscriptable-object
+
+
+class RestartOnebyOne(CTSTest):
+ """ A concrete test that restarts all nodes in order """
+
+ def __init__(self, cm):
+ """ Create a new RestartOnebyOne instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ CTSTest.__init__(self, cm)
+
+ self.name = "RestartOnebyOne"
+
+ self._restart = None
+ self._startall = SimulStartLite(cm)
+
+ def __call__(self, dummy):
+ """ Perform the test """
+
+ self.incr("calls")
+
+ ret = self._startall(None)
+ if not ret:
+ return self.failure("Setup failed")
+
+ did_fail = []
+ self.set_timer()
+ self._restart = RestartTest(self._cm)
+
+ for node in self._env["nodes"]:
+ if not self._restart(node):
+ did_fail.append(node)
+
+ if did_fail:
+ return self.failure("Could not restart %d nodes: %r" % (len(did_fail), did_fail))
+
+ return self.success()
diff --git a/python/pacemaker/_cts/tests/restarttest.py b/python/pacemaker/_cts/tests/restarttest.py
new file mode 100644
index 0000000..3b628ce
--- /dev/null
+++ b/python/pacemaker/_cts/tests/restarttest.py
@@ -0,0 +1,49 @@
+""" Stop and restart a node """
+
+__all__ = ["RestartTest"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker._cts.tests.ctstest import CTSTest
+from pacemaker._cts.tests.starttest import StartTest
+from pacemaker._cts.tests.stoptest import StopTest
+
+
+class RestartTest(CTSTest):
+ """ A concrete test that stops and restarts a node """
+
+ def __init__(self, cm):
+ """ Create a new RestartTest instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ CTSTest.__init__(self, cm)
+ self.benchmark = True
+ self.name = "Restart"
+
+ self._start = StartTest(cm)
+ self._stop = StopTest(cm)
+
+ def __call__(self, node):
+ """ Perform this test """
+
+ self.incr("calls")
+ self.incr("node:%s" % node)
+
+ if self._cm.stat_cm(node):
+ self.incr("WasStopped")
+ if not self._start(node):
+ return self.failure("start (setup) failure: %s" % node)
+
+ self.set_timer()
+
+ if not self._stop(node):
+ return self.failure("stop failure: %s" % node)
+
+ if not self._start(node):
+ return self.failure("start failure: %s" % node)
+
+ return self.success()
diff --git a/python/pacemaker/_cts/tests/resynccib.py b/python/pacemaker/_cts/tests/resynccib.py
new file mode 100644
index 0000000..fe634d6
--- /dev/null
+++ b/python/pacemaker/_cts/tests/resynccib.py
@@ -0,0 +1,75 @@
+""" Start the cluster without a CIB and verify it gets copied from another node """
+
+__all__ = ["ResyncCIB"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker import BuildOptions
+from pacemaker._cts.tests.ctstest import CTSTest
+from pacemaker._cts.tests.restarttest import RestartTest
+from pacemaker._cts.tests.simulstartlite import SimulStartLite
+from pacemaker._cts.tests.simulstoplite import SimulStopLite
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally. This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._rsh is callable.
+# pylint: disable=not-callable
+
+
+class ResyncCIB(CTSTest):
+ """ A concrete test that starts the cluster on one node without a CIB and
+ verifies the CIB is copied over when the remaining nodes join
+ """
+
+ def __init__(self, cm):
+ """ Create a new ResyncCIB instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ CTSTest.__init__(self, cm)
+
+ self.name = "ResyncCIB"
+
+ self._restart1 = RestartTest(cm)
+ self._startall = SimulStartLite(cm)
+ self._stopall = SimulStopLite(cm)
+
+ def __call__(self, node):
+ """ Perform this test """
+
+ self.incr("calls")
+
+ # Shut down all the nodes...
+ if not self._stopall(None):
+ return self.failure("Could not stop all nodes")
+
+ # Test config recovery when the other nodes come up
+ self._rsh(node, "rm -f %s/cib*" % BuildOptions.CIB_DIR)
+
+ # Start the selected node
+ if not self._restart1(node):
+ return self.failure("Could not start %s" % node)
+
+ # Start all remaining nodes
+ if not self._startall(None):
+ return self.failure("Could not start the remaining nodes")
+
+ return self.success()
+
+ @property
+ def errors_to_ignore(self):
+ """ Return list of errors which should be ignored """
+
+ # Errors that occur as a result of the CIB being wiped
+ return [
+ r"error.*: v1 patchset error, patch failed to apply: Application of an update diff failed",
+ r"error.*: Resource start-up disabled since no STONITH resources have been defined",
+ r"error.*: Either configure some or disable STONITH with the stonith-enabled option",
+ r"error.*: NOTE: Clusters with shared data need STONITH to ensure data integrity"
+ ]
diff --git a/python/pacemaker/_cts/tests/simulstart.py b/python/pacemaker/_cts/tests/simulstart.py
new file mode 100644
index 0000000..88a7f2f
--- /dev/null
+++ b/python/pacemaker/_cts/tests/simulstart.py
@@ -0,0 +1,42 @@
+""" Start all stopped nodes simultaneously """
+
+__all__ = ["SimulStart"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker._cts.tests.ctstest import CTSTest
+from pacemaker._cts.tests.simulstartlite import SimulStartLite
+from pacemaker._cts.tests.simulstoplite import SimulStopLite
+
+
+class SimulStart(CTSTest):
+ """ A concrete test that starts all stopped nodes simultaneously """
+
+ def __init__(self, cm):
+ """ Create a new SimulStart instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ CTSTest.__init__(self, cm)
+
+ self.name = "SimulStart"
+
+ self._startall = SimulStartLite(cm)
+ self._stopall = SimulStopLite(cm)
+
+ def __call__(self, dummy):
+ """ Perform this test """
+
+ self.incr("calls")
+
+ ret = self._stopall(None)
+ if not ret:
+ return self.failure("Setup failed")
+
+ if not self._startall(None):
+ return self.failure("Startall failed")
+
+ return self.success()
diff --git a/python/pacemaker/_cts/tests/simulstartlite.py b/python/pacemaker/_cts/tests/simulstartlite.py
new file mode 100644
index 0000000..c5c51e1
--- /dev/null
+++ b/python/pacemaker/_cts/tests/simulstartlite.py
@@ -0,0 +1,133 @@
+""" Simultaneously start stopped nodes """
+
+__all__ = ["SimulStartLite"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker._cts.tests.ctstest import CTSTest
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally. This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._env is subscriptable.
+# pylint: disable=unsubscriptable-object
+
+
+class SimulStartLite(CTSTest):
+ """ A pseudo-test that is only used to set up conditions before running
+ some other test. This class starts any stopped nodes more or less
+ simultaneously.
+
+ Other test classes should not use this one as a superclass.
+ """
+
+ def __init__(self, cm):
+ """ Create a new SimulStartLite instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ CTSTest.__init__(self, cm)
+ self.name = "SimulStartLite"
+
+ def __call__(self, dummy):
+ """ Start all stopped nodes more or less simultaneously, returning
+ whether this succeeded or not.
+ """
+
+ self.incr("calls")
+ self.debug("Setup: %s" % self.name)
+
+ # We ignore the "node" parameter...
+ node_list = []
+ for node in self._env["nodes"]:
+ if self._cm.expected_status[node] == "down":
+ self.incr("WasStopped")
+ node_list.append(node)
+
+ self.set_timer()
+ while len(node_list) > 0:
+ # Repeat until all nodes come up
+ uppat = self.templates["Pat:NonDC_started"]
+ if self._cm.upcount() == 0:
+ uppat = self.templates["Pat:Local_started"]
+
+ watchpats = [
+ self.templates["Pat:DC_IDLE"]
+ ]
+ for node in node_list:
+ watchpats.extend([uppat % node,
+ self.templates["Pat:InfraUp"] % node,
+ self.templates["Pat:PacemakerUp"] % node])
+
+ # Start all the nodes - at about the same time...
+ watch = self.create_watch(watchpats, self._env["DeadTime"]+10)
+ watch.set_watch()
+
+ stonith = self._cm.prepare_fencing_watcher()
+
+ for node in node_list:
+ self._cm.start_cm_async(node)
+
+ watch.look_for_all()
+
+ node_list = self._cm.fencing_cleanup(self.name, stonith)
+
+ if node_list is None:
+ return self.failure("Cluster did not stabilize")
+
+ # Remove node_list messages from watch.unmatched
+ for node in node_list:
+ self._logger.debug("Dealing with stonith operations for %s" % node_list)
+ if watch.unmatched:
+ try:
+ watch.unmatched.remove(uppat % node)
+ except ValueError:
+ self.debug("Already matched: %s" % (uppat % node))
+
+ try:
+ watch.unmatched.remove(self.templates["Pat:InfraUp"] % node)
+ except ValueError:
+ self.debug("Already matched: %s" % (self.templates["Pat:InfraUp"] % node))
+
+ try:
+ watch.unmatched.remove(self.templates["Pat:PacemakerUp"] % node)
+ except ValueError:
+ self.debug("Already matched: %s" % (self.templates["Pat:PacemakerUp"] % node))
+
+ if watch.unmatched:
+ for regex in watch.unmatched:
+ self._logger.log("Warn: Startup pattern not found: %s" % regex)
+
+ if not self._cm.cluster_stable():
+ return self.failure("Cluster did not stabilize")
+
+ did_fail = False
+ unstable = []
+ for node in self._env["nodes"]:
+ if not self._cm.stat_cm(node):
+ did_fail = True
+ unstable.append(node)
+
+ if did_fail:
+ return self.failure("Unstarted nodes exist: %s" % unstable)
+
+ unstable = []
+ for node in self._env["nodes"]:
+ if not self._cm.node_stable(node):
+ did_fail = True
+ unstable.append(node)
+
+ if did_fail:
+ return self.failure("Unstable cluster nodes exist: %s" % unstable)
+
+ return self.success()
+
+ def is_applicable(self):
+ """ SimulStartLite is a setup test and never applicable """
+
+ return False
diff --git a/python/pacemaker/_cts/tests/simulstop.py b/python/pacemaker/_cts/tests/simulstop.py
new file mode 100644
index 0000000..174c533
--- /dev/null
+++ b/python/pacemaker/_cts/tests/simulstop.py
@@ -0,0 +1,42 @@
+""" Stop all running nodes simultaneously """
+
+__all__ = ["SimulStop"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker._cts.tests.ctstest import CTSTest
+from pacemaker._cts.tests.simulstartlite import SimulStartLite
+from pacemaker._cts.tests.simulstoplite import SimulStopLite
+
+
+class SimulStop(CTSTest):
+ """ A concrete test that stops all running nodes simultaneously """
+
+ def __init__(self, cm):
+ """ Create a new SimulStop instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ CTSTest.__init__(self, cm)
+
+ self.name = "SimulStop"
+
+ self._startall = SimulStartLite(cm)
+ self._stopall = SimulStopLite(cm)
+
+ def __call__(self, dummy):
+ """ Perform this test """
+
+ self.incr("calls")
+
+ ret = self._startall(None)
+ if not ret:
+ return self.failure("Setup failed")
+
+ if not self._stopall(None):
+ return self.failure("Stopall failed")
+
+ return self.success()
diff --git a/python/pacemaker/_cts/tests/simulstoplite.py b/python/pacemaker/_cts/tests/simulstoplite.py
new file mode 100644
index 0000000..d2e687e
--- /dev/null
+++ b/python/pacemaker/_cts/tests/simulstoplite.py
@@ -0,0 +1,91 @@
+""" Simultaneously stop running nodes """
+
+__all__ = ["SimulStopLite"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker._cts.tests.ctstest import CTSTest
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally. This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._rsh is callable.
+# pylint: disable=not-callable
+# pylint doesn't understand that self._env is subscriptable.
+# pylint: disable=unsubscriptable-object
+
+
+class SimulStopLite(CTSTest):
+ """ A pseudo-test that is only used to set up conditions before running
+ some other test. This class stops any running nodes more or less
+ simultaneously. It can be used both to set up a test or to clean up
+ a test.
+
+ Other test classes should not use this one as a superclass.
+ """
+
+ def __init__(self, cm):
+ """ Create a new SimulStopLite instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ CTSTest.__init__(self, cm)
+ self.name = "SimulStopLite"
+
+ def __call__(self, dummy):
+ """ Stop all running nodes more or less simultaneously, returning
+ whether this succeeded or not.
+ """
+
+ self.incr("calls")
+ self.debug("Setup: %s" % self.name)
+
+ # We ignore the "node" parameter...
+ watchpats = []
+
+ for node in self._env["nodes"]:
+ if self._cm.expected_status[node] == "up":
+ self.incr("WasStarted")
+ watchpats.append(self.templates["Pat:We_stopped"] % node)
+
+ if len(watchpats) == 0:
+ return self.success()
+
+ # Stop all the nodes - at about the same time...
+ watch = self.create_watch(watchpats, self._env["DeadTime"]+10)
+
+ watch.set_watch()
+ self.set_timer()
+ for node in self._env["nodes"]:
+ if self._cm.expected_status[node] == "up":
+ self._cm.stop_cm_async(node)
+
+ if watch.look_for_all():
+ # Make sure they're completely down, with no residual processes
+ for node in self._env["nodes"]:
+ self._rsh(node, self.templates["StopCmd"])
+
+ return self.success()
+
+ did_fail = False
+ up_nodes = []
+ for node in self._env["nodes"]:
+ if self._cm.stat_cm(node):
+ did_fail = True
+ up_nodes.append(node)
+
+ if did_fail:
+ return self.failure("Active nodes exist: %s" % up_nodes)
+
+ self._logger.log("Warn: All nodes stopped but CTS didn't detect: %s" % watch.unmatched)
+ return self.failure("Missing log message: %s " % watch.unmatched)
+
+ def is_applicable(self):
+ """ SimulStopLite is a setup test and never applicable """
+
+ return False
diff --git a/python/pacemaker/_cts/tests/splitbraintest.py b/python/pacemaker/_cts/tests/splitbraintest.py
new file mode 100644
index 0000000..09d5f55
--- /dev/null
+++ b/python/pacemaker/_cts/tests/splitbraintest.py
@@ -0,0 +1,215 @@
+""" Create a split brain cluster and verify a resource is multiply managed """
+
+__all__ = ["SplitBrainTest"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+import time
+
+from pacemaker._cts.input import should_continue
+from pacemaker._cts.tests.ctstest import CTSTest
+from pacemaker._cts.tests.simulstartlite import SimulStartLite
+from pacemaker._cts.tests.starttest import StartTest
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally. This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._rsh is callable.
+# pylint: disable=not-callable
+# pylint doesn't understand that self._env is subscriptable.
+# pylint: disable=unsubscriptable-object
+
+
+class SplitBrainTest(CTSTest):
+ """ A concrete test that creates a split brain cluster and verifies that
+ one node in each partition takes over the resource, resulting in two
+ nodes running the same resource.
+ """
+
+ def __init__(self, cm):
+ """ Create a new SplitBrainTest instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ CTSTest.__init__(self, cm)
+
+ self.is_experimental = True
+ self.name = "SplitBrain"
+
+ self._start = StartTest(cm)
+ self._startall = SimulStartLite(cm)
+
+ def _isolate_partition(self, partition):
+ """ Create a new partition containing the given nodes """
+
+ other_nodes = self._env["nodes"].copy()
+
+ for node in partition:
+ try:
+ other_nodes.remove(node)
+ except ValueError:
+ self._logger.log("Node %s not in %r from %r" % (node, self._env["nodes"], partition))
+
+ if not other_nodes:
+ return
+
+ self.debug("Creating partition: %r" % partition)
+ self.debug("Everyone else: %r" % other_nodes)
+
+ for node in partition:
+ if not self._cm.isolate_node(node, other_nodes):
+ self._logger.log("Could not isolate %s" % node)
+ return
+
+ def _heal_partition(self, partition):
+ """ Move the given nodes out of their own partition back into the cluster """
+
+ other_nodes = self._env["nodes"].copy()
+
+ for node in partition:
+ try:
+ other_nodes.remove(node)
+ except ValueError:
+ self._logger.log("Node %s not in %r" % (node, self._env["nodes"]))
+
+ if len(other_nodes) == 0:
+ return
+
+ self.debug("Healing partition: %r" % partition)
+ self.debug("Everyone else: %r" % other_nodes)
+
+ for node in partition:
+ self._cm.unisolate_node(node, other_nodes)
+
+ def __call__(self, node):
+ """ Perform this test """
+
+ self.incr("calls")
+ self.passed = True
+ partitions = {}
+
+ if not self._startall(None):
+ return self.failure("Setup failed")
+
+ while True:
+ # Retry until we get multiple partitions
+ partitions = {}
+ p_max = len(self._env["nodes"])
+
+ for n in self._env["nodes"]:
+ p = self._env.random_gen.randint(1, p_max)
+
+ if p not in partitions:
+ partitions[p] = []
+
+ partitions[p].append(n)
+
+ p_max = len(partitions)
+ if p_max > 1:
+ break
+ # else, try again
+
+ self.debug("Created %d partitions" % p_max)
+ for (key, val) in partitions.items():
+ self.debug("Partition[%s]:\t%r" % (key, val))
+
+ # Disabling STONITH to reduce test complexity for now
+ self._rsh(node, "crm_attribute -V -n stonith-enabled -v false")
+
+ for val in partitions.values():
+ self._isolate_partition(val)
+
+ count = 30
+ while count > 0:
+ if len(self._cm.find_partitions()) != p_max:
+ time.sleep(10)
+ count -= 1
+ else:
+ break
+ else:
+ self.failure("Expected partitions were not created")
+
+ # Target number of partitions formed - wait for stability
+ if not self._cm.cluster_stable():
+ self.failure("Partitioned cluster not stable")
+
+ # Now audit the cluster state
+ self._cm.partitions_expected = p_max
+ if not self.audit():
+ self.failure("Audits failed")
+
+ self._cm.partitions_expected = 1
+
+ # And heal them again
+ for val in partitions.values():
+ self._heal_partition(val)
+
+ # Wait for a single partition to form
+ count = 30
+ while count > 0:
+ if len(self._cm.find_partitions()) != 1:
+ time.sleep(10)
+ count -= 1
+ else:
+ break
+ else:
+ self.failure("Cluster did not reform")
+
+ # Wait for it to have the right number of members
+ count = 30
+ while count > 0:
+ members = []
+
+ partitions = self._cm.find_partitions()
+ if partitions:
+ members = partitions[0].split()
+
+ if len(members) != len(self._env["nodes"]):
+ time.sleep(10)
+ count -= 1
+ else:
+ break
+ else:
+ self.failure("Cluster did not completely reform")
+
+ # Wait up to 20 minutes - the delay is preferable to
+ # trying to continue in a messed-up state
+ if not self._cm.cluster_stable(1200):
+ self.failure("Reformed cluster not stable")
+
+ if not should_continue(self._env):
+ raise ValueError("Reformed cluster not stable")
+
+ # Turn fencing back on
+ if self._env["DoFencing"]:
+ self._rsh(node, "crm_attribute -V -D -n stonith-enabled")
+
+ self._cm.cluster_stable()
+
+ if self.passed:
+ return self.success()
+
+ return self.failure("See previous errors")
+
+ @property
+ def errors_to_ignore(self):
+ """ Return list of errors which should be ignored """
+
+ return [
+ r"Another DC detected:",
+ r"(ERROR|error).*: .*Application of an update diff failed",
+ r"pacemaker-controld.*:.*not in our membership list",
+ r"CRIT:.*node.*returning after partition"
+ ]
+
+ def is_applicable(self):
+ """ Return True if this test is applicable in the current test configuration. """
+
+ if not CTSTest.is_applicable(self):
+ return False
+
+ return len(self._env["nodes"]) > 2
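
The partition-assignment loop in __call__ above is the core of this test: each node gets a random bucket number, and the draw is repeated until more than one bucket is non-empty. A minimal standalone sketch of that idea (hypothetical node names, stdlib random instead of the environment's random_gen):

import random

def make_partitions(nodes):
    # Repeatedly bucket nodes at random until at least two partitions result.
    # Assumes len(nodes) >= 2; the test itself requires more than two nodes.
    while True:
        partitions = {}
        for n in nodes:
            p = random.randint(1, len(nodes))
            partitions.setdefault(p, []).append(n)
        if len(partitions) > 1:
            return list(partitions.values())

print(make_partitions(["node1", "node2", "node3"]))
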
diff --git a/python/pacemaker/_cts/tests/standbytest.py b/python/pacemaker/_cts/tests/standbytest.py
new file mode 100644
index 0000000..a9ce8ec
--- /dev/null
+++ b/python/pacemaker/_cts/tests/standbytest.py
@@ -0,0 +1,110 @@
+""" Put a node into standby mode and check that resources migrate """
+
+__all__ = ["StandbyTest"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker._cts.tests.ctstest import CTSTest
+from pacemaker._cts.tests.simulstartlite import SimulStartLite
+from pacemaker._cts.tests.starttest import StartTest
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally. This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._env is subscriptable.
+# pylint: disable=unsubscriptable-object
+
+
+class StandbyTest(CTSTest):
+ """ A concrete tests that puts a node into standby and checks that resources
+ migrate away from the node
+ """
+
+ def __init__(self, cm):
+ """ Create a new StandbyTest instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ CTSTest.__init__(self, cm)
+
+ self.benchmark = True
+ self.name = "Standby"
+
+ self._start = StartTest(cm)
+ self._startall = SimulStartLite(cm)
+
+ # make sure the node is active
+ # set the node to standby mode
+ # check resources, no resources should be running on the node
+ # set the node to active mode
+ # check resources, resources should have been migrated back (SHOULD THEY?)
+
+ def __call__(self, node):
+ """ Perform this test """
+
+ self.incr("calls")
+ ret = self._startall(None)
+ if not ret:
+ return self.failure("Start all nodes failed")
+
+ self.debug("Make sure node %s is active" % node)
+ if self._cm.in_standby_mode(node):
+ if not self._cm.set_standby_mode(node, False):
+ return self.failure("can't set node %s to active mode" % node)
+
+ self._cm.cluster_stable()
+
+ if self._cm.in_standby_mode(node):
+ return self.failure("standby status of %s is [on] but we expect [off]" % node)
+
+ watchpats = [
+ r"State transition .* -> S_POLICY_ENGINE",
+ ]
+ watch = self.create_watch(watchpats, self._env["DeadTime"]+10)
+ watch.set_watch()
+
+ self.debug("Setting node %s to standby mode" % node)
+ if not self._cm.set_standby_mode(node, True):
+ return self.failure("can't set node %s to standby mode" % node)
+
+ self.set_timer("on")
+
+ ret = watch.look_for_all()
+ if not ret:
+ self._logger.log("Patterns not found: %r" % watch.unmatched)
+ self._cm.set_standby_mode(node, False)
+ return self.failure("cluster didn't react to standby change on %s" % node)
+
+ self._cm.cluster_stable()
+
+ if not self._cm.in_standby_mode(node):
+ return self.failure("standby status of %s is [off] but we expect [on]" % node)
+
+ self.log_timer("on")
+
+ self.debug("Checking resources")
+ rscs_on_node = self._cm.active_resources(node)
+ if rscs_on_node:
+ rc = self.failure("%s set to standby, %r is still running on it" % (node, rscs_on_node))
+ self.debug("Setting node %s to active mode" % node)
+ self._cm.set_standby_mode(node, False)
+ return rc
+
+ self.debug("Setting node %s to active mode" % node)
+ if not self._cm.set_standby_mode(node, False):
+ return self.failure("can't set node %s to active mode" % node)
+
+ self.set_timer("off")
+ self._cm.cluster_stable()
+
+ if self._cm.in_standby_mode(node):
+ return self.failure("standby status of %s is [on] but we expect [off]" % node)
+
+ self.log_timer("off")
+
+ return self.success()
diff --git a/python/pacemaker/_cts/tests/startonebyone.py b/python/pacemaker/_cts/tests/startonebyone.py
new file mode 100644
index 0000000..6a01097
--- /dev/null
+++ b/python/pacemaker/_cts/tests/startonebyone.py
@@ -0,0 +1,55 @@
+""" Start all stopped nodes serially """
+
+__all__ = ["StartOnebyOne"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker._cts.tests.ctstest import CTSTest
+from pacemaker._cts.tests.simulstoplite import SimulStopLite
+from pacemaker._cts.tests.starttest import StartTest
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally. This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._env is subscriptable.
+# pylint: disable=unsubscriptable-object
+
+
+class StartOnebyOne(CTSTest):
+ """ A concrete test that starts all stopped nodes serially """
+
+ def __init__(self, cm):
+ """ Create a new StartOnebyOne instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ CTSTest.__init__(self, cm)
+ self.name = "StartOnebyOne"
+
+ self._start = StartTest(cm)
+ self._stopall = SimulStopLite(cm)
+
+ def __call__(self, dummy):
+ """ Perform this test """
+
+ self.incr("calls")
+
+ ret = self._stopall(None)
+ if not ret:
+ return self.failure("Test setup failed")
+
+ failed = []
+ self.set_timer()
+ for node in self._env["nodes"]:
+ if not self._start(node):
+ failed.append(node)
+
+ if failed:
+ return self.failure("Some node failed to start: %r" % failed)
+
+ return self.success()
diff --git a/python/pacemaker/_cts/tests/starttest.py b/python/pacemaker/_cts/tests/starttest.py
new file mode 100644
index 0000000..6387511
--- /dev/null
+++ b/python/pacemaker/_cts/tests/starttest.py
@@ -0,0 +1,54 @@
+""" Start the cluster manager on a given node """
+
+__all__ = ["StartTest"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker._cts.tests.ctstest import CTSTest
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally. This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._env is subscriptable.
+# pylint: disable=unsubscriptable-object
+
+
+class StartTest(CTSTest):
+ """ A pseudo-test that is only used to set up conditions before running
+ some other test. This class starts the cluster manager on a given
+ node.
+
+ Other test classes should not use this one as a superclass.
+ """
+
+ def __init__(self, cm):
+ """ Create a new StartTest instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ CTSTest.__init__(self, cm)
+ self.name = "Start"
+
+ def __call__(self, node):
+ """ Start the given node, returning whether this succeeded or not """
+
+ self.incr("calls")
+
+ if self._cm.upcount() == 0:
+ self.incr("us")
+ else:
+ self.incr("them")
+
+ if self._cm.expected_status[node] != "down":
+ return self.skipped()
+
+ if self._cm.start_cm(node):
+ return self.success()
+
+ return self.failure("Startup %s on node %s failed"
+ % (self._env["Name"], node))
diff --git a/python/pacemaker/_cts/tests/stonithdtest.py b/python/pacemaker/_cts/tests/stonithdtest.py
new file mode 100644
index 0000000..0dce291
--- /dev/null
+++ b/python/pacemaker/_cts/tests/stonithdtest.py
@@ -0,0 +1,145 @@
+""" Fence a running node and wait for it to restart """
+
+__all__ = ["StonithdTest"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker.exitstatus import ExitStatus
+from pacemaker._cts.tests.ctstest import CTSTest
+from pacemaker._cts.tests.simulstartlite import SimulStartLite
+from pacemaker._cts.timer import Timer
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally. This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._rsh is callable.
+# pylint: disable=not-callable
+# pylint doesn't understand that self._env is subscriptable.
+# pylint: disable=unsubscriptable-object
+
+
+class StonithdTest(CTSTest):
+ """ A concrete test that fences a running node and waits for it to restart """
+
+ def __init__(self, cm):
+ """ Create a new StonithdTest instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ CTSTest.__init__(self, cm)
+ self.benchmark = True
+ self.name = "Stonithd"
+
+ self._startall = SimulStartLite(cm)
+
+ def __call__(self, node):
+ """ Perform this test """
+
+ self.incr("calls")
+ if len(self._env["nodes"]) < 2:
+ return self.skipped()
+
+ ret = self._startall(None)
+ if not ret:
+ return self.failure("Setup failed")
+
+ watchpats = [
+ self.templates["Pat:Fencing_ok"] % node,
+ self.templates["Pat:NodeFenced"] % node,
+ ]
+
+ if not self._env["at-boot"]:
+ self.debug("Expecting %s to stay down" % node)
+ self._cm.expected_status[node] = "down"
+ else:
+ self.debug("Expecting %s to come up again %d" % (node, self._env["at-boot"]))
+ watchpats.extend([
+ "%s.* S_STARTING -> S_PENDING" % node,
+ "%s.* S_PENDING -> S_NOT_DC" % node,
+ ])
+
+ watch = self.create_watch(watchpats, 30 + self._env["DeadTime"] + self._env["StableTime"] + self._env["StartTime"])
+ watch.set_watch()
+
+ origin = self._env.random_gen.choice(self._env["nodes"])
+
+ (rc, _) = self._rsh(origin, "stonith_admin --reboot %s -VVVVVV" % node)
+
+ if rc == ExitStatus.TIMEOUT:
+ # Look for the patterns, usually this means the required
+ # device was running on the node to be fenced - or that
+ # the required devices were in the process of being loaded
+ # and/or moved
+ #
+ # Effectively the node committed suicide so there will be
+ # no confirmation, but pacemaker should be watching and
+ # fence the node again
+
+ self._logger.log("Fencing command on %s to fence %s timed out" % (origin, node))
+
+ elif origin != node and rc != 0:
+ self.debug("Waiting for the cluster to recover")
+ self._cm.cluster_stable()
+
+ self.debug("Waiting for fenced node to come back up")
+ self._cm.ns.wait_for_all_nodes(self._env["nodes"], 600)
+
+ self._logger.log("Fencing command on %s failed to fence %s (rc=%d)" % (origin, node, rc))
+
+ elif origin == node and rc != 255:
+ # 255 == broken pipe, ie. the node was fenced as expected
+ self._logger.log("Locally originated fencing returned %d" % rc)
+
+ with Timer(self._logger, self.name, "fence"):
+ matched = watch.look_for_all()
+
+ self.set_timer("reform")
+ if watch.unmatched:
+ self._logger.log("Patterns not found: %r" % watch.unmatched)
+
+ self.debug("Waiting for the cluster to recover")
+ self._cm.cluster_stable()
+
+ self.debug("Waiting for fenced node to come back up")
+ self._cm.ns.wait_for_all_nodes(self._env["nodes"], 600)
+
+ self.debug("Waiting for the cluster to re-stabilize with all nodes")
+ is_stable = self._cm.cluster_stable(self._env["StartTime"])
+
+ if not matched:
+ return self.failure("Didn't find all expected patterns")
+
+ if not is_stable:
+ return self.failure("Cluster did not become stable")
+
+ self.log_timer("reform")
+ return self.success()
+
+ @property
+ def errors_to_ignore(self):
+ """ Return list of errors which should be ignored """
+
+ return [
+ self.templates["Pat:Fencing_start"] % ".*",
+ self.templates["Pat:Fencing_ok"] % ".*",
+ self.templates["Pat:Fencing_active"],
+ r"error.*: Operation 'reboot' targeting .* by .* for stonith_admin.*: Timer expired"
+ ]
+
+ def is_applicable(self):
+ """ Return True if this test is applicable in the current test configuration. """
+
+ if not CTSTest.is_applicable(self):
+ return False
+
+ # pylint gets confused because of EnvFactory here.
+ # pylint: disable=unsupported-membership-test
+ if "DoFencing" in self._env:
+ return self._env["DoFencing"]
+
+ return True
diff --git a/python/pacemaker/_cts/tests/stoponebyone.py b/python/pacemaker/_cts/tests/stoponebyone.py
new file mode 100644
index 0000000..d75d282
--- /dev/null
+++ b/python/pacemaker/_cts/tests/stoponebyone.py
@@ -0,0 +1,56 @@
+""" Stop all running nodes serially """
+
+__all__ = ["StopOnebyOne"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker._cts.tests.ctstest import CTSTest
+from pacemaker._cts.tests.simulstartlite import SimulStartLite
+from pacemaker._cts.tests.stoptest import StopTest
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally. This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._env is subscriptable.
+# pylint: disable=unsubscriptable-object
+
+
+class StopOnebyOne(CTSTest):
+ """ A concrete test that stops all running nodes serially """
+
+ def __init__(self, cm):
+ """ Create a new StartOnebyOne instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ CTSTest.__init__(self, cm)
+
+ self.name = "StopOnebyOne"
+
+ self._startall = SimulStartLite(cm)
+ self._stop = StopTest(cm)
+
+ def __call__(self, dummy):
+ """ Perform this test """
+
+ self.incr("calls")
+
+ ret = self._startall(None)
+ if not ret:
+ return self.failure("Setup failed")
+
+ failed = []
+ self.set_timer()
+ for node in self._env["nodes"]:
+ if not self._stop(node):
+ failed.append(node)
+
+ if failed:
+ return self.failure("Some node failed to stop: %r" % failed)
+
+ return self.success()
diff --git a/python/pacemaker/_cts/tests/stoptest.py b/python/pacemaker/_cts/tests/stoptest.py
new file mode 100644
index 0000000..8f496d3
--- /dev/null
+++ b/python/pacemaker/_cts/tests/stoptest.py
@@ -0,0 +1,99 @@
+""" Stop the cluster manager on a given node """
+
+__all__ = ["StopTest"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+from pacemaker._cts.tests.ctstest import CTSTest
+
+# Disable various pylint warnings that occur in so many places throughout this
+# file it's easiest to just take care of them globally. This does introduce the
+# possibility that we'll miss some other cause of the same warning, but we'll
+# just have to be careful.
+
+# pylint doesn't understand that self._rsh is callable.
+# pylint: disable=not-callable
+# pylint doesn't understand that self._env is subscriptable.
+# pylint: disable=unsubscriptable-object
+
+
+class StopTest(CTSTest):
+ """ A pseudo-test that is only used to set up conditions before running
+ some other test. This class stops the cluster manager on a given
+ node.
+
+ Other test classes should not use this one as a superclass.
+ """
+
+ def __init__(self, cm):
+ """ Create a new StopTest instance
+
+ Arguments:
+
+ cm -- A ClusterManager instance
+ """
+
+ CTSTest.__init__(self, cm)
+ self.name = "Stop"
+
+ def __call__(self, node):
+ """ Stop the given node, returning whether this succeeded or not """
+
+ self.incr("calls")
+ if self._cm.expected_status[node] != "up":
+ return self.skipped()
+
+ # Technically we should always be able to notice ourselves stopping
+ patterns = [
+ self.templates["Pat:We_stopped"] % node,
+ ]
+
+ # Any active node needs to notice this one left
+ # (note that this won't work if we have multiple partitions)
+ for other in self._env["nodes"]:
+ if self._cm.expected_status[other] == "up" and other != node:
+ patterns.append(self.templates["Pat:They_stopped"] % (other, node))
+
+ watch = self.create_watch(patterns, self._env["DeadTime"])
+ watch.set_watch()
+
+ if node == self._cm.our_node:
+ self.incr("us")
+ else:
+ if self._cm.upcount() <= 1:
+ self.incr("all")
+ else:
+ self.incr("them")
+
+ self._cm.stop_cm(node)
+ watch.look_for_all()
+
+ failreason = None
+ unmatched_str = "||"
+
+ if watch.unmatched:
+ (_, output) = self._rsh(node, "/bin/ps axf", verbose=1)
+ for line in output:
+ self.debug(line)
+
+ (_, output) = self._rsh(node, "/usr/sbin/dlm_tool dump 2>/dev/null", verbose=1)
+ for line in output:
+ self.debug(line)
+
+ for regex in watch.unmatched:
+ self._logger.log("ERROR: Shutdown pattern not found: %s" % regex)
+ unmatched_str += "%s||" % regex
+ failreason = "Missing shutdown pattern"
+
+ self._cm.cluster_stable(self._env["DeadTime"])
+
+ if not watch.unmatched or self._cm.upcount() == 0:
+ return self.success()
+
+ if len(watch.unmatched) >= self._cm.upcount():
+ return self.failure("no match against (%s)" % unmatched_str)
+
+ if failreason is None:
+ return self.success()
+
+ return self.failure(failreason)
diff --git a/python/pacemaker/_cts/timer.py b/python/pacemaker/_cts/timer.py
new file mode 100644
index 0000000..122b70b
--- /dev/null
+++ b/python/pacemaker/_cts/timer.py
@@ -0,0 +1,63 @@
+""" Timer-related utilities for CTS """
+
+__all__ = ["Timer"]
+__copyright__ = "Copyright 2000-2023 the Pacemaker project contributors"
+__license__ = "GNU General Public License version 2 or later (GPLv2+) WITHOUT ANY WARRANTY"
+
+import time
+
+class Timer:
+ """ A class for measuring the runtime of some task. A Timer may be used
+ manually or as a context manager, like so:
+
+ with Timer(logger, "SomeTest", "SomeTimer"):
+ ...
+
+ A Timer runs from when start() is called until the timer is deleted
+ or reset() is called. There is no explicit stop method.
+ """
+
+ def __init__(self, logger, test_name, timer_name):
+ """ Create a new Timer instance.
+
+ Arguments:
+
+ logger -- A Logger instance that can be used to record when
+ the timer stopped
+ test_name -- The name of the test this timer is being run for
+ timer_name -- The name of this timer
+ """
+
+ self._logger = logger
+ self._start_time = None
+ self._test_name = test_name
+ self._timer_name = timer_name
+
+ def __enter__(self):
+ self.start()
+ return self
+
+ def __exit__(self, *args):
+ self._logger.debug("%s:%s runtime: %.2f" % (self._test_name, self._timer_name, self.elapsed))
+
+ def reset(self):
+ """ Restart the timer """
+
+ self.start()
+
+ def start(self):
+ """ Start the timer """
+
+ self._start_time = time.time()
+
+ @property
+ def start_time(self):
+ """ When did the timer start? """
+
+ return self._start_time
+
+ @property
+ def elapsed(self):
+ """ How long has the timer been running for? """
+
+ return time.time() - self._start_time
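
Besides the context-manager form shown in the docstring, a Timer can be driven by hand. A small sketch of that usage (assuming the pacemaker package is importable, and using a stdlib logger since only a debug() method is needed):

import logging
import time

from pacemaker._cts.timer import Timer

log = logging.getLogger("cts-demo")        # any object with a debug() method
t = Timer(log, "SomeTest", "SomeTimer")
t.start()
time.sleep(0.5)                            # stand-in for the work being timed
print("elapsed: %.2f" % t.elapsed)         # seconds since start()
t.reset()                                  # restart the measurement from now
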
diff --git a/python/pacemaker/_cts/watcher.py b/python/pacemaker/_cts/watcher.py
index 3bdb892..3e6d702 100644
--- a/python/pacemaker/_cts/watcher.py
+++ b/python/pacemaker/_cts/watcher.py
@@ -13,7 +13,7 @@ from pacemaker.buildoptions import BuildOptions
from pacemaker._cts.logging import LogFactory
from pacemaker._cts.remote import RemoteFactory
-LOG_WATCHER_BIN = BuildOptions.DAEMON_DIR + "/cts-log-watcher"
+LOG_WATCHER_BIN = "%s/cts-log-watcher" % BuildOptions.DAEMON_DIR
@unique
class LogKind(Enum):
@@ -139,7 +139,7 @@ class FileObj(SearchObj):
if match:
self.offset = match.group(1)
- self.debug("Got %d lines, new offset: %s %s" % (len(out), self.offset, repr(self._delegate)))
+ self.debug("Got %d lines, new offset: %s %r" % (len(out), self.offset, self._delegate))
elif re.search(r"^CTSwatcher:.*truncated", line):
self.log(line)
elif re.search(r"^CTSwatcher:", line):
@@ -294,8 +294,8 @@ class JournalObj(SearchObj):
self.limit = lines[0].strip()
self.debug("Set limit to: %s" % self.limit)
else:
- self.debug("Unable to set limit for %s because date returned %d lines with status %d" % (self.host,
- len(lines), rc))
+ self.debug("Unable to set limit for %s because date returned %d lines with status %d"
+ % (self.host, len(lines), rc))
class LogWatcher:
""" A class for watching a single log file or journal across multiple hosts,
@@ -413,7 +413,7 @@ class LogWatcher:
for t in pending:
t.join(60.0)
if t.is_alive():
- self._logger.log("%s: Aborting after 20s waiting for %s logging commands" % (self.name, repr(t)))
+ self._logger.log("%s: Aborting after 20s waiting for %r logging commands" % (self.name, t))
return
def end(self):
diff --git a/python/pacemaker/buildoptions.py.in b/python/pacemaker/buildoptions.py.in
index 53b492b..17fe981 100644
--- a/python/pacemaker/buildoptions.py.in
+++ b/python/pacemaker/buildoptions.py.in
@@ -22,6 +22,9 @@ class BuildOptions:
CIB_DIR = "@CRM_CONFIG_DIR@"
""" Where CIB files are stored """
+ CIB_SCHEMA_VERSION = "@CIB_VERSION@"
+ """ Latest supported CIB schema version number """
+
COROSYNC_CONFIG_FILE = "@PCMK__COROSYNC_CONF@"
""" Path to the corosync config file """
diff --git a/python/pylintrc b/python/pylintrc
index e65110b..f46eece 100644
--- a/python/pylintrc
+++ b/python/pylintrc
@@ -446,7 +446,8 @@ exclude-too-few-public-methods=
[CLASSES]
# List of method names used to declare (i.e. assign) instance attributes.
-defining-attr-methods=__init__,__new__,setUp,__post_init__
+# CHANGED: Remove setUp and __post_init__, add reset
+defining-attr-methods=__init__,__new__,reset
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls
diff --git a/python/setup.py.in b/python/setup.py.in
index c4083da..e9d61d0 100644
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -16,5 +16,5 @@ setup(name='pacemaker',
license='LGPLv2.1+',
url='https://clusterlabs.org/pacemaker/',
description='Python libraries for Pacemaker',
- packages=['pacemaker', 'pacemaker._cts'],
+ packages=['pacemaker', 'pacemaker._cts', 'pacemaker._cts.tests'],
)
diff --git a/python/tests/Makefile.am b/python/tests/Makefile.am
index 490b272..219812c 100644
--- a/python/tests/Makefile.am
+++ b/python/tests/Makefile.am
@@ -9,4 +9,5 @@
MAINTAINERCLEANFILES = Makefile.in
-EXTRA_DIST = $(wildcard test_*)
+EXTRA_DIST = $(wildcard test_*) \
+ __init__.py
diff --git a/python/tests/__init__.py b/python/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/python/tests/__init__.py
diff --git a/python/tests/test_cts_network.py b/python/tests/test_cts_network.py
new file mode 100644
index 0000000..4aea8b9
--- /dev/null
+++ b/python/tests/test_cts_network.py
@@ -0,0 +1,37 @@
+# These warnings are not useful in unit tests.
+# pylint: disable=missing-class-docstring,missing-function-docstring,missing-module-docstring
+
+__copyright__ = "Copyright 2023 the Pacemaker project contributors"
+__license__ = "GPLv2+"
+
+import unittest
+
+from pacemaker._cts.network import next_ip
+
+# next_ip makes a bunch of assumptions that we are not going to test here:
+#
+# * The env argument actually contains an "IPBase" key with a string in it
+# * The string is a properly formatted IPv4 or IPv6 address, with no extra
+# leading or trailing whitespace
+
+class NextIPTestCase(unittest.TestCase):
+ def test_ipv4(self):
+ # The first time next_ip is called, it will read the IPBase out of
+ # the environment. After that, it just works from whatever it
+ # previously returned, so the environment value doesn't matter.
+ self.assertEqual(next_ip("1.1.1.1"), "1.1.1.2")
+ self.assertEqual(next_ip(), "1.1.1.3")
+
+ # Passing reset=True will force it to re-read the environment. Here,
+ # we use that to ask it for a value outside of the available range.
+ self.assertRaises(ValueError, next_ip, "1.1.1.255", reset=True)
+
+ def test_ipv6(self):
+ # Same comments as for the test_ipv4 function, plus we need to reset
+ # here because otherwise it will remember what happened in that function.
+ self.assertEqual(next_ip("fe80::fc54:ff:fe09:101", reset=True),
+ "fe80::fc54:ff:fe09:102")
+ self.assertEqual(next_ip(),
+ "fe80::fc54:ff:fe09:103")
+
+ self.assertRaises(ValueError, next_ip, "fe80::fc54:ff:fe09:ffff", reset=True)
diff --git a/replace/Makefile.am b/replace/Makefile.am
deleted file mode 100644
index 1236b66..0000000
--- a/replace/Makefile.am
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-#
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version 2
-# of the License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
-#
-
-MAINTAINERCLEANFILES = Makefile.in
-
-AM_CPPFLAGS = -I$(top_srcdir)/include -I$(top_builddir)/include
-
-QUIET_LIBTOOL_OPTS = @QUIET_LIBTOOL_OPTS@
-LIBTOOL = @LIBTOOL@ @QUIET_LIBTOOL_OPTS@
-
-
-noinst_LTLIBRARIES = libreplace.la
-libreplace_la_SOURCES =
-libreplace_la_LIBADD = @LTLIBOBJS@
diff --git a/replace/NoSuchFunctionName.c b/replace/NoSuchFunctionName.c
deleted file mode 100644
index 2964f50..0000000
--- a/replace/NoSuchFunctionName.c
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (C) 2002 Alan Robertson <alanr@unix.sh>
- * This software licensed under the GNU LGPL.
- *
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- */
-void nosuchfunctionname(void);
-
-/*
- * This is a completely useless function put here only to make OpenBSD make
- * procedures happy. I hope no one ever makes such a function ;-)
- */
-void
-nosuchfunctionname(void)
-{
- return;
-}
diff --git a/replace/alphasort.c b/replace/alphasort.c
deleted file mode 100644
index f09f426..0000000
--- a/replace/alphasort.c
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- *
- * alphasort - replacement for alphasort functions.
- *
- * Matt Soffen
-
- * Copyright (C) 2001 Matt Soffen <matt@soffen.com>
- *
- * Taken from the FreeBSD file (with copyright notice)
- * /usr/src/gnu/lib/libdialog/dir.c
- ***************************************************************************
- * Program: dir.c
- * Author: Marc van Kempen
- * desc: Directory routines, sorting and reading
- *
- * Copyright (c) 1995, Marc van Kempen
- *
- * All rights reserved.
- *
- * This software may be used, modified, copied, distributed, and
- * sold, in both source and binary form provided that the above
- * copyright and these terms are retained, verbatim, as the first
- * lines of this file. Under no circumstances is the author
- * responsible for the proper functioning of this software, nor does
- * the author assume any responsibility for damages incurred with
- * its use.
- *
- ***************************************************************************
- */
-
-#include <crm_internal.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-
-#include <unistd.h> /* XXX for _POSIX_VERSION ifdefs */
-
-#if HAVE_STRINGS_H
-# include <strings.h>
-#endif
-
-#if !defined sgi && !defined _POSIX_VERSION
-# include <sys/dir.h>
-#endif
-
-#include <sys/types.h>
-#include <dirent.h>
-#include <stdlib.h>
-#include <stddef.h>
-
-int
-alphasort(const void *dirent1, const void *dirent2)
-{
- return (strcmp((*(const struct dirent **)dirent1)->d_name,
- (*(const struct dirent **)dirent2)->d_name));
-}
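The shim above implements the comparator contract that scandir() expects: each argument points to a struct dirent pointer, and ordering is plain strcmp() on d_name. A minimal sketch of the same comparator driven by qsort() — helper names are hypothetical and the entry array is assumed to be already populated:

#include <dirent.h>
#include <stdlib.h>
#include <string.h>

/* Same contract as the removed shim: each argument points to a dirent pointer. */
static int
name_compare(const void *a, const void *b)
{
    return strcmp((*(const struct dirent **) a)->d_name,
                  (*(const struct dirent **) b)->d_name);
}

/* Usage sketch: sort n directory entries in place. */
static void
sort_entries(struct dirent **entries, size_t n)
{
    qsort(entries, n, sizeof(struct dirent *), name_compare);
}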
diff --git a/replace/scandir.c b/replace/scandir.c
deleted file mode 100644
index 71ac64e..0000000
--- a/replace/scandir.c
+++ /dev/null
@@ -1,233 +0,0 @@
-/* scandir: Scan a directory, collecting all (selected) items into an array.
- *
- * This code borrowed from 'libit', which can be found here:
- *
- * http://www.iro.umontreal.ca/~pinard/libit/dist/scandir/
- *
- * The original author put this code in the public domain.
- * It has been modified slightly to get rid of warnings, etc.
- *
- * Below is the email I received from pinard@iro.umontreal.ca (François Pinard)
- * when I sent him an email asking him about the license, etc. of this
- * code which I obtained from his site.
- *
- * I think the correct spelling of his name is Rich Salz. I think he's now
- * rsalz@datapower.com...
- * --
- * Rich Salz, Chief Security Architect
- * DataPower Technology http://www.datapower.com
- * XS40 XML Security Gateway http://www.datapower.com/products/xs40.html
- *
- * Copyright(C): none (public domain)
- * License: none (public domain)
- * Author: Rich Salz <rsalz@datapower.com>
- *
- *
- *
- * -- Alan Robertson
- * alanr@unix.sh
- *
- **************************************************************************
- *
- * Subject: Re: Scandir replacement function
- * Date: 18 May 2001 12:00:48 -0400
- * From: pinard@iro.umontreal.ca (François Pinard)
- * To: Alan Robertson <alanr@unix.sh>
- * References: 1
- *
- *
- * [Alan Robertson]
- *
- * > Hi, I'd like to use your scandir replacement function found here:
- * > http://www.iro.umontreal.ca/~pinard/libit/dist/scandir/ But, it does
- * > not indicate authorship or licensing terms in it. Could you tell me
- * > who wrote this code, under what license you distribute it, and whether
- * > and under what terms I may further distribute it?
- *
- * Hello, Alan. These are (somewhat) explained in UNSHAR.HDR found in the
- * same directory. The routines have been written by Rick Saltz (I'm not
- * completely sure of the spelling) a long while ago. I think that nowadays,
- * Rick is better known as the main author of the nice INN package.
- *
- **************************************************************************
- *
- * I spent a little time verifying this with Rick Salz.
- * The results are below:
- *
- **************************************************************************
- *
- * Date: Tue, 20 Sep 2005 21:52:09 -0400 (EDT)
- * From: Rich Salz <rsalz@datapower.com>
- * To: Alan Robertson <alanr@unix.sh>
- * Subject: Re: Verifying permissions/licenses/etc on some old code of yours -
- * scandir.c
- * In-Reply-To: <433071CA.8000107@unix.sh>
- * Message-ID: <Pine.LNX.4.44L0.0509202151270.9198-100000@smtp.datapower.com>
- * Content-Type: TEXT/PLAIN; charset=US-ASCII
- *
- * yes, it's most definitely in the public domain.
- *
- * I'm glad you find it useful. I'm surprised it hasn't been replaced by,
- * e.g,. something in GLibC. Ii'm impressed you tracked me down.
- *
- * /r$
- *
- * --
- * Rich Salz Chief Security Architect
- * DataPower Technology http://www.datapower.com
- * XS40 XML Security Gateway http://www.datapower.com/products/xs40.html
- * ---------------------------------------------------------------------->
- * Subject: scandir, ftw REDUX
- * Date: 1 Jan 88 00:47:01 GMT
- * From: rsalz@pebbles.bbn.com
- * Newsgroups: comp.sources.misc
- *
- *
- * Forget my previous message -- I just decided for completeness's sake to
- * implement the SysV ftw(3) routine, too.
- *
- * To repeat, these are public-domain implementations of the SystemV ftw()
- * routine, the BSD scandir() and alphasort() routines, and documentation for
- * same. The FTW manpage could be more readable, but so it goes.
- *
- * Anyhow, feel free to post these, and incorporate them into your existing
- * packages. I have readdir() routiens for MSDOS and the Amiga if anyone
- * wants them, and should have them for VMS by the end of January; let me
- * know if you want copies.
- *
- * Yours in filesystems,
- * /r$
- *
- * Anyhow, feel free to post
- * ----------------------------------------------------------------------<
- *
- */
-
-#include <crm_internal.h>
-#include <sys/types.h>
-#include <dirent.h>
-#include <stdlib.h>
-#include <stddef.h>
-#include <string.h>
-
-#ifndef NULL
-# define NULL ((void *) 0)
-#endif
-
-/* Initial guess at directory allocated. */
-#define INITIAL_ALLOCATION 20
-
-int
-
-
-scandir(const char *directory_name,
- struct dirent ***array_pointer, int (*select_function) (const struct dirent *),
-#ifdef USE_SCANDIR_COMPARE_STRUCT_DIRENT
- /* This is what the Linux man page says */
- int (*compare_function) (const struct dirent **, const struct dirent **)
-#else
- /* This is what the Linux header file says ... */
- int (*compare_function) (const void *, const void *)
-#endif
- );
-
-int
-scandir(const char *directory_name,
- struct dirent ***array_pointer, int (*select_function) (const struct dirent *),
-#ifdef USE_SCANDIR_COMPARE_STRUCT_DIRENT
- /* This is what the linux man page says */
- int (*compare_function) (const struct dirent **, const struct dirent **)
-#else
- /* This is what the linux header file says ... */
- int (*compare_function) (const void *, const void *)
-#endif
- )
-{
- DIR *directory;
- struct dirent **array;
- struct dirent *entry;
- struct dirent *copy;
- int allocated = INITIAL_ALLOCATION;
- int counter = 0;
-
- /* Get initial list space and open directory. */
-
- if ((directory = opendir(directory_name)) == NULL) {
- return -1;
- }
-
- if ((array = (struct dirent **)malloc(allocated * sizeof(struct dirent *)))
- == NULL) {
- closedir(directory);
- return -1;
- }
-
- /* Read entries in the directory. */
-
- while (entry = readdir(directory), entry)
- if (select_function == NULL || (*select_function) (entry)) {
- /* User wants them all, or he wants this one. Copy the entry. */
-
- /*
- * On some OSes the declaration of "entry->d_name" is a minimal-length
- * placeholder. Example: Solaris:
- * /usr/include/sys/dirent.h:
- * "char d_name[1];"
- * man page "dirent(3)":
- * The field d_name is the beginning of the character array
- * giving the name of the directory entry. This name is
- * null terminated and may have at most MAXNAMLEN chars.
- * So our malloc length may need to be increased accordingly.
- * sizeof(entry->d_name): space (possibly minimal) in struct.
- * strlen(entry->d_name): actual length of the entry.
- *
- * John Kavadias <john_kavadias@hotmail.com>
- * David Lee <t.d.lee@durham.ac.uk>
- */
- int namelength = strlen(entry->d_name) + 1; /* length with NULL */
- int extra = 0;
-
- if (sizeof(entry->d_name) <= namelength) {
- /* allocated space <= required space */
- extra += namelength - sizeof(entry->d_name);
- }
-
- if ((copy = (struct dirent *)malloc(sizeof(struct dirent) + extra)) == NULL) {
- closedir(directory);
- free(array);
- return -1;
- }
- copy->d_ino = entry->d_ino;
- copy->d_reclen = entry->d_reclen;
- strcpy(copy->d_name, entry->d_name);
-
- /* Save the copy. */
-
- if (counter + 1 == allocated) {
- allocated <<= 1;
- array = pcmk__realloc((char *)array,
- allocated * sizeof(struct dirent *));
- if (array == NULL) {
- closedir(directory);
- free(array);
- free(copy);
- return -1;
- }
- }
- array[counter++] = copy;
- }
-
- /* Close things off. */
-
- array[counter] = NULL;
- *array_pointer = array;
- closedir(directory);
-
- /* Sort? */
-
- if (counter > 1 && compare_function)
- qsort((char *)array, counter, sizeof(struct dirent *)
- , (int (*)(const void *, const void *))(compare_function));
-
- return counter;
-}
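With the shim gone, Pacemaker relies on the system scandir()/alphasort(). A minimal usage sketch of the standard interface the shim emulated, for reference; the directory path is only an example and error handling is kept to the bare minimum:

#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
    struct dirent **entries = NULL;
    int n = scandir(".", &entries, NULL, alphasort);   /* "." is an example path */

    if (n < 0) {
        perror("scandir");
        return 1;
    }
    for (int i = 0; i < n; i++) {
        printf("%s\n", entries[i]->d_name);
        free(entries[i]);        /* each entry is individually allocated */
    }
    free(entries);               /* as is the array itself */
    return 0;
}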
diff --git a/replace/strchrnul.c b/replace/strchrnul.c
deleted file mode 100644
index d1be6df..0000000
--- a/replace/strchrnul.c
+++ /dev/null
@@ -1,15 +0,0 @@
-#include <crm_internal.h>
-/* Borrowed from gnulib's strchrnul.c under GPLv2+ */
-
-#include <string.h>
-/* Find the first occurrence of C in S or the final NUL byte. */
-char *
-strchrnul(const char *s, int c_in)
-{
- char c = c_in;
-
- while (*s && (*s != c))
- s++;
-
- return (char *)s;
-}
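strchrnul() differs from strchr() only when the character is absent: it returns a pointer to the terminating NUL instead of NULL, so the result is always safe to dereference or subtract. A small sketch of that property (example strings only; on glibc the declaration requires _GNU_SOURCE):

#define _GNU_SOURCE              /* strchrnul() is a GNU extension on glibc */
#include <stdio.h>
#include <string.h>

int
main(void)
{
    const char *s = "key=value";            /* example input */
    const char *hit = strchrnul(s, '=');    /* points at '=' */
    const char *miss = strchrnul(s, '!');   /* no '!': points at the final NUL */

    printf("key length: %zu\n", (size_t) (hit - s));    /* 3 */
    printf("miss offset: %zu\n", (size_t) (miss - s));  /* strlen(s) */
    return 0;
}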
diff --git a/replace/strerror.c b/replace/strerror.c
deleted file mode 100644
index bb1b25a..0000000
--- a/replace/strerror.c
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright (C) 2002 Alan Robertson <alanr@unix.sh>
- * This software licensed under the GNU LGPL.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of version 2.1 of the GNU Lesser General Public
- * License as published by the Free Software Foundation.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- */
-#include <crm_internal.h>
-#include <errno.h>
-#include <stdio.h>
-extern const char *sys_err[];
-extern int sys_nerr;
-char *
-strerror(int errnum)
-{
- static char whaterr[32];
-
- if (errnum < 0) {
- return "negative errno";
- }
- if (errnum >= sys_nerr) {
- snprintf(whaterr, sizeof(whaterr), "error %d", errnum);
- return whaterr;
- }
- return sys_err[sys_nerr];
-}
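One detail worth flagging: for an in-range errnum the removed fallback returns sys_err[sys_nerr] rather than sys_err[errnum], so it always indexes one past the end of the table. A corrected sketch of what the fallback presumably intended, still assuming the legacy sys_err/sys_nerr globals it declares (the function is renamed here to avoid shadowing the libc symbol):

#include <stdio.h>

extern const char *sys_err[];    /* legacy error-string table, as declared by the shim */
extern int sys_nerr;

char *
strerror_fallback(int errnum)
{
    static char whaterr[32];

    if (errnum < 0) {
        return "negative errno";
    }
    if (errnum >= sys_nerr) {
        snprintf(whaterr, sizeof(whaterr), "error %d", errnum);
        return whaterr;
    }
    return (char *) sys_err[errnum];    /* index by errnum, not sys_nerr */
}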
diff --git a/replace/strndup.c b/replace/strndup.c
deleted file mode 100644
index 9fe02cc..0000000
--- a/replace/strndup.c
+++ /dev/null
@@ -1,38 +0,0 @@
-#include <crm_internal.h>
-#include <stdlib.h>
-#include <string.h>
-/*
- * Copyright (C) 2004 Matt Soffen <sirgeek-ha@mrsucko.org>
- * This software licensed under the GNU LGPL.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- */
-
-/* Taken from the GlibC implementation of strndup */
-
-char *
-strndup(const char *str, size_t len)
-{
- size_t n = strnlen(str, len);
- char *new = (char *)malloc(len + 1);
-
- if (NULL == new) {
- return NULL;
- }
-
- new[n] = '\0';
- return (char *)memcpy(new, str, len);
-}
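Also worth flagging: the removed copy passes len to memcpy() even when str is shorter (n < len), which can read past the end of the source buffer. A corrected sketch along the lines of the glibc implementation the comment cites (renamed here to avoid clashing with the libc symbol):

#include <stdlib.h>
#include <string.h>

char *
strndup_sketch(const char *str, size_t len)
{
    size_t n = strnlen(str, len);    /* bytes to copy, capped at len */
    char *new = malloc(n + 1);

    if (new == NULL) {
        return NULL;
    }
    new[n] = '\0';                   /* terminate after the copied bytes */
    return memcpy(new, str, n);      /* copy only the n valid bytes */
}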
diff --git a/replace/strnlen.c b/replace/strnlen.c
deleted file mode 100644
index 7f4250c..0000000
--- a/replace/strnlen.c
+++ /dev/null
@@ -1,31 +0,0 @@
-#include <crm_internal.h>
-#include <string.h>
-/*
- * Copyright (C) 2003 Alan Robertson <alanr@unix.sh>
- * This software licensed under the GNU LGPL.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
- */
-
-size_t
-strnlen(const char *s, size_t maxlen)
-{
- const char *eospos;
-
- eospos = memchr(s, (int)'\0', maxlen);
-
- return (eospos == NULL ? maxlen : (size_t) (eospos - s));
-}
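The memchr()-based shim above is the standard trick: scan at most maxlen bytes for a NUL and fall back to maxlen when none is found. A tiny usage sketch (example buffers only):

#include <stdio.h>
#include <string.h>

int
main(void)
{
    char buf[8] = "abc";                            /* terminated well before the cap */

    printf("%zu\n", strnlen(buf, sizeof(buf)));     /* 3 */
    printf("%zu\n", strnlen("abcdefghij", 4));      /* capped at 4 */
    return 0;
}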
diff --git a/rpm/Makefile.am b/rpm/Makefile.am
index c7975e4..2388ad6 100644
--- a/rpm/Makefile.am
+++ b/rpm/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2003-2022 the Pacemaker project contributors
+# Copyright 2003-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -12,7 +12,7 @@
# used in this file.
top_srcdir ?= ..
abs_srcdir ?= $(shell pwd)
-abs_builddir ?= $(abs_srcdir)
+abs_builddir ?= $(abs_srcdir)
MAKE ?= make
PACKAGE ?= pacemaker
AM_V_at ?= @
@@ -21,9 +21,13 @@ MKDIR_P ?= mkdir -p
include $(top_srcdir)/mk/common.mk
include $(top_srcdir)/mk/release.mk
-EXTRA_DIST = pacemaker.spec.in \
+EXTRA_DIST = pacemaker.spec.in \
rpmlintrc
+# Extra options to pass to rpmbuild (this can be used to override the location
+# options this file normally passes, or to override macros used by the spec)
+RPM_EXTRA ?=
+
# Where to put RPM artifacts; possible values:
#
# - subtree (default): RPM sources (i.e. TARFILE) in top-level build directory,
@@ -68,7 +72,7 @@ RPMTYPE = $(shell case "$(RPMDEST)" in \
esac)
RPM_SPEC_DIR = $(RPM_SPEC_DIR_$(RPMTYPE))
RPM_SRCRPM_DIR = $(RPM_SRCRPM_DIR_$(RPMTYPE))
-RPM_OPTS = $(RPM_OPTS_$(RPMTYPE))
+RPM_OPTS = $(RPM_OPTS_$(RPMTYPE)) $(RPM_EXTRA)
RPM_CLEAN = $(RPM_CLEAN_$(RPMTYPE))
WITH ?= --without doc
@@ -90,7 +94,7 @@ SPEC_COMMIT ?= $(shell \
Pacemaker-*|DIST$(rparen) \
echo '$(TAG)' ;; \
*$(rparen) \
- git log --pretty=format:%h -n 1 '$(TAG)';; \
+ "$(GIT)" log --pretty=format:%h -n 1 '$(TAG)';; \
esac)$(DIRTY_EXT)
SPEC_ABBREV = $(shell printf %s '$(SPEC_COMMIT)' | wc -c)
SPEC_RELEASE = $(shell case "$(WITH)" in \
@@ -115,6 +119,7 @@ TARFILE = $(abs_builddir)/../$(top_distdir).tar.gz
# Create a source distribution based on a git archive. (If we aren't in a git
# checkout, do a make dist instead.)
+.PHONY: export
export:
cd $(abs_srcdir)/..; \
if [ -z "$(CHECKOUT)" ] && [ -f "$(TARFILE)" ]; then \
@@ -123,35 +128,40 @@ export:
$(MAKE) $(AM_MAKEFLAGS) dist; \
echo "`date`: Rebuilt tarball: $(TARFILE)"; \
elif [ -n "$(DIRTY_EXT)" ]; then \
- git commit -m "DO-NOT-PUSH" -a; \
- git archive --prefix=$(top_distdir)/ -o "$(TARFILE)" HEAD^{tree}; \
- git reset --mixed HEAD^; \
+ "$(GIT)" commit -m "DO-NOT-PUSH" -a; \
+ "$(GIT)" archive --prefix=$(top_distdir)/ -o "$(TARFILE)" \
+ HEAD^{tree}; \
+ "$(GIT)" reset --mixed HEAD^; \
echo "`date`: Rebuilt $(TARFILE)"; \
elif [ -f "$(TARFILE)" ]; then \
echo "`date`: Using existing tarball: $(TARFILE)"; \
else \
- git archive --prefix=$(top_distdir)/ -o "$(TARFILE)" $(TAG)^{tree}; \
+ "$(GIT)" archive --prefix=$(top_distdir)/ -o "$(TARFILE)" \
+ $(TAG)^{tree}; \
echo "`date`: Rebuilt $(TARFILE)"; \
fi
# Depend on spec-clean so the spec gets rebuilt every time
$(RPM_SPEC_DIR)/$(PACKAGE).spec: spec-clean pacemaker.spec.in
$(AM_V_at)$(MKDIR_P) "$(RPM_SPEC_DIR)"
- $(AM_V_GEN)if [ x"`git ls-files -m pacemaker.spec.in 2>/dev/null`" != x ]; then \
- cat "$(abs_srcdir)/pacemaker.spec.in"; \
- elif git cat-file -e $(TAG):rpm/pacemaker.spec.in 2>/dev/null; then \
- git show $(TAG):rpm/pacemaker.spec.in; \
- elif git cat-file -e $(TAG):pacemaker.spec.in 2>/dev/null; then \
- git show $(TAG):pacemaker.spec.in; \
- else \
- cat "$(abs_srcdir)/pacemaker.spec.in"; \
- fi | sed \
- -e 's/^\(%global pcmkversion \).*/\1$(SPEC_RELEASE_NO)/' \
- -e 's/^\(%global specversion \).*/\1$(SPECVERSION)/' \
- -e 's/^\(%global commit \).*/\1$(SPEC_COMMIT)/' \
- -e 's/^\(%global commit_abbrev \).*/\1$(SPEC_ABBREV)/' \
- -e "s/PACKAGE_DATE/$$(date +'%a %b %d %Y')/" \
- -e 's/PACKAGE_VERSION/$(SPEC_RELEASE_NO)-$(SPECVERSION)/' \
+ $(AM_V_GEN)if [ x"`"$(GIT)" ls-files \
+ -m pacemaker.spec.in 2>/dev/null`" != x ]; then \
+ cat "$(abs_srcdir)/pacemaker.spec.in"; \
+ elif "$(GIT)" cat-file -e $(TAG):rpm/pacemaker.spec.in \
+ 2>/dev/null; then \
+ "$(GIT)" show $(TAG):rpm/pacemaker.spec.in; \
+ elif "$(GIT)" cat-file -e $(TAG):pacemaker.spec.in 2>/dev/null; \
+ then \
+ "$(GIT)" show $(TAG):pacemaker.spec.in; \
+ else \
+ cat "$(abs_srcdir)/pacemaker.spec.in"; \
+ fi | sed \
+ -e 's/^\(%global pcmkversion \).*/\1$(SPEC_RELEASE_NO)/' \
+ -e 's/^\(%global specversion \).*/\1$(SPECVERSION)/' \
+ -e 's/^\(%global commit \).*/\1$(SPEC_COMMIT)/' \
+ -e 's/^\(%global commit_abbrev \).*/\1$(SPEC_ABBREV)/' \
+ -e "s/PACKAGE_DATE/$$(date +'%a %b %d %Y')/" \
+ -e 's/PACKAGE_VERSION/$(SPEC_RELEASE_NO)-$(SPECVERSION)/' \
> "$@"
.PHONY: spec $(PACKAGE).spec
@@ -200,7 +210,7 @@ rc:
echo 'This target must be run from a git checkout'; \
exit 1; \
fi
- $(MAKE) $(AM_MAKEFLAGS) TAG="$$(git tag -l 2>/dev/null \
+ $(MAKE) $(AM_MAKEFLAGS) TAG="$$("$(GIT)" tag -l 2>/dev/null \
| sed -n -e 's/^\(Pacemaker-[0-9.]*-rc[0-9]*\)$$/\1/p' \
| sort -Vr | head -n 1)" rpm
@@ -251,6 +261,7 @@ mock-clean:
-rm -rf "$(MOCK_DIR)"
# Make debugging makefile issues easier
+.PHONY: vars
vars:
@echo "CHECKOUT=$(CHECKOUT)"
@echo "VERSION=$(VERSION)"
@@ -278,5 +289,6 @@ vars:
@echo "SPEC_RELEASE_NO=$(SPEC_RELEASE_NO)"
@echo "TARFILE=$(TARFILE)"
+.PHONY: clean-local
clean-local: mock-clean rpm-clean
-rm -f "$(TARFILE)"
diff --git a/rpm/pacemaker.spec.in b/rpm/pacemaker.spec.in
index 7fb27e4..c279f88 100644
--- a/rpm/pacemaker.spec.in
+++ b/rpm/pacemaker.spec.in
@@ -321,7 +321,7 @@ BuildRequires: sed
# Required for core functionality
BuildRequires: pkgconfig(glib-2.0) >= 2.42
-BuildRequires: libxml2-devel
+BuildRequires: libxml2-devel >= 2.6.0
BuildRequires: libxslt-devel
BuildRequires: libuuid-devel
BuildRequires: %{pkgname_bzip2_devel}
@@ -336,7 +336,7 @@ BuildRequires: pam-devel
BuildRequires: %{pkgname_gettext} >= 0.18
# Required for "make check"
-BuildRequires: libcmocka-devel
+BuildRequires: libcmocka-devel >= 1.1.0
%if %{systemd_native}
BuildRequires: pkgconfig(systemd)
@@ -486,7 +486,7 @@ Requires: libqb-devel%{?_isa}
Requires: %{?pkgname_libtool_devel_arch}
%endif
Requires: libuuid-devel%{?_isa}
-Requires: libxml2-devel%{?_isa}
+Requires: libxml2-devel%{?_isa} >= 2.6.0
Requires: libxslt-devel%{?_isa}
%description -n %{pkgname_pcmk_libs}-devel
@@ -633,14 +633,6 @@ mkdir -p ${RPM_BUILD_ROOT}%{_localstatedir}/lib/rpm-state/%{name}
# Don't package libtool archives
find %{buildroot} -name '*.la' -type f -print0 | xargs -0 rm -f
-# Byte-compile Python sources where suitable and the distro procedures known
-%if %{defined py_byte_compile}
-%{py_byte_compile %{python_path} %{buildroot}%{_datadir}/pacemaker/tests}
-%if !%{defined _python_bytecompile_extra}
-%{py_byte_compile %{python_path} %{buildroot}%{python_site}/cts}
-%endif
-%endif
-
%post
%if %{defined _unitdir}
%systemd_post pacemaker.service
@@ -772,7 +764,6 @@ exit 0
%exclude %{_sbindir}/pacemaker_remoted
%{_libexecdir}/pacemaker/*
-%{_sbindir}/crm_master
%if %{with stonithd}
%{_sbindir}/fence_legacy
%endif
@@ -784,7 +775,6 @@ exit 0
%doc %{_mandir}/man7/ocf_pacemaker_controld.*
%doc %{_mandir}/man7/ocf_pacemaker_o2cb.*
%doc %{_mandir}/man7/ocf_pacemaker_remote.*
-%doc %{_mandir}/man8/crm_master.*
%if %{with stonithd}
%doc %{_mandir}/man8/fence_legacy.*
%endif
@@ -830,6 +820,7 @@ exit 0
%{_sbindir}/crm_diff
%{_sbindir}/crm_error
%{_sbindir}/crm_failcount
+%{_sbindir}/crm_master
%{_sbindir}/crm_mon
%{_sbindir}/crm_node
%{_sbindir}/crm_resource
@@ -865,7 +856,6 @@ exit 0
%exclude %{_mandir}/man7/ocf_pacemaker_o2cb.*
%exclude %{_mandir}/man7/ocf_pacemaker_remote.*
%doc %{_mandir}/man8/crm*.8.gz
-%exclude %{_mandir}/man8/crm_master.*
%doc %{_mandir}/man8/attrd_updater.*
%doc %{_mandir}/man8/cibadmin.*
%if %{with cibsecrets}
@@ -935,7 +925,6 @@ exit 0
%license licenses/CC-BY-SA-4.0
%files cts
-%{python_site}/cts
%{python3_sitelib}/pacemaker/_cts/
%{_datadir}/pacemaker/tests
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 96019de..8601e1b 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2020-2022 the Pacemaker project contributors
+# Copyright 2020-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -8,14 +8,15 @@
#
# tap-test is copied from /usr/share/automake-*/tap-driver.sh.
-EXTRA_DIST = tap-driver.sh \
- tap-test \
+EXTRA_DIST = tap-driver.sh \
+ tap-test \
test-headers.sh
+.PHONY: check
check: check-headers
.PHONY: check-headers
check-headers:
CFLAGS="$(CFLAGS)" CXXFLAGS="$(CXXFLAGS)" CC="$(CC)" \
- CXX="$(CXX)" CPPFLAGS="$(CPPFLAGS)" LIBS="$(LIBS)" \
+ CXX="$(CXX)" CPPFLAGS="$(CPPFLAGS) -I$(top_builddir)/include" LIBS="$(LIBS)" \
SRCDIR="$(top_srcdir)" sh "$(srcdir)/test-headers.sh"
diff --git a/tools/Makefile.am b/tools/Makefile.am
index 36bd3ae..3efa938 100644
--- a/tools/Makefile.am
+++ b/tools/Makefile.am
@@ -13,53 +13,42 @@ if BUILD_SYSTEMD
systemdsystemunit_DATA = crm_mon.service
endif
-noinst_HEADERS = crm_mon.h crm_resource.h
+noinst_HEADERS = crm_mon.h \
+ crm_resource.h
pcmkdir = $(datadir)/$(PACKAGE)
-pcmk_DATA = report.common report.collector
+pcmk_DATA = report.common \
+ report.collector
-sbin_SCRIPTS = crm_report crm_standby crm_master crm_failcount
+sbin_SCRIPTS = crm_report \
+ crm_standby \
+ crm_master \
+ crm_failcount
if BUILD_CIBSECRETS
sbin_SCRIPTS += cibsecret
endif
noinst_SCRIPTS = cluster-clean \
- cluster-init \
cluster-helper \
pcmk_simtimes
-EXTRA_DIST = attrd_updater.8.inc \
- cibadmin.8.inc \
- crm_attribute.8.inc \
- crm_diff.8.inc \
- crm_error.8.inc \
- crm_mon.8.inc \
- crm_node.8.inc \
- crm_resource.8.inc \
- crm_rule.8.inc \
- crm_shadow.8.inc \
- crm_simulate.8.inc \
- crm_ticket.8.inc \
- crm_verify.8.inc \
- crmadmin.8.inc \
- fix-manpages \
- iso8601.8.inc \
- stonith_admin.8.inc
+EXTRA_DIST = $(wildcard *.inc) \
+ fix-manpages
sbin_PROGRAMS = attrd_updater \
- cibadmin \
- crmadmin \
- crm_simulate \
+ cibadmin \
+ crmadmin \
+ crm_simulate \
crm_attribute \
- crm_diff \
- crm_error \
- crm_mon \
- crm_node \
- crm_resource \
- crm_rule \
- crm_shadow \
- crm_verify \
- crm_ticket \
- iso8601 \
+ crm_diff \
+ crm_error \
+ crm_mon \
+ crm_node \
+ crm_resource \
+ crm_rule \
+ crm_shadow \
+ crm_verify \
+ crm_ticket \
+ iso8601 \
stonith_admin
## SOURCES
@@ -70,96 +59,96 @@ sbin_PROGRAMS = attrd_updater \
MAN8DEPS = crm_attribute
crmadmin_SOURCES = crmadmin.c
-crmadmin_LDADD = $(top_builddir)/lib/pengine/libpe_status.la \
- $(top_builddir)/lib/cib/libcib.la \
- $(top_builddir)/lib/common/libcrmcommon.la \
- $(top_builddir)/lib/pacemaker/libpacemaker.la
+crmadmin_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la
+crmadmin_LDADD += $(top_builddir)/lib/pengine/libpe_status.la
+crmadmin_LDADD += $(top_builddir)/lib/cib/libcib.la
+crmadmin_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
crm_error_SOURCES = crm_error.c
-crm_error_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la \
- $(top_builddir)/lib/common/libcrmcommon.la
+crm_error_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la
+crm_error_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
cibadmin_SOURCES = cibadmin.c
-cibadmin_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la \
- $(top_builddir)/lib/cib/libcib.la \
- $(top_builddir)/lib/common/libcrmcommon.la
+cibadmin_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la
+cibadmin_LDADD += $(top_builddir)/lib/cib/libcib.la
+cibadmin_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
crm_shadow_SOURCES = crm_shadow.c
-crm_shadow_LDADD = $(top_builddir)/lib/cib/libcib.la \
- $(top_builddir)/lib/common/libcrmcommon.la
+crm_shadow_LDADD = $(top_builddir)/lib/cib/libcib.la
+crm_shadow_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
crm_node_SOURCES = crm_node.c
-crm_node_LDADD = $(top_builddir)/lib/cib/libcib.la \
- $(top_builddir)/lib/common/libcrmcommon.la
+crm_node_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la
+crm_node_LDADD += $(top_builddir)/lib/cib/libcib.la
+crm_node_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
crm_simulate_SOURCES = crm_simulate.c
-
-crm_simulate_LDADD = $(top_builddir)/lib/pengine/libpe_status.la \
- $(top_builddir)/lib/pacemaker/libpacemaker.la \
- $(top_builddir)/lib/cib/libcib.la \
- $(top_builddir)/lib/common/libcrmcommon.la
+crm_simulate_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la
+crm_simulate_LDADD += $(top_builddir)/lib/pengine/libpe_status.la
+crm_simulate_LDADD += $(top_builddir)/lib/cib/libcib.la
+crm_simulate_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
crm_diff_SOURCES = crm_diff.c
-crm_diff_LDADD = $(top_builddir)/lib/common/libcrmcommon.la
+crm_diff_LDADD = $(top_builddir)/lib/common/libcrmcommon.la
crm_mon_SOURCES = crm_mon.c crm_mon_curses.c
-crm_mon_LDADD = $(top_builddir)/lib/pengine/libpe_status.la \
- $(top_builddir)/lib/fencing/libstonithd.la \
- $(top_builddir)/lib/pacemaker/libpacemaker.la \
- $(top_builddir)/lib/cib/libcib.la \
- $(top_builddir)/lib/common/libcrmcommon.la \
- $(CURSESLIBS)
+crm_mon_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la
+crm_mon_LDADD += $(top_builddir)/lib/pengine/libpe_status.la
+crm_mon_LDADD += $(top_builddir)/lib/fencing/libstonithd.la
+crm_mon_LDADD += $(top_builddir)/lib/cib/libcib.la
+crm_mon_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
+crm_mon_LDADD += $(CURSESLIBS)
crm_verify_SOURCES = crm_verify.c
-crm_verify_LDADD = $(top_builddir)/lib/pengine/libpe_status.la \
- $(top_builddir)/lib/pacemaker/libpacemaker.la \
- $(top_builddir)/lib/cib/libcib.la \
- $(top_builddir)/lib/common/libcrmcommon.la
+crm_verify_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la
+crm_verify_LDADD += $(top_builddir)/lib/pengine/libpe_status.la
+crm_verify_LDADD += $(top_builddir)/lib/cib/libcib.la
+crm_verify_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
crm_attribute_SOURCES = crm_attribute.c
-crm_attribute_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la \
- $(top_builddir)/lib/cib/libcib.la \
- $(top_builddir)/lib/common/libcrmcommon.la
+crm_attribute_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la
+crm_attribute_LDADD += $(top_builddir)/lib/cib/libcib.la
+crm_attribute_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
crm_resource_SOURCES = crm_resource.c \
crm_resource_ban.c \
crm_resource_print.c \
crm_resource_runtime.c
-crm_resource_LDADD = $(top_builddir)/lib/pengine/libpe_rules.la \
- $(top_builddir)/lib/fencing/libstonithd.la \
- $(top_builddir)/lib/lrmd/liblrmd.la \
- $(top_builddir)/lib/services/libcrmservice.la \
- $(top_builddir)/lib/pengine/libpe_status.la \
- $(top_builddir)/lib/pacemaker/libpacemaker.la \
- $(top_builddir)/lib/cib/libcib.la \
- $(top_builddir)/lib/common/libcrmcommon.la
+crm_resource_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la
+crm_resource_LDADD += $(top_builddir)/lib/pengine/libpe_status.la
+crm_resource_LDADD += $(top_builddir)/lib/cib/libcib.la
+crm_resource_LDADD += $(top_builddir)/lib/pengine/libpe_rules.la
+crm_resource_LDADD += $(top_builddir)/lib/lrmd/liblrmd.la
+crm_resource_LDADD += $(top_builddir)/lib/fencing/libstonithd.la
+crm_resource_LDADD += $(top_builddir)/lib/services/libcrmservice.la
+crm_resource_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
crm_rule_SOURCES = crm_rule.c
-crm_rule_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la \
- $(top_builddir)/lib/cib/libcib.la \
- $(top_builddir)/lib/pengine/libpe_rules.la \
- $(top_builddir)/lib/pengine/libpe_status.la \
- $(top_builddir)/lib/common/libcrmcommon.la
+crm_rule_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la
+crm_rule_LDADD += $(top_builddir)/lib/pengine/libpe_status.la
+crm_rule_LDADD += $(top_builddir)/lib/cib/libcib.la
+crm_rule_LDADD += $(top_builddir)/lib/pengine/libpe_rules.la
+crm_rule_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
iso8601_SOURCES = iso8601.c
iso8601_LDADD = $(top_builddir)/lib/common/libcrmcommon.la
attrd_updater_SOURCES = attrd_updater.c
-attrd_updater_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la \
- $(top_builddir)/lib/common/libcrmcommon.la
+attrd_updater_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la
+attrd_updater_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
crm_ticket_SOURCES = crm_ticket.c
-crm_ticket_LDADD = $(top_builddir)/lib/pengine/libpe_rules.la \
- $(top_builddir)/lib/pengine/libpe_status.la \
- $(top_builddir)/lib/pacemaker/libpacemaker.la \
- $(top_builddir)/lib/cib/libcib.la \
- $(top_builddir)/lib/common/libcrmcommon.la
+crm_ticket_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la
+crm_ticket_LDADD += $(top_builddir)/lib/pengine/libpe_status.la
+crm_ticket_LDADD += $(top_builddir)/lib/pengine/libpe_rules.la
+crm_ticket_LDADD += $(top_builddir)/lib/cib/libcib.la
+crm_ticket_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
stonith_admin_SOURCES = stonith_admin.c
-stonith_admin_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la \
- $(top_builddir)/lib/cib/libcib.la \
- $(top_builddir)/lib/pengine/libpe_status.la \
- $(top_builddir)/lib/fencing/libstonithd.la \
- $(top_builddir)/lib/common/libcrmcommon.la
+stonith_admin_LDADD = $(top_builddir)/lib/pacemaker/libpacemaker.la
+stonith_admin_LDADD += $(top_builddir)/lib/pengine/libpe_status.la
+stonith_admin_LDADD += $(top_builddir)/lib/cib/libcib.la
+stonith_admin_LDADD += $(top_builddir)/lib/fencing/libstonithd.la
+stonith_admin_LDADD += $(top_builddir)/lib/common/libcrmcommon.la
CLEANFILES = $(man8_MANS)
diff --git a/tools/attrd_updater.c b/tools/attrd_updater.c
index 60e4cc7..5f91356 100644
--- a/tools/attrd_updater.c
+++ b/tools/attrd_updater.c
@@ -145,8 +145,11 @@ static GOptionEntry required_entries[] = {
static GOptionEntry command_entries[] = {
{ "update", 'U', 0, G_OPTION_ARG_CALLBACK, command_cb,
- "Update attribute's value in pacemaker-attrd. If this causes the value\n"
- INDENT "to change, it will also be updated in the cluster configuration.",
+ "Update attribute's value. Required: -n/--name or -P/--pattern.\n"
+ INDENT "Optional: -d/--delay (if specified, the delay will be used if\n"
+ INDENT "the attribute needs to be created, but ignored if the\n"
+ INDENT "attribute already exists), -s/--set, -p/--private, -W/--wait,\n"
+ INDENT "-z/--utilization.",
"VALUE" },
{ "update-both", 'B', 0, G_OPTION_ARG_CALLBACK, command_cb,
@@ -446,10 +449,11 @@ send_attrd_query(pcmk__output_t *out, const char *attr_name,
pcmk_register_ipc_callback(attrd_api, attrd_event_cb, out);
// Connect to attrd (without main loop)
- rc = pcmk_connect_ipc(attrd_api, pcmk_ipc_dispatch_sync);
+ rc = pcmk__connect_ipc(attrd_api, pcmk_ipc_dispatch_sync, 5);
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc,
- "Could not connect to attrd: %s", pcmk_rc_str(rc));
+ "Could not connect to %s: %s",
+ pcmk_ipc_name(attrd_api, true), pcmk_rc_str(rc));
pcmk_free_ipc_api(attrd_api);
return rc;
}
@@ -463,7 +467,7 @@ send_attrd_query(pcmk__output_t *out, const char *attr_name,
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc, "Could not query value of %s: %s (%d)",
- attr_name, pcmk_strerror(rc), rc);
+ attr_name, pcmk_rc_str(rc), rc);
} else if (!printed_values) {
rc = pcmk_rc_schema_validation;
g_set_error(&error, PCMK__RC_ERROR, rc,
@@ -500,7 +504,7 @@ send_attrd_update(char command, const char *attr_node, const char *attr_name,
case 'U':
rc = pcmk__attrd_api_update(NULL, attr_node, attr_name, attr_value,
- NULL, attr_set, NULL,
+ attr_dampen, attr_set, NULL,
attr_options | pcmk__node_attr_value);
break;
diff --git a/tools/cibadmin.c b/tools/cibadmin.c
index f80afae..44488b5 100644
--- a/tools/cibadmin.c
+++ b/tools/cibadmin.c
@@ -72,8 +72,6 @@ void cibadmin_op_callback(xmlNode *msg, int call_id, int rc, xmlNode *output,
static void
print_xml_output(xmlNode * xml)
{
- char *buffer;
-
if (!xml) {
return;
} else if (xml->type != XML_ELEMENT_NODE) {
@@ -95,8 +93,8 @@ print_xml_output(xmlNode * xml)
}
} else {
- buffer = dump_xml_formatted(xml);
- fprintf(stdout, "%s", pcmk__s(buffer, "<null>\n"));
+ char *buffer = dump_xml_formatted(xml);
+ fprintf(stdout, "%s", buffer);
free(buffer);
}
}
@@ -574,7 +572,7 @@ main(int argc, char **argv)
output = createEmptyCib(1);
crm_xml_add(output, XML_ATTR_VALIDATION, options.validate_with);
buf = dump_xml_formatted(output);
- fprintf(stdout, "%s", pcmk__s(buf, "<null>\n"));
+ fprintf(stdout, "%s", buf);
free(buf);
goto done;
}
@@ -726,7 +724,7 @@ main(int argc, char **argv)
goto done;
}
- if (strcmp(options.cib_action, "md5-sum") == 0) {
+ if (pcmk__str_eq(options.cib_action, "md5-sum", pcmk__str_casei)) {
char *digest = NULL;
if (input == NULL) {
@@ -885,7 +883,7 @@ do_work(xmlNode *input, xmlNode **output)
/* construct the request */
the_cib->call_timeout = options.message_timeout_sec;
if ((strcmp(options.cib_action, PCMK__CIB_REQUEST_REPLACE) == 0)
- && pcmk__str_eq(crm_element_name(input), XML_TAG_CIB, pcmk__str_casei)) {
+ && pcmk__xe_is(input, XML_TAG_CIB)) {
xmlNode *status = pcmk_find_cib_element(input, XML_CIB_TAG_STATUS);
if (status == NULL) {
diff --git a/tools/cluster-helper.in b/tools/cluster-helper.in
index d8dac6e..5bfe890 100755
--- a/tools/cluster-helper.in
+++ b/tools/cluster-helper.in
@@ -72,7 +72,7 @@ while true ; do
-I) replace=$2; shift; shift;;
--list|list) format=$2; command=list; shift; shift;;
--add|add) command=group-add; shift;;
- --create|create) group="$2", command=group-create; shift; shift;;
+ --create|create) group="$2"; command=group-create; shift; shift;;
--run|run) command=run; shift;;
--copy|copy) command=copy; shift; break ;;
--key|key) command=key; shift; break ;;
diff --git a/tools/cluster-init.in b/tools/cluster-init.in
deleted file mode 100755
index 1485c81..0000000
--- a/tools/cluster-init.in
+++ /dev/null
@@ -1,537 +0,0 @@
-#!@BASH_PATH@
-#
-# Copyright 2011-2023 the Pacemaker project contributors
-#
-# The version control history for this file may have further details.
-#
-# This source code is licensed under the GNU General Public License version 2
-# or later (GPLv2+) WITHOUT ANY WARRANTY.
-#
-
-accept_defaults=0
-do_raw=0
-ETCHOSTS=0
-nodelist=0
-limit=0
-
-pkgs="corosync xinetd nmap abrt-cli fence-agents perl-TimeDate gdb"
-
-transport="multicast"
-inaddr_any="no"
-
-INSTALL=
-cs_conf=
-fence_conf=
-
-dsh_group=0
-if [ ! -z $cluster_name ]; then
- cluster=$cluster_name
-else
- cluster=dummy0
-fi
-
-# Corosync Settings
-cs_port=666
-
-# Settings that work great on nXX
-join=60
-#token=3000
-consensus=1500
-
-# Official settings
-join=2000
-token=5000
-consensus=2500
-
-# Testing
-join=1000
-consensus=7500
-do_debug=off
-
-function ip_for_node() {
- ping -c 1 $1 | grep "bytes from" | head -n 1 | sed -e 's/.*bytes from//' -e 's/: icmp.*//' | awk '{print $NF}' | sed 's:(::' | sed 's:)::'
-# if [ $do_raw = 1 ]; then
-# echo $1
-# else
-# #host $1 | grep "has address" | head -n 1 | awk '{print $NF}' | sed 's:(::' | sed 's:)::'
-# fi
-}
-function id_for_node() {
- ip_for_node $* | tr '.' ' ' | awk '{print $4}'
-}
-function name_for_node() {
- echo $1 | awk -F. '{print $1}'
-}
-
-function helptext() {
- echo "cluster-init - Configure cluster communication for the infrastructures supported by Pacemaker"
- echo ""
- echo "-g, --group Specify the group to operate on/with"
- echo "-w, --host Specify a host to operate on/with. May be specified multiple times"
- echo "-r, --raw-ip Supplied nodes were listed as their IP addresses"
- echo ""
- echo "-c, --corosync configure for corosync"
- echo "-C, --nodelist configure for corosync with a node list"
- echo "-u, --unicast configure point-to-point communication instead of multicast"
- echo ""
- echo "-I, --install Install packages"
- echo ""
- echo "-d, --debug Enable debug logging for the cluster"
- echo "--hosts Copy the local /etc/hosts file to all nodes"
- echo "-e, --extra list Whitespace separated list of extra packages to install"
- echo "-l, --limit N Use the first N hosts from the named group"
- echo " Extra packages to install"
- exit $1
-}
-
-host_input=""
-while true; do
- case "$1" in
- -g) cluster=$2;
- shift; shift;;
- -w|--host)
- for h in $2; do
- host_input="$host_input -w $h";
- done
- shift; shift;;
- -w) host_input="$host_input -w $2"
- shift; shift;;
- -r|--raw-ip) do_raw=1; shift;;
-
- -d|--debug) do_debug=on; shift;;
-
- -I|--install) INSTALL=Yes; shift;;
- --hosts) ETCHOSTS=1; shift;;
-
- -c|--corosync) CTYPE=corosync; shift;;
- -C|--nodelist) CTYPE=corosync; nodelist=1; shift;;
- -u|--unicast) nodelist=1; transport=udpu; inaddr_any="yes"; shift;;
- -e|--extra) pkgs="$pkgs $2"; shift; shift;;
- -t|--test) pkgs="$pkgs valgrind"; shift;;
- -l|--limit) limit=$2; shift; shift;;
-
- r*[0-9])
- rhel=`echo $1 | sed -e s/rhel// -e s/-// -e s/r//`
- pkgs="$pkgs qarsh-server";
- case $rhel in
- 7) CTYPE=corosync;;
- esac
- shift
- ;;
-
- f*[0-9][0-9])
- CTYPE=corosync;
- shift
- ;;
-
- -y|--yes|--defaults) accept_defaults=1; shift;;
- -x) set -x; shift;;
- -\?|--help) helptext 0; shift;;
- "") break;;
- *) echo "unknown option: $1"; exit 1;;
- esac
-done
-
-if [ ! -z $cluster ]; then
- host_input="-g $cluster"
- # use the last digit present in the variable (if any)
- dsh_group=`echo $cluster | sed 's/[^0-9][^0-9]*//g;s/.*\([0-9]\)$/\1/'`
-fi
-
-if [ -z $dsh_group ]; then
- dsh_group=1
-fi
-
-if [ x = "x$host_input" -a x = "x$cluster" ]; then
- if [ -d $HOME/.dsh/group ]; then
- read -p "Please specify a dsh group you'd like to configure as a cluster? [] " -t 60 cluster
- else
- read -p "Please specify a whitespace delimetered list of nodes you'd like to configure as a cluster? [] " -t 60 host_list
-
- for h in $2; do
- host_input="$host_input -w $h";
- done
- fi
-fi
-
-if [ -z "$host_input" ]; then
- echo "You didn't specify any nodes or groups to configure"
- exit 1
-fi
-
-if [ $limit -gt 0 ]; then
- echo "Using only the first $limit hosts in $cluster group"
- host_list=`cluster-helper --list bullet $host_input | head -n $limit | tr '\n*' ' '`
-else
- host_list=`cluster-helper --list short $host_input`
-fi
-num_hosts=`echo $host_list | wc -w`
-
-if [ $num_hosts -gt 9 ]; then
- cs_port=66
-fi
-
-for h in $host_list; do
- ping -c 1 -q $h
- if [ $? != 0 ]; then
- echo "Using long names..."
- host_list=`cluster-helper --list long $host_input`
- break
- fi
-done
-
-if [ -z $CTYPE ]; then
- echo ""
- read -p "Where should Pacemaker obtain membership and quorum from? [corosync] (corosync) " -t 60 CTYPE
-fi
-
-case $CTYPE in
- corosync) cs_conf="@PCMK__COROSYNC_CONF@" ;;
-esac
-
-function get_defaults()
-{
- if [ -z $SSH ]; then
- SSH="No"
- fi
-
- if [ -z $SELINUX ]; then
- SELINUX="No"
- fi
-
- if [ -z $IPTABLES ]; then
- IPTABLES="Yes"
- fi
-
- if [ -z $DOMAIN ]; then
- DOMAIN="No"
- fi
- if [ -z $INSTALL ]; then
- INSTALL="Yes"
- fi
- if [ -z $DATE ]; then
- DATE="No"
- fi
-}
-
-get_defaults
-if [ $accept_defaults = 0 ]; then
- echo ""
- read -p "Shall I install an ssh key to cluster nodes? [$SSH] " -t 60 SSH
- echo ""
- echo "SELinux prevent many things, including password-less ssh logins"
- read -p "Shall I disable selinux? [$SELINUX] " -t 60 SELINUX
- echo ""
- echo "Incorrectly configured firewalls will prevent corosync from starting up"
- read -p "Shall I disable iptables? [$IPTABLES] " -t 60 IPTABLES
-
- echo ""
- read -p "Shall I install/update the relevant packages? [$INSTALL] " -t 60 INSTALL
-
- echo ""
- read -p "Shall I sync the date/time? [$DATE] " -t 60 DATE
-fi
-get_defaults
-
-echo ""
-echo "Detecting possible fencing options"
-if [ -e /etc/cluster/fence_xvm.key ]; then
- echo "* Found fence_xvm"
- fence_conf=/etc/cluster/fence_xvm.key
- pkgs="$pkgs fence-virt"
-fi
-
-if [ ! -z ${OS_AUTH_URL} ]; then
- echo "* Found openstack credentials"
- fence_conf=/sbin/fence_openstack
- pkgs="$pkgs python-novaclient"
-fi
-echo ""
-echo "Beginning cluster configuration"
-echo ""
-
-case $SSH in
- [Yy][Ee][Ss]|[Yy])
- for host in $host_list; do
- echo "Installing our ssh key on ${host}"
- ssh-copy-id root@${host} >/dev/null 2>&1
- # Fix selinux labeling
- ssh -l root ${host} -- restorecon -R -v .
- done
- ;;
-esac
-
-case $DATE in
- [Yy][Ee][Ss]|[Yy])
- for host in $host_list; do
- echo "Setting time on ${host}"
- scp /etc/localtime root@${host}:/etc
- now=`date +%s`
- ssh -l root ${host} -- date -s @$now
- echo ""
- done
- ;;
-esac
-
-init=`mktemp`
-cat<<-END>$init
-verbose=0
-pkgs="$pkgs"
-
-lhost=\`uname -n\`
-lshort=\`echo \$lhost | awk -F. '{print \$1}'\`
-
-log() {
- printf "%-10s \$*\n" "\$lshort:" 1>&2
-}
-
-debug() {
- if [ \$verbose -gt 0 ]; then
- log "Debug: \$*"
- fi
-}
-
-info() {
- log "\$*"
-}
-
-warning() {
- log "WARN: \$*"
-}
-
-fatal() {
- log "ERROR: \$*"
- exit 1
-}
-
-case $SELINUX in
- [Yy][Ee][Ss]|[Yy])
- sed -i.sed "s/enforcing/disabled/g" /etc/selinux/config
- ;;
-esac
-
-case $IPTABLES in
- [Yy][Ee][Ss]|[Yy]|"")
- service iptables stop
- chkconfig iptables off
- service firewalld stop
- chkconfig firewalld off
- ;;
-esac
-
-case $DOMAIN in
- [Nn][Oo]|"")
- ;;
- *.*)
- if
- ! grep domain /etc/resolv.conf
- then
- sed -i.sed "s/nameserver/domain\ $DOMAIN\\\nnameserver/g" /etc/resolv.conf
- fi
- ;;
- *) echo "Unknown domain: $DOMAIN";;
-esac
-
-case $INSTALL in
- [Yy][Ee][Ss]|[Yy]|"")
- info Installing cluster software
- yum install -y $pkgs pacemaker
- ;;
-esac
-
-info "Configuring services"
-chkconfig xinetd on
-service xinetd start &>/dev/null
-
-chkconfig corosync off &> /dev/null
-mkdir -p /etc/cluster
-
-info "Turning on core files"
-grep -q "unlimited" /etc/bashrc
-if [ $? = 1 ]; then
- sed -i.sed "s/bashrc/bashrc\\\nulimit\ -c\ unlimited/g" /etc/bashrc
-fi
-
-function patch_cs_config() {
- test $num_hosts != 2
- two_node=$?
-
- priority="info"
- if [ $do_debug = 1 ]; then
- priority="debug"
- fi
-
- ssh -l root ${host} -- sed -i.sed "s/.*mcastaddr:.*/mcastaddr:\ 226.94.1.1/g" $cs_conf
- ssh -l root ${host} -- sed -i.sed "s/.*mcastport:.*/mcastport:\ $cs_port$dsh_group/g" $cs_conf
- ssh -l root ${host} -- sed -i.sed "s/.*bindnetaddr:.*/bindnetaddr:\ $ip/g" $cs_conf
- ssh -l root ${host} -- sed -i.sed "s/.*syslog_facility:.*/syslog_facility:\ daemon/g" $cs_conf
- ssh -l root ${host} -- sed -i.sed "s/.*logfile_priority:.*/logfile_priority:\ $priority/g" $cs_conf
-
- if [ ! -z $token ]; then
- ssh -l root ${host} -- sed -i.sed "s/.*token:.*/token:\ $token/g" $cs_conf
- fi
- if [ ! -z $consensus ]; then
- ssh -l root ${host} -- sed -i.sed "s/.*consensus:.*/consensus:\ $consensus/g" $cs_conf
- fi
- if [ ! -z $join ]; then
- ssh -l root ${host} -- sed -i.sed "s/^join:.*/join:\ $join/g" $cs_conf
- ssh -l root ${host} -- sed -i.sed "s/\\\Wjoin:.*/join:\ $join/g" $cs_conf
- fi
-
- ssh -l root ${host} -- grep -q "corosync_votequorum" $cs_conf 2>&1 > /dev/null
- if [ $? -eq 0 ]; then
- ssh -l root ${host} -- sed -i.sed "s/\\\Wexpected_votes:.*/expected_votes:\ $num_hosts/g" $cs_conf
- ssh -l root ${host} -- sed -i.sed "s/\\\Wtwo_node:.*/two_node:\ $two_node/g" $cs_conf
- else
- printf "%-10s Wrong quorum provider: installing $cs_conf for corosync instead\n" ${host}
- create_cs_config
- fi
-}
-
-function create_cs_config() {
- cs_tmp=/tmp/cs_conf.$$
- test $num_hosts != 2
- two_node=$?
-
- # Base config
- priority="info"
- if [ $do_debug = 1 ]; then
- priority="debug"
- fi
-
- cat <<-END >$cs_tmp
-# Please read the corosync.conf.5 manual page
-totem {
- version: 2
-
-    # crypto_cipher and crypto_hash: Used for mutual node authentication.
- # If you choose to enable this, then do remember to create a shared
- # secret with "corosync-keygen".
- crypto_cipher: none
- crypto_hash: none
-
- # Assign a fixed node id
- nodeid: $id
-
- # Disable encryption
- secauth: off
-
- transport: $transport
- inaddr_any: $inaddr_any
-
- # interface: define at least one interface to communicate
- # over. If you define more than one interface stanza, you must
- # also set rrp_mode.
- interface {
- # Rings must be consecutively numbered, starting at 0.
- ringnumber: 0
-
- # This is normally the *network* address of the
- # interface to bind to. This ensures that you can use
- # identical instances of this configuration file
- # across all your cluster nodes, without having to
- # modify this option.
- bindnetaddr: $ip
-
- # However, if you have multiple physical network
- # interfaces configured for the same subnet, then the
- # network address alone is not sufficient to identify
- # the interface Corosync should bind to. In that case,
- # configure the *host* address of the interface
- # instead:
- # bindnetaddr: 192.168.1.1
- # When selecting a multicast address, consider RFC
- # 2365 (which, among other things, specifies that
- # 239.255.x.x addresses are left to the discretion of
- # the network administrator). Do not reuse multicast
- # addresses across multiple Corosync clusters sharing
- # the same network.
-
- # Corosync uses the port you specify here for UDP
- # messaging, and also the immediately preceding
- # port. Thus if you set this to 5405, Corosync sends
- # messages over UDP ports 5405 and 5404.
- mcastport: $cs_port$dsh_group
-
- # Time-to-live for cluster communication packets. The
- # number of hops (routers) that this ring will allow
- # itself to pass. Note that multicast routing must be
- # specifically enabled on most network routers.
- ttl: 1
- mcastaddr: 226.94.1.1
- }
-}
-
-logging {
- debug: off
- fileline: off
- to_syslog: yes
- to_stderr: no
- syslog_facility: daemon
- timestamp: on
- to_logfile: yes
- logfile: /var/log/corosync.log
- logfile_priority: $priority
-}
-
-amf {
- mode: disabled
-}
-
-quorum {
- provider: corosync_votequorum
- expected_votes: $num_hosts
- votes: 1
- two_node: $two_node
- wait_for_all: 0
- last_man_standing: 0
- auto_tie_breaker: 0
-}
-END
- scp -q $cs_tmp root@${host}:$cs_conf
- rm -f $cs_tmp
-}
-
-for host in $host_list; do
- echo ""
- echo ""
- echo "* Configuring $host"
-
- cs_short_host=`name_for_node $host`
- ip=`ip_for_node $host`
- id=`id_for_node $host`
-
- echo $ip | grep -qis NXDOMAIN
- if [ $? = 0 ]; then
- echo "Couldn't find resolve $host to an IP address"
- exit 1
- fi
-
- if [ `uname -n` = $host ]; then
- bash $init
- else
- cat $init | ssh -l root -T $host -- "cat > $init; bash $init"
- fi
-
- if [ "x$fence_conf" != x ]; then
- if [ -e $fence_conf ]; then
- scp $fence_conf root@${host}:$fence_conf
- fi
- fi
-
- if [ $ETCHOSTS = 1 ]; then
- scp /etc/hosts root@${host}:/etc/hosts
- fi
-
- ssh -l root ${host} -- grep -q "token:" $cs_conf 2>&1 > /dev/null
- new_config=$?
- new_config=1
-
- if [ $new_config = 0 ]; then
- printf "%-10s Updating $cs_conf\n" ${host}:
- patch_cs_config
- else
- printf "%-10s Installing $cs_conf\n" ${host}:
- create_cs_config
- fi
-done
diff --git a/tools/crm_attribute.c b/tools/crm_attribute.c
index 358b150..defe294 100644
--- a/tools/crm_attribute.c
+++ b/tools/crm_attribute.c
@@ -565,7 +565,7 @@ command_query(pcmk__output_t *out, cib_t *cib)
} else if (rc != pcmk_rc_ok) {
// Don't do anything.
- } else if (xml_has_children(result)) {
+ } else if (result->children != NULL) {
struct output_data_s od = { out, use_pattern, false };
pcmk__xe_foreach_child(result, NULL, output_one_attribute, &od);
@@ -852,7 +852,7 @@ main(int argc, char **argv)
} else if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
- "Error performing operation: %s", pcmk_strerror(rc));
+ "Error performing operation: %s", pcmk_rc_str(rc));
}
done:
diff --git a/tools/crm_diff.c b/tools/crm_diff.c
index efe2fcf..9925ea7 100644
--- a/tools/crm_diff.c
+++ b/tools/crm_diff.c
@@ -108,7 +108,7 @@ print_patch(xmlNode *patch)
{
char *buffer = dump_xml_formatted(patch);
- printf("%s", pcmk__s(buffer, "<null>\n"));
+ printf("%s", buffer);
free(buffer);
fflush(stdout);
}
@@ -152,7 +152,7 @@ log_patch_cib_versions(xmlNode *patch)
const char *digest = NULL;
xml_patch_versions(patch, add, del);
- fmt = crm_element_value(patch, "format");
+ fmt = crm_element_value(patch, PCMK_XA_FORMAT);
digest = crm_element_value(patch, XML_ATTR_DIGEST);
if (add[2] != del[2] || add[1] != del[1] || add[0] != del[0]) {
@@ -166,7 +166,7 @@ strip_patch_cib_version(xmlNode *patch, const char **vfields, size_t nvfields)
{
int format = 1;
- crm_element_value_int(patch, "format", &format);
+ crm_element_value_int(patch, PCMK_XA_FORMAT, &format);
if (format == 2) {
xmlNode *version_xml = find_xml_node(patch, "version", FALSE);
@@ -208,21 +208,13 @@ static int
generate_patch(xmlNode *object_1, xmlNode *object_2, const char *xml_file_2,
gboolean as_cib, gboolean no_version)
{
- xmlNode *output = NULL;
- int rc = pcmk_rc_ok;
-
- pcmk__output_t *logger_out = NULL;
- int out_rc = pcmk_rc_no_output;
- int temp_rc = pcmk_rc_no_output;
-
const char *vfields[] = {
XML_ATTR_GENERATION_ADMIN,
XML_ATTR_GENERATION,
XML_ATTR_NUMUPDATES,
};
- rc = pcmk__log_output_new(&logger_out);
- CRM_CHECK(rc == pcmk_rc_ok, return rc);
+ xmlNode *output = NULL;
/* If we're ignoring the version, make the version information
* identical, so it isn't detected as a change. */
@@ -244,21 +236,13 @@ generate_patch(xmlNode *object_1, xmlNode *object_2, const char *xml_file_2,
output = xml_create_patchset(0, object_1, object_2, NULL, FALSE);
- pcmk__output_set_log_level(logger_out, LOG_INFO);
- out_rc = pcmk__xml_show_changes(logger_out, object_2);
-
+ pcmk__log_xml_changes(LOG_INFO, object_2);
xml_accept_changes(object_2);
if (output == NULL) {
- goto done; // rc == pcmk_rc_ok
+ return pcmk_rc_ok; // No changes
}
- /* pcmk_rc_error means there's non-empty diff.
- * @COMPAT: Choose a more descriptive return code, like one that maps to
- * CRM_EX_DIGEST?
- */
- rc = pcmk_rc_error;
-
patchset_process_digest(output, object_1, object_2, as_cib);
if (as_cib) {
@@ -268,18 +252,15 @@ generate_patch(xmlNode *object_1, xmlNode *object_2, const char *xml_file_2,
strip_patch_cib_version(output, vfields, PCMK__NELEM(vfields));
}
- pcmk__output_set_log_level(logger_out, LOG_NOTICE);
- temp_rc = logger_out->message(logger_out, "xml-patchset", output);
- out_rc = pcmk__output_select_rc(out_rc, temp_rc);
-
+ pcmk__log_xml_patchset(LOG_NOTICE, output);
print_patch(output);
free_xml(output);
-done:
- logger_out->finish(logger_out, pcmk_rc2exitc(out_rc), true, NULL);
- pcmk__output_free(logger_out);
-
- return rc;
+ /* pcmk_rc_error means there's a non-empty diff.
+ * @COMPAT Choose a more descriptive return code, like one that maps to
+ * CRM_EX_DIGEST?
+ */
+ return pcmk_rc_error;
}
static GOptionContext *
diff --git a/tools/crm_mon.c b/tools/crm_mon.c
index c20766c..dbe76fc 100644
--- a/tools/crm_mon.c
+++ b/tools/crm_mon.c
@@ -1780,7 +1780,7 @@ send_custom_trap(const char *node, const char *rsc, const char *task, int target
pid = fork();
if (pid == -1) {
- crm_perror(LOG_ERR, "notification fork() failed.");
+ out->err(out, "notification fork() failed: %s", strerror(errno));
}
if (pid == 0) {
/* crm_debug("notification: I am the child. Executing the nofitication program."); */
@@ -1840,7 +1840,7 @@ handle_rsc_op(xmlNode *xml, void *userdata)
node = crm_element_value(rsc_op, XML_LRM_ATTR_TARGET);
- while (n != NULL && !pcmk__str_eq(XML_CIB_TAG_STATE, TYPE(n), pcmk__str_casei)) {
+ while ((n != NULL) && !pcmk__xe_is(n, XML_CIB_TAG_STATE)) {
n = n->parent;
}
@@ -2051,7 +2051,7 @@ crm_diff_update(const char *event, xmlNode * msg)
if (options.external_agent) {
int format = 0;
- crm_element_value_int(diff, "format", &format);
+ crm_element_value_int(diff, PCMK_XA_FORMAT, &format);
switch(format) {
case 1:
crm_diff_update_v1(event, msg);
diff --git a/tools/crm_mon.h b/tools/crm_mon.h
index a505f50..c87432d 100644
--- a/tools/crm_mon.h
+++ b/tools/crm_mon.h
@@ -14,8 +14,8 @@
#include <glib.h>
+#include <crm/common/scheduler.h>
#include <crm/common/output_internal.h>
-#include <crm/pengine/pe_types.h>
#include <crm/stonith-ng.h>
/*
diff --git a/tools/crm_mon_curses.c b/tools/crm_mon_curses.c
index 769c7c9..212a400 100644
--- a/tools/crm_mon_curses.c
+++ b/tools/crm_mon_curses.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2019-2022 the Pacemaker project contributors
+ * Copyright 2019-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -425,11 +425,11 @@ static int
cluster_maint_mode_console(pcmk__output_t *out, va_list args) {
unsigned long long flags = va_arg(args, unsigned long long);
- if (pcmk_is_set(flags, pe_flag_maintenance_mode)) {
+ if (pcmk_is_set(flags, pcmk_sched_in_maintenance)) {
curses_formatted_printf(out, "\n *** Resource management is DISABLED ***\n");
curses_formatted_printf(out, " The cluster will not attempt to start, stop or recover services\n");
return pcmk_rc_ok;
- } else if (pcmk_is_set(flags, pe_flag_stop_everything)) {
+ } else if (pcmk_is_set(flags, pcmk_sched_stop_all)) {
curses_formatted_printf(out, "\n *** Resource management is DISABLED ***\n");
curses_formatted_printf(out, " The cluster will keep all resources stopped\n");
return pcmk_rc_ok;
@@ -438,7 +438,7 @@ cluster_maint_mode_console(pcmk__output_t *out, va_list args) {
}
}
-PCMK__OUTPUT_ARGS("cluster-status", "pe_working_set_t *",
+PCMK__OUTPUT_ARGS("cluster-status", "pcmk_scheduler_t *",
"enum pcmk_pacemakerd_state", "crm_exit_t",
"stonith_history_t *", "enum pcmk__fence_history", "uint32_t",
"uint32_t", "const char *", "GList *", "GList *")
diff --git a/tools/crm_node.c b/tools/crm_node.c
index ac2a190..1e7ce6c 100644
--- a/tools/crm_node.c
+++ b/tools/crm_node.c
@@ -9,6 +9,7 @@
#include <crm_internal.h>
+#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
@@ -24,6 +25,8 @@
#include <crm/common/ipc_controld.h>
#include <crm/common/attrd_internal.h>
+#include <pacemaker-internal.h>
+
#define SUMMARY "crm_node - Tool for displaying low-level node information"
struct {
@@ -42,8 +45,10 @@ gboolean command_cb(const gchar *option_name, const gchar *optarg, gpointer data
gboolean name_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
gboolean remove_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error);
+static GError *error = NULL;
static GMainLoop *mainloop = NULL;
static crm_exit_t exit_code = CRM_EX_OK;
+static pcmk__output_t *out = NULL;
#define INDENT " "
@@ -91,6 +96,13 @@ static GOptionEntry addl_entries[] = {
{ NULL }
};
+static pcmk__supported_format_t formats[] = {
+ PCMK__SUPPORTED_FORMAT_NONE,
+ PCMK__SUPPORTED_FORMAT_TEXT,
+ PCMK__SUPPORTED_FORMAT_XML,
+ { NULL, NULL, NULL }
+};
+
gboolean
command_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_eq("-i", option_name, pcmk__str_casei) || pcmk__str_eq("--cluster-id", option_name, pcmk__str_casei)) {
@@ -104,7 +116,7 @@ command_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError
} else if (pcmk__str_eq("-q", option_name, pcmk__str_casei) || pcmk__str_eq("--quorum", option_name, pcmk__str_casei)) {
options.command = 'q';
} else {
- g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_INVALID_PARAM, "Unknown param passed to command_cb: %s\n", option_name);
+ g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_INVALID_PARAM, "Unknown param passed to command_cb: %s", option_name);
return FALSE;
}
@@ -121,7 +133,6 @@ name_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **e
gboolean
remove_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (optarg == NULL) {
- crm_err("-R option requires an argument");
g_set_error(error, PCMK__EXITC_ERROR, CRM_EX_INVALID_PARAM, "-R option requires an argument");
return FALSE;
}
@@ -132,6 +143,184 @@ remove_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError *
return TRUE;
}
+PCMK__OUTPUT_ARGS("node-id", "uint32_t")
+static int
+node_id_default(pcmk__output_t *out, va_list args) {
+ uint32_t node_id = va_arg(args, uint32_t);
+
+ out->info(out, "%" PRIu32, node_id);
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("node-id", "uint32_t")
+static int
+node_id_xml(pcmk__output_t *out, va_list args) {
+ uint32_t node_id = va_arg(args, uint32_t);
+
+ char *id_s = crm_strdup_printf("%" PRIu32, node_id);
+
+ pcmk__output_create_xml_node(out, "node-info",
+ "nodeid", id_s,
+ NULL);
+
+ free(id_s);
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("node-list", "GList *")
+static int
+node_list_default(pcmk__output_t *out, va_list args)
+{
+ GList *nodes = va_arg(args, GList *);
+
+ for (GList *node_iter = nodes; node_iter != NULL; node_iter = node_iter->next) {
+ pcmk_controld_api_node_t *node = node_iter->data;
+ out->info(out, "%" PRIu32 " %s %s", node->id, pcmk__s(node->uname, ""),
+ pcmk__s(node->state, ""));
+ }
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("node-list", "GList *")
+static int
+node_list_xml(pcmk__output_t *out, va_list args)
+{
+ GList *nodes = va_arg(args, GList *);
+
+ out->begin_list(out, NULL, NULL, "nodes");
+
+ for (GList *node_iter = nodes; node_iter != NULL; node_iter = node_iter->next) {
+ pcmk_controld_api_node_t *node = node_iter->data;
+ char *id_s = crm_strdup_printf("%" PRIu32, node->id);
+
+ pcmk__output_create_xml_node(out, "node",
+ "id", id_s,
+ "name", node->uname,
+ "state", node->state,
+ NULL);
+
+ free(id_s);
+ }
+
+ out->end_list(out);
+
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("node-name", "uint32_t", "const char *")
+static int
+node_name_default(pcmk__output_t *out, va_list args) {
+ uint32_t node_id G_GNUC_UNUSED = va_arg(args, uint32_t);
+ const char *node_name = va_arg(args, const char *);
+
+ out->info(out, "%s", node_name);
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("node-name", "uint32_t", "const char *")
+static int
+node_name_xml(pcmk__output_t *out, va_list args) {
+ uint32_t node_id = va_arg(args, uint32_t);
+ const char *node_name = va_arg(args, const char *);
+
+ char *id_s = crm_strdup_printf("%" PRIu32, node_id);
+
+ pcmk__output_create_xml_node(out, "node-info",
+ "nodeid", id_s,
+ XML_ATTR_UNAME, node_name,
+ NULL);
+
+ free(id_s);
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("partition-list", "GList *")
+static int
+partition_list_default(pcmk__output_t *out, va_list args)
+{
+ GList *nodes = va_arg(args, GList *);
+
+ GString *buffer = NULL;
+
+ for (GList *node_iter = nodes; node_iter != NULL; node_iter = node_iter->next) {
+ pcmk_controld_api_node_t *node = node_iter->data;
+ if (pcmk__str_eq(node->state, "member", pcmk__str_none)) {
+ pcmk__add_separated_word(&buffer, 128, pcmk__s(node->uname, ""), " ");
+ }
+ }
+
+ if (buffer != NULL) {
+ out->info(out, "%s", buffer->str);
+ g_string_free(buffer, TRUE);
+ return pcmk_rc_ok;
+ }
+
+ return pcmk_rc_no_output;
+}
+
+PCMK__OUTPUT_ARGS("partition-list", "GList *")
+static int
+partition_list_xml(pcmk__output_t *out, va_list args)
+{
+ GList *nodes = va_arg(args, GList *);
+
+ out->begin_list(out, NULL, NULL, "nodes");
+
+ for (GList *node_iter = nodes; node_iter != NULL; node_iter = node_iter->next) {
+ pcmk_controld_api_node_t *node = node_iter->data;
+
+ if (pcmk__str_eq(node->state, "member", pcmk__str_none)) {
+ char *id_s = crm_strdup_printf("%" PRIu32, node->id);
+
+ pcmk__output_create_xml_node(out, "node",
+ "id", id_s,
+ "name", node->uname,
+ "state", node->state,
+ NULL);
+ free(id_s);
+ }
+ }
+
+ out->end_list(out);
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("quorum", "bool")
+static int
+quorum_default(pcmk__output_t *out, va_list args) {
+ bool have_quorum = va_arg(args, int);
+
+ out->info(out, "%d", have_quorum);
+ return pcmk_rc_ok;
+}
+
+PCMK__OUTPUT_ARGS("quorum", "bool")
+static int
+quorum_xml(pcmk__output_t *out, va_list args) {
+ bool have_quorum = va_arg(args, int);
+
+ pcmk__output_create_xml_node(out, "cluster-info",
+ "quorum", have_quorum ? "true" : "false",
+ NULL);
+ return pcmk_rc_ok;
+}
+
+static pcmk__message_entry_t fmt_functions[] = {
+ { "node-id", "default", node_id_default },
+ { "node-id", "xml", node_id_xml },
+ { "node-list", "default", node_list_default },
+ { "node-list", "xml", node_list_xml },
+ { "node-name", "default", node_name_default },
+ { "node-name", "xml", node_name_xml },
+ { "quorum", "default", quorum_default },
+ { "quorum", "xml", quorum_xml },
+ { "partition-list", "default", partition_list_default },
+ { "partition-list", "xml", partition_list_xml },
+
+ { NULL, NULL, NULL }
+};
+
static gint
sort_node(gconstpointer a, gconstpointer b)
{
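The table and functions added above follow Pacemaker's formatted-output convention: each message name maps to one implementation per output format, and callers never pick a format themselves. A minimal sketch of the wiring, using only calls that appear elsewhere in this patch (error handling omitted; the function name is invented for illustration):

static void
example_show_partition(pcmk__output_t *out, GList *nodes)
{
    /* Make the fmt_functions[] implementations known to the output object */
    pcmk__register_messages(out, fmt_functions);

    /* Dispatch by message name; the output object chooses
     * partition_list_default or partition_list_xml depending on whether
     * --output-as=text or --output-as=xml was requested.
     */
    out->message(out, "partition-list", nodes);
}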
@@ -152,7 +341,8 @@ controller_event_cb(pcmk_ipc_api_t *controld_api,
switch (event_type) {
case pcmk_ipc_event_disconnect:
if (exit_code == CRM_EX_DISCONNECT) { // Unexpected
- fprintf(stderr, "error: Lost connection to controller\n");
+ g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
+ "Lost connection to controller");
}
goto done;
break;
@@ -165,93 +355,26 @@ controller_event_cb(pcmk_ipc_api_t *controld_api,
}
if (status != CRM_EX_OK) {
- fprintf(stderr, "error: Bad reply from controller: %s\n",
- crm_exit_str(status));
+ exit_code = status;
+ g_set_error(&error, PCMK__EXITC_ERROR, status,
+ "Bad reply from controller: %s",
+ crm_exit_str(status));
goto done;
}
- // Parse desired info from reply and display to user
- switch (options.command) {
- case 'i':
- if (reply->reply_type != pcmk_controld_reply_info) {
- fprintf(stderr,
- "error: Unknown reply type %d from controller\n",
- reply->reply_type);
- goto done;
- }
- if (reply->data.node_info.id == 0) {
- fprintf(stderr,
- "error: Controller reply did not contain node ID\n");
- exit_code = CRM_EX_PROTOCOL;
- goto done;
- }
- printf("%d\n", reply->data.node_info.id);
- break;
-
- case 'n':
- case 'N':
- if (reply->reply_type != pcmk_controld_reply_info) {
- fprintf(stderr,
- "error: Unknown reply type %d from controller\n",
- reply->reply_type);
- goto done;
- }
- if (reply->data.node_info.uname == NULL) {
- fprintf(stderr, "Node is not known to cluster\n");
- exit_code = CRM_EX_NOHOST;
- goto done;
- }
- printf("%s\n", reply->data.node_info.uname);
- break;
-
- case 'q':
- if (reply->reply_type != pcmk_controld_reply_info) {
- fprintf(stderr,
- "error: Unknown reply type %d from controller\n",
- reply->reply_type);
- goto done;
- }
- printf("%d\n", reply->data.node_info.have_quorum);
- if (!(reply->data.node_info.have_quorum)) {
- exit_code = CRM_EX_QUORUM;
- goto done;
- }
- break;
+ if (reply->reply_type != pcmk_controld_reply_nodes) {
+ g_set_error(&error, PCMK__EXITC_ERROR, CRM_EX_INDETERMINATE,
+ "Unknown reply type %d from controller",
+ reply->reply_type);
+ goto done;
+ }
- case 'l':
- case 'p':
- if (reply->reply_type != pcmk_controld_reply_nodes) {
- fprintf(stderr,
- "error: Unknown reply type %d from controller\n",
- reply->reply_type);
- goto done;
- }
- reply->data.nodes = g_list_sort(reply->data.nodes, sort_node);
- for (GList *node_iter = reply->data.nodes;
- node_iter != NULL; node_iter = node_iter->next) {
-
- pcmk_controld_api_node_t *node = node_iter->data;
- const char *uname = (node->uname? node->uname : "");
- const char *state = (node->state? node->state : "");
-
- if (options.command == 'l') {
- printf("%lu %s %s\n",
- (unsigned long) node->id, uname, state);
-
- // i.e. CRM_NODE_MEMBER, but we don't want to include cluster.h
- } else if (!strcmp(state, "member")) {
- printf("%s ", uname);
- }
- }
- if (options.command == 'p') {
- printf("\n");
- }
- break;
+ reply->data.nodes = g_list_sort(reply->data.nodes, sort_node);
- default:
- fprintf(stderr, "internal error: Controller reply not expected\n");
- exit_code = CRM_EX_SOFTWARE;
- goto done;
+ if (options.command == 'p') {
+ out->message(out, "partition-list", reply->data.nodes);
+ } else if (options.command == 'l') {
+ out->message(out, "node-list", reply->data.nodes);
}
// Success
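The visible text output of the two list commands is unchanged by this conversion: node_list_default still prints one "id name state" line per node (for example "1 node1 member"), and partition_list_default still prints the member names space-separated on one line (for example "node1 node2"); the node values here are invented for illustration.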
@@ -262,7 +385,7 @@ done:
}
static void
-run_controller_mainloop(uint32_t nodeid, bool list_nodes)
+run_controller_mainloop(void)
{
pcmk_ipc_api_t *controld_api = NULL;
int rc;
@@ -273,31 +396,30 @@ run_controller_mainloop(uint32_t nodeid, bool list_nodes)
// Create controller IPC object
rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld);
if (rc != pcmk_rc_ok) {
- fprintf(stderr, "error: Could not connect to controller: %s\n",
- pcmk_rc_str(rc));
+ g_set_error(&error, PCMK__RC_ERROR, rc,
+ "Could not connect to controller: %s",
+ pcmk_rc_str(rc));
return;
}
pcmk_register_ipc_callback(controld_api, controller_event_cb, NULL);
// Connect to controller
- rc = pcmk_connect_ipc(controld_api, pcmk_ipc_dispatch_main);
+ rc = pcmk__connect_ipc(controld_api, pcmk_ipc_dispatch_main, 5);
if (rc != pcmk_rc_ok) {
- fprintf(stderr, "error: Could not connect to controller: %s\n",
- pcmk_rc_str(rc));
exit_code = pcmk_rc2exitc(rc);
+ g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
+ "Could not connect to %s: %s",
+ pcmk_ipc_name(controld_api, true), pcmk_rc_str(rc));
return;
}
- if (list_nodes) {
- rc = pcmk_controld_api_list_nodes(controld_api);
- } else {
- rc = pcmk_controld_api_node_info(controld_api, nodeid);
- }
+ rc = pcmk_controld_api_list_nodes(controld_api);
+
if (rc != pcmk_rc_ok) {
- fprintf(stderr, "error: Could not ping controller: %s\n",
- pcmk_rc_str(rc));
pcmk_disconnect_ipc(controld_api);
exit_code = pcmk_rc2exitc(rc);
+ g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
+ "Could not ping controller: %s", pcmk_rc_str(rc));
return;
}
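Taken together with the unchanged main-loop code later in this function, the simplified helper now issues exactly one request, the node-list query; everything else happens in the callback above. A condensed sketch of the flow, reusing only the calls visible in this hunk plus a stock GLib main loop (the function name is invented; the final argument to pcmk__connect_ipc() is copied verbatim from the patch):

static void
example_list_nodes(void)
{
    pcmk_ipc_api_t *api = NULL;

    if (pcmk_new_ipc_api(&api, pcmk_ipc_controld) != pcmk_rc_ok) {
        return;
    }
    pcmk_register_ipc_callback(api, controller_event_cb, NULL);

    if ((pcmk__connect_ipc(api, pcmk_ipc_dispatch_main, 5) == pcmk_rc_ok)
        && (pcmk_controld_api_list_nodes(api) == pcmk_rc_ok)) {
        /* controller_event_cb() formats the reply; the loop ends once the
         * IPC connection is closed */
        mainloop = g_main_loop_new(NULL, FALSE);
        g_main_loop_run(mainloop);
    }
    pcmk_free_ipc_api(api);
}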
@@ -310,169 +432,295 @@ run_controller_mainloop(uint32_t nodeid, bool list_nodes)
}
static void
-print_node_name(void)
+print_node_id(void)
{
- // Check environment first (i.e. when called by resource agent)
- const char *name = getenv("OCF_RESKEY_" CRM_META "_" XML_LRM_ATTR_TARGET);
+ uint32_t nodeid;
+ int rc = pcmk__query_node_info(out, &nodeid, NULL, NULL, NULL, NULL, NULL,
+ false, 0);
- if (name != NULL) {
- printf("%s\n", name);
- exit_code = CRM_EX_OK;
+ if (rc != pcmk_rc_ok) {
+ /* pcmk__query_node_info already sets an error message on the output object,
+ * so there's no need to call g_set_error here. That would just create a
+ * duplicate error message in the output.
+ */
+ exit_code = pcmk_rc2exitc(rc);
return;
+ }
- } else {
- /* Otherwise ask the controller.
- * FIXME: Use pcmk__query_node_name() after conversion to formatted
- * output.
- */
- run_controller_mainloop(0, false);
+ rc = out->message(out, "node-id", nodeid);
+
+ if (rc != pcmk_rc_ok) {
+ g_set_error(&error, PCMK__RC_ERROR, rc, "Could not print node ID: %s",
+ pcmk_rc_str(rc));
}
+
+ exit_code = pcmk_rc2exitc(rc);
}
-static int
-cib_remove_node(long id, const char *name)
+static void
+print_node_name(uint32_t nodeid)
{
- int rc;
- cib_t *cib = NULL;
- xmlNode *node = NULL;
- xmlNode *node_state = NULL;
+ int rc = pcmk_rc_ok;
+ char *node_name = NULL;
+
+ if (nodeid == 0) {
+ // Check environment first (i.e. when called by resource agent)
+ const char *name = getenv("OCF_RESKEY_" CRM_META "_" XML_LRM_ATTR_TARGET);
+
+ if (name != NULL) {
+ rc = out->message(out, "node-name", 0, name);
+ goto done;
+ }
+ }
- crm_trace("Removing %s from the CIB", name);
+ // Otherwise ask the controller
- if(name == NULL && id == 0) {
- return -ENOTUNIQ;
+ /* pcmk__query_node_name already sets an error message on the output object,
+ * so there's no need to call g_set_error here. That would just create a
+ * duplicate error message in the output.
+ */
+ rc = pcmk__query_node_name(out, nodeid, &node_name, 0);
+ if (rc != pcmk_rc_ok) {
+ exit_code = pcmk_rc2exitc(rc);
+ return;
}
- node = create_xml_node(NULL, XML_CIB_TAG_NODE);
- node_state = create_xml_node(NULL, XML_CIB_TAG_STATE);
+ rc = out->message(out, "node-name", 0, node_name);
- crm_xml_add(node, XML_ATTR_UNAME, name);
- crm_xml_add(node_state, XML_ATTR_UNAME, name);
- if (id > 0) {
- crm_xml_set_id(node, "%ld", id);
- crm_xml_add(node_state, XML_ATTR_ID, ID(node));
+done:
+ if (node_name != NULL) {
+ free(node_name);
}
- cib = cib_new();
- cib->cmds->signon(cib, crm_system_name, cib_command);
+ if (rc != pcmk_rc_ok) {
+ g_set_error(&error, PCMK__RC_ERROR, rc, "Could not print node name: %s",
+ pcmk_rc_str(rc));
+ }
- rc = cib->cmds->remove(cib, XML_CIB_TAG_NODES, node, cib_sync_call);
- if (rc != pcmk_ok) {
- printf("Could not remove %s[%ld] from " XML_CIB_TAG_NODES ": %s",
- name, id, pcmk_strerror(rc));
+ exit_code = pcmk_rc2exitc(rc);
+}
+
+static void
+print_quorum(void)
+{
+ bool quorum;
+ int rc = pcmk__query_node_info(out, NULL, NULL, NULL, NULL, &quorum, NULL,
+ false, 0);
+
+ if (rc != pcmk_rc_ok) {
+ /* pcmk__query_node_info already sets an error message on the output object,
+ * so there's no need to call g_set_error here. That would just create a
+ * duplicate error message in the output.
+ */
+ exit_code = pcmk_rc2exitc(rc);
+ return;
+ }
+
+ rc = out->message(out, "quorum", quorum);
+
+ if (rc != pcmk_rc_ok) {
+ g_set_error(&error, PCMK__RC_ERROR, rc, "Could not print quorum status: %s",
+ pcmk_rc_str(rc));
+ }
+
+ exit_code = pcmk_rc2exitc(rc);
+}
+
+/*!
+ * \internal
+ * \brief Extend a transaction by removing a node from a CIB section
+ *
+ * \param[in,out] cib Active CIB connection
+ * \param[in] element CIB element containing node name and/or ID
+ * \param[in] section CIB section that \p element is in
+ * \param[in] node_name Name of node to purge (NULL to leave unspecified)
+ * \param[in] node_id Node ID of node to purge (0 to leave unspecified)
+ *
+ * \note At least one of node_name and node_id must be specified.
+ * \return Standard Pacemaker return code
+ */
+static int
+remove_from_section(cib_t *cib, const char *element, const char *section,
+ const char *node_name, long node_id)
+{
+ xmlNode *xml = NULL;
+ int rc = pcmk_rc_ok;
+
+ xml = create_xml_node(NULL, element);
+ if (xml == NULL) {
+ return pcmk_rc_error;
+ }
+ crm_xml_add(xml, XML_ATTR_UNAME, node_name);
+ if (node_id > 0) {
+ crm_xml_set_id(xml, "%ld", node_id);
+ }
+ rc = cib->cmds->remove(cib, section, xml, cib_transaction);
+ free_xml(xml);
+ return (rc >= 0)? pcmk_rc_ok : pcmk_legacy2rc(rc);
+}
+
+/*!
+ * \internal
+ * \brief Purge a node from CIB
+ *
+ * \param[in] node_name Name of node to purge (or NULL to leave unspecified)
+ * \param[in] node_id Node ID of node to purge (or 0 to leave unspecified)
+ *
+ * \note At least one of node_name and node_id must be specified.
+ * \return Standard Pacemaker return code
+ */
+static int
+purge_node_from_cib(const char *node_name, long node_id)
+{
+ int rc = pcmk_rc_ok;
+ int commit_rc = pcmk_rc_ok;
+ cib_t *cib = NULL;
+
+ // Connect to CIB and start a transaction
+ cib = cib_new();
+ if (cib == NULL) {
+ return ENOTCONN;
+ }
+ rc = cib->cmds->signon(cib, crm_system_name, cib_command);
+ if (rc == pcmk_ok) {
+ rc = cib->cmds->init_transaction(cib);
}
- rc = cib->cmds->remove(cib, XML_CIB_TAG_STATUS, node_state, cib_sync_call);
if (rc != pcmk_ok) {
- printf("Could not remove %s[%ld] from " XML_CIB_TAG_STATUS ": %s",
- name, id, pcmk_strerror(rc));
+ rc = pcmk_legacy2rc(rc);
+ cib__clean_up_connection(&cib);
+ return rc;
+ }
+
+ // Remove from configuration and status
+ rc = remove_from_section(cib, XML_CIB_TAG_NODE, XML_CIB_TAG_NODES,
+ node_name, node_id);
+ if (rc == pcmk_rc_ok) {
+ rc = remove_from_section(cib, XML_CIB_TAG_STATE, XML_CIB_TAG_STATUS,
+ node_name, node_id);
}
+ // Commit the transaction
+ commit_rc = cib->cmds->end_transaction(cib, (rc == pcmk_rc_ok),
+ cib_sync_call);
cib__clean_up_connection(&cib);
+
+ if ((rc == pcmk_rc_ok) && (commit_rc == pcmk_ok)) {
+ crm_debug("Purged node %s (%ld) from CIB",
+ pcmk__s(node_name, "by ID"), node_id);
+ }
return rc;
}
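/* How the two helpers above fit together (a reader's note, not from the
 * upstream change): remove_from_section() passes cib_transaction rather than
 * cib_sync_call, so each call only queues a removal on the open transaction.
 * Nothing changes in the CIB until end_transaction() runs; its second
 * argument, (rc == pcmk_rc_ok), commits the queued removals only when both
 * sections were queued successfully and discards them otherwise, so a node
 * is never left half-purged from the configuration.
 */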
+/*!
+ * \internal
+ * \brief Purge a node from a single server's peer cache
+ *
+ * \param[in] server IPC server to send request to
+ * \param[in] node_name Name of node to purge (or NULL to leave unspecified)
+ * \param[in] node_id Node ID of node to purge (or 0 to leave unspecified)
+ *
+ * \note At least one of node_name and node_id must be specified.
+ * \return Standard Pacemaker return code
+ */
static int
-controller_remove_node(const char *node_name, long nodeid)
+purge_node_from(enum pcmk_ipc_server server, const char *node_name,
+ long node_id)
{
- pcmk_ipc_api_t *controld_api = NULL;
+ pcmk_ipc_api_t *api = NULL;
int rc;
- // Create controller IPC object
- rc = pcmk_new_ipc_api(&controld_api, pcmk_ipc_controld);
+ rc = pcmk_new_ipc_api(&api, server);
if (rc != pcmk_rc_ok) {
- fprintf(stderr, "error: Could not connect to controller: %s\n",
- pcmk_rc_str(rc));
- return ENOTCONN;
+ goto done;
}
- // Connect to controller (without main loop)
- rc = pcmk_connect_ipc(controld_api, pcmk_ipc_dispatch_sync);
+ rc = pcmk__connect_ipc(api, pcmk_ipc_dispatch_sync, 5);
if (rc != pcmk_rc_ok) {
- fprintf(stderr, "error: Could not connect to controller: %s\n",
- pcmk_rc_str(rc));
- pcmk_free_ipc_api(controld_api);
- return rc;
+ goto done;
}
- rc = pcmk_ipc_purge_node(controld_api, node_name, nodeid);
- if (rc != pcmk_rc_ok) {
- fprintf(stderr,
- "error: Could not clear node from controller's cache: %s\n",
- pcmk_rc_str(rc));
+ rc = pcmk_ipc_purge_node(api, node_name, node_id);
+done:
+ if (rc != pcmk_rc_ok) { // Debug message already logged on success
+ g_set_error(&error, PCMK__RC_ERROR, rc,
+ "Could not purge node %s from %s: %s",
+ pcmk__s(node_name, "by ID"), pcmk_ipc_name(api, true),
+ pcmk_rc_str(rc));
}
-
- pcmk_free_ipc_api(controld_api);
- return pcmk_rc_ok;
+ pcmk_free_ipc_api(api);
+ return rc;
}
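/* Usage sketch (hypothetical node name): remove_node() below calls this
 * helper once per daemon that already speaks pcmk_ipc_api_t, while the
 * fencer keeps the legacy crm_ipc_t path that follows.
 *
 *     rc = purge_node_from(pcmk_ipc_controld, "node1", 0);
 *     if (rc == pcmk_rc_ok) {
 *         rc = purge_node_from(pcmk_ipc_attrd, "node1", 0);
 *     }
 */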
+/*!
+ * \internal
+ * \brief Purge a node from the fencer's peer cache
+ *
+ * \param[in] node_name Name of node to purge (or NULL to leave unspecified)
+ * \param[in] node_id Node ID of node to purge (or 0 to leave unspecified)
+ *
+ * \note At least one of node_name and node_id must be specified.
+ * \return Standard Pacemaker return code
+ */
static int
-tools_remove_node_cache(const char *node_name, long nodeid, const char *target)
+purge_node_from_fencer(const char *node_name, long node_id)
{
- int rc = -1;
+ int rc = pcmk_rc_ok;
crm_ipc_t *conn = NULL;
xmlNode *cmd = NULL;
- conn = crm_ipc_new(target, 0);
- if (!conn) {
- return -ENOTCONN;
+ conn = crm_ipc_new("stonith-ng", 0);
+ if (conn == NULL) {
+ rc = ENOTCONN;
+ exit_code = pcmk_rc2exitc(rc);
+ g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
+ "Could not connect to fencer to purge node %s",
+ pcmk__s(node_name, "by ID"));
+ return rc;
}
- if (!crm_ipc_connect(conn)) {
- crm_perror(LOG_ERR, "Connection to %s failed", target);
+
+ rc = pcmk__connect_generic_ipc(conn);
+ if (rc != pcmk_rc_ok) {
+ exit_code = pcmk_rc2exitc(rc);
+ g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
+ "Could not connect to fencer to purge node %s: %s",
+ pcmk__s(node_name, "by ID"), pcmk_rc_str(rc));
crm_ipc_destroy(conn);
- return -ENOTCONN;
+ return rc;
}
- crm_trace("Removing %s[%ld] from the %s membership cache",
- node_name, nodeid, target);
-
- if(pcmk__str_eq(target, T_ATTRD, pcmk__str_casei)) {
- cmd = create_xml_node(NULL, __func__);
-
- crm_xml_add(cmd, F_TYPE, T_ATTRD);
- crm_xml_add(cmd, F_ORIG, crm_system_name);
-
- crm_xml_add(cmd, PCMK__XA_TASK, PCMK__ATTRD_CMD_PEER_REMOVE);
-
- pcmk__xe_add_node(cmd, node_name, nodeid);
-
- } else { // Fencer or pacemakerd
- cmd = create_request(CRM_OP_RM_NODE_CACHE, NULL, NULL, target,
- crm_system_name, NULL);
- if (nodeid > 0) {
- crm_xml_set_id(cmd, "%ld", nodeid);
- }
- crm_xml_add(cmd, XML_ATTR_UNAME, node_name);
+ cmd = create_request(CRM_OP_RM_NODE_CACHE, NULL, NULL, "stonith-ng",
+ crm_system_name, NULL);
+ if (node_id > 0) {
+ crm_xml_set_id(cmd, "%ld", node_id);
}
+ crm_xml_add(cmd, XML_ATTR_UNAME, node_name);
rc = crm_ipc_send(conn, cmd, 0, 0, NULL);
- crm_debug("%s peer cache cleanup for %s (%ld): %d",
- target, node_name, nodeid, rc);
-
- if (rc > 0) {
- // @TODO Should this be done just once after all the rest?
- rc = cib_remove_node(nodeid, node_name);
- }
-
- if (conn) {
- crm_ipc_close(conn);
- crm_ipc_destroy(conn);
+ if (rc >= 0) {
+ rc = pcmk_rc_ok;
+ crm_debug("Purged node %s (%ld) from fencer",
+ pcmk__s(node_name, "by ID"), node_id);
+ } else {
+ rc = pcmk_legacy2rc(rc);
+ fprintf(stderr, "Could not purge node %s from fencer: %s\n",
+ pcmk__s(node_name, "by ID"), pcmk_rc_str(rc));
}
free_xml(cmd);
- return rc > 0 ? 0 : rc;
+ crm_ipc_close(conn);
+ crm_ipc_destroy(conn);
+ return rc;
}
static void
remove_node(const char *target_uname)
{
- int rc;
- int d = 0;
+ int rc = pcmk_rc_ok;
long nodeid = 0;
const char *node_name = NULL;
char *endptr = NULL;
- const char *daemons[] = {
- "stonith-ng",
- T_ATTRD,
- CRM_SYSTEM_MCP,
+ const enum pcmk_ipc_server servers[] = {
+ pcmk_ipc_controld,
+ pcmk_ipc_attrd,
};
// Check whether node was specified by name or numeric ID
@@ -485,25 +733,28 @@ remove_node(const char *target_uname)
node_name = target_uname;
}
- rc = controller_remove_node(node_name, nodeid);
+ for (int i = 0; i < PCMK__NELEM(servers); ++i) {
+ rc = purge_node_from(servers[i], node_name, nodeid);
+ if (rc != pcmk_rc_ok) {
+ exit_code = pcmk_rc2exitc(rc);
+ return;
+ }
+ }
+
+ // The fencer hasn't been converted to pcmk_ipc_api_t yet
+ rc = purge_node_from_fencer(node_name, nodeid);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
return;
}
- for (d = 0; d < PCMK__NELEM(daemons); d++) {
- if (tools_remove_node_cache(node_name, nodeid, daemons[d])) {
- crm_err("Failed to connect to %s to remove node '%s'",
- daemons[d], target_uname);
- exit_code = CRM_EX_ERROR;
- return;
- }
- }
- exit_code = CRM_EX_OK;
+ // Lastly, purge the node from the CIB itself
+ rc = purge_node_from_cib(node_name, nodeid);
+ exit_code = pcmk_rc2exitc(rc);
}
static GOptionContext *
-build_arg_context(pcmk__common_args_t *args, GOptionGroup *group) {
+build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
GOptionContext *context = NULL;
GOptionEntry extra_prog_entries[] = {
@@ -514,7 +765,7 @@ build_arg_context(pcmk__common_args_t *args, GOptionGroup *group) {
{ NULL }
};
- context = pcmk__build_arg_context(args, NULL, &group, NULL);
+ context = pcmk__build_arg_context(args, "text (default), xml", group, NULL);
/* Add the -q option, which cannot be part of the globally supported options
* because some tools use that flag for something else.
@@ -531,13 +782,14 @@ build_arg_context(pcmk__common_args_t *args, GOptionGroup *group) {
int
main(int argc, char **argv)
{
- GError *error = NULL;
+ int rc = pcmk_rc_ok;
GOptionGroup *output_group = NULL;
pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY);
gchar **processed_args = pcmk__cmdline_preproc(argv, "NR");
- GOptionContext *context = build_arg_context(args, output_group);
+ GOptionContext *context = build_arg_context(args, &output_group);
+ pcmk__register_formats(output_group, formats);
if (!g_option_context_parse_strv(context, &processed_args, &error)) {
exit_code = CRM_EX_USAGE;
goto done;
@@ -545,49 +797,72 @@ main(int argc, char **argv)
pcmk__cli_init_logging("crm_node", args->verbosity);
+ rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
+ if (rc != pcmk_rc_ok) {
+ exit_code = pcmk_rc2exitc(rc);
+ g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
+ "Error creating output format %s: %s", args->output_ty,
+ pcmk_rc_str(rc));
+ goto done;
+ }
+
+ if (!pcmk__force_args(context, &error, "%s --xml-simple-list", g_get_prgname())) {
+ exit_code = CRM_EX_SOFTWARE;
+ goto done;
+ }
+
if (args->version) {
- g_strfreev(processed_args);
- pcmk__free_arg_context(context);
- /* FIXME: When crm_node is converted to use formatted output, this can go. */
- pcmk__cli_help('v');
+ out->version(out, false);
+ goto done;
}
if (options.command == 0) {
char *help = g_option_context_get_help(context, TRUE, NULL);
- fprintf(stderr, "%s", help);
+ out->err(out, "%s", help);
g_free(help);
exit_code = CRM_EX_USAGE;
goto done;
}
if (options.dangerous_cmd && options.force_flag == FALSE) {
- fprintf(stderr, "The supplied command is considered dangerous."
- " To prevent accidental destruction of the cluster,"
- " the --force flag is required in order to proceed.\n");
exit_code = CRM_EX_USAGE;
+ g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
+ "The supplied command is considered dangerous."
+ " To prevent accidental destruction of the cluster,"
+ " the --force flag is required in order to proceed.");
goto done;
}
+ pcmk__register_lib_messages(out);
+ pcmk__register_messages(out, fmt_functions);
+
switch (options.command) {
- case 'n':
- print_node_name();
+ case 'i':
+ print_node_id();
break;
- case 'R':
- remove_node(options.target_uname);
+
+ case 'n':
+ print_node_name(0);
break;
- case 'i':
+
case 'q':
+ print_quorum();
+ break;
+
case 'N':
- /* FIXME: Use pcmk__query_node_name() after conversion to formatted
- * output
- */
- run_controller_mainloop(options.nodeid, false);
+ print_node_name(options.nodeid);
break;
+
+ case 'R':
+ remove_node(options.target_uname);
+ break;
+
case 'l':
case 'p':
- run_controller_mainloop(0, true);
+ run_controller_mainloop();
break;
+
default:
break;
}
@@ -596,6 +871,12 @@ done:
g_strfreev(processed_args);
pcmk__free_arg_context(context);
- pcmk__output_and_clear_error(&error, NULL);
+ pcmk__output_and_clear_error(&error, out);
+
+ if (out != NULL) {
+ out->finish(out, exit_code, true, NULL);
+ pcmk__output_free(out);
+ }
+ pcmk__unregister_formats();
return crm_exit(exit_code);
}
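With the whole tool converted, crm_node now accepts --output-as=text|xml like the other CLI tools. Based on the node_list_xml formatter added above, a node list renders roughly as the fragment below; the outer wrapper element produced by the shared XML formatter is omitted here, and the ids, names and states are invented for illustration.

<nodes>
  <node id="1" name="node1" state="member"/>
  <node id="2" name="node2" state="member"/>
</nodes>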
diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index f351c26..7c4a0a1 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -76,7 +76,7 @@ struct {
gboolean require_cib; // Whether command requires CIB IPC
int cib_options; // Options to use with CIB IPC calls
gboolean require_crmd; // Whether command requires controller IPC
- gboolean require_dataset; // Whether command requires populated data set
+ gboolean require_scheduler; // Whether command requires scheduler data
gboolean require_resource; // Whether command requires resource specified
gboolean require_node; // Whether command requires node specified
int find_flags; // Flags to use when searching for resource
@@ -117,7 +117,7 @@ struct {
.check_level = -1,
.cib_options = cib_sync_call,
.require_cib = TRUE,
- .require_dataset = TRUE,
+ .require_scheduler = TRUE,
.require_resource = TRUE,
};
@@ -183,7 +183,7 @@ static GError *error = NULL;
static GMainLoop *mainloop = NULL;
static cib_t *cib_conn = NULL;
static pcmk_ipc_api_t *controld_api = NULL;
-static pe_working_set_t *data_set = NULL;
+static pcmk_scheduler_t *scheduler = NULL;
#define MESSAGE_TIMEOUT_S 60
@@ -227,8 +227,8 @@ bye(crm_exit_t ec)
mainloop = NULL;
}
- pe_free_working_set(data_set);
- data_set = NULL;
+ pe_free_working_set(scheduler);
+ scheduler = NULL;
crm_exit(ec);
return ec;
}
@@ -650,7 +650,7 @@ reset_options(void) {
options.require_node = FALSE;
options.require_cib = TRUE;
- options.require_dataset = TRUE;
+ options.require_scheduler = TRUE;
options.require_resource = TRUE;
options.find_flags = 0;
@@ -702,15 +702,15 @@ cleanup_refresh_cb(const gchar *option_name, const gchar *optarg, gpointer data,
if (getenv("CIB_file") == NULL) {
options.require_crmd = TRUE;
}
- options.find_flags = pe_find_renamed|pe_find_anon;
+ options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_anon_basename;
return TRUE;
}
gboolean
delete_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
SET_COMMAND(cmd_delete);
- options.require_dataset = FALSE;
- options.find_flags = pe_find_renamed|pe_find_any;
+ options.require_scheduler = FALSE;
+ options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_basename;
return TRUE;
}
@@ -725,7 +725,7 @@ static void
get_agent_spec(const gchar *optarg)
{
options.require_cib = FALSE;
- options.require_dataset = FALSE;
+ options.require_scheduler = FALSE;
options.require_resource = FALSE;
pcmk__str_update(&options.agent_spec, optarg);
}
@@ -754,7 +754,7 @@ list_standards_cb(const gchar *option_name, const gchar *optarg, gpointer data,
{
SET_COMMAND(cmd_list_standards);
options.require_cib = FALSE;
- options.require_dataset = FALSE;
+ options.require_scheduler = FALSE;
options.require_resource = FALSE;
return TRUE;
}
@@ -806,30 +806,36 @@ gboolean
flag_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
if (pcmk__str_any_of(option_name, "-U", "--clear", NULL)) {
SET_COMMAND(cmd_clear);
- options.find_flags = pe_find_renamed|pe_find_anon;
+ options.find_flags = pcmk_rsc_match_history
+ |pcmk_rsc_match_anon_basename;
} else if (pcmk__str_any_of(option_name, "-B", "--ban", NULL)) {
SET_COMMAND(cmd_ban);
- options.find_flags = pe_find_renamed|pe_find_anon;
+ options.find_flags = pcmk_rsc_match_history
+ |pcmk_rsc_match_anon_basename;
} else if (pcmk__str_any_of(option_name, "-M", "--move", NULL)) {
SET_COMMAND(cmd_move);
- options.find_flags = pe_find_renamed|pe_find_anon;
+ options.find_flags = pcmk_rsc_match_history
+ |pcmk_rsc_match_anon_basename;
} else if (pcmk__str_any_of(option_name, "-q", "--query-xml", NULL)) {
SET_COMMAND(cmd_query_xml);
- options.find_flags = pe_find_renamed|pe_find_any;
+ options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_basename;
} else if (pcmk__str_any_of(option_name, "-w", "--query-xml-raw", NULL)) {
SET_COMMAND(cmd_query_raw_xml);
- options.find_flags = pe_find_renamed|pe_find_any;
+ options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_basename;
} else if (pcmk__str_any_of(option_name, "-W", "--locate", NULL)) {
SET_COMMAND(cmd_locate);
- options.find_flags = pe_find_renamed|pe_find_anon;
+ options.find_flags = pcmk_rsc_match_history
+ |pcmk_rsc_match_anon_basename;
} else if (pcmk__str_any_of(option_name, "-a", "--constraints", NULL)) {
SET_COMMAND(cmd_colocations);
- options.find_flags = pe_find_renamed|pe_find_anon;
+ options.find_flags = pcmk_rsc_match_history
+ |pcmk_rsc_match_anon_basename;
} else if (pcmk__str_any_of(option_name, "-A", "--stack", NULL)) {
SET_COMMAND(cmd_colocations);
- options.find_flags = pe_find_renamed|pe_find_anon;
+ options.find_flags = pcmk_rsc_match_history
+ |pcmk_rsc_match_anon_basename;
options.recursive = TRUE;
}
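The changes in this hunk (and in several below) are one mechanical rename of the resource-matching flags; assuming the new enumerators are drop-in replacements for the old pe_find_* values, the mapping is:

/* pe_find_renamed -> pcmk_rsc_match_history
 * pe_find_anon    -> pcmk_rsc_match_anon_basename
 * pe_find_any     -> pcmk_rsc_match_basename
 */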
@@ -845,7 +851,7 @@ get_param_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data,
}
pcmk__str_update(&options.prop_name, optarg);
- options.find_flags = pe_find_renamed|pe_find_any;
+ options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_basename;
return TRUE;
}
@@ -876,16 +882,16 @@ set_delete_param_cb(const gchar *option_name, const gchar *optarg, gpointer data
}
pcmk__str_update(&options.prop_name, optarg);
- options.find_flags = pe_find_renamed|pe_find_any;
+ options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_basename;
return TRUE;
}
gboolean
set_prop_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
SET_COMMAND(cmd_set_property);
- options.require_dataset = FALSE;
+ options.require_scheduler = FALSE;
pcmk__str_update(&options.prop_name, optarg);
- options.find_flags = pe_find_renamed|pe_find_any;
+ options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_basename;
return TRUE;
}
@@ -904,7 +910,7 @@ validate_or_force_cb(const gchar *option_name, const gchar *optarg,
g_free(options.operation);
}
options.operation = g_strdup(option_name + 2); // skip "--"
- options.find_flags = pe_find_renamed|pe_find_anon;
+ options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_anon_basename;
if (options.override_params == NULL) {
options.override_params = pcmk__strkey_table(free, free);
}
@@ -925,7 +931,7 @@ restart_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
SET_COMMAND(cmd_restart);
- options.find_flags = pe_find_renamed|pe_find_anon;
+ options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_anon_basename;
return TRUE;
}
@@ -934,12 +940,12 @@ digests_cb(const gchar *option_name, const gchar *optarg, gpointer data,
GError **error)
{
SET_COMMAND(cmd_digests);
- options.find_flags = pe_find_renamed|pe_find_anon;
+ options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_anon_basename;
if (options.override_params == NULL) {
options.override_params = pcmk__strkey_table(free, free);
}
options.require_node = TRUE;
- options.require_dataset = TRUE;
+ options.require_scheduler = TRUE;
return TRUE;
}
@@ -947,7 +953,7 @@ gboolean
wait_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
SET_COMMAND(cmd_wait);
options.require_resource = FALSE;
- options.require_dataset = FALSE;
+ options.require_scheduler = FALSE;
return TRUE;
}
@@ -955,15 +961,16 @@ gboolean
why_cb(const gchar *option_name, const gchar *optarg, gpointer data, GError **error) {
SET_COMMAND(cmd_why);
options.require_resource = FALSE;
- options.find_flags = pe_find_renamed|pe_find_anon;
+ options.find_flags = pcmk_rsc_match_history|pcmk_rsc_match_anon_basename;
return TRUE;
}
static int
-ban_or_move(pcmk__output_t *out, pe_resource_t *rsc, const char *move_lifetime)
+ban_or_move(pcmk__output_t *out, pcmk_resource_t *rsc,
+ const char *move_lifetime)
{
int rc = pcmk_rc_ok;
- pe_node_t *current = NULL;
+ pcmk_node_t *current = NULL;
unsigned int nactive = 0;
CRM_CHECK(rsc != NULL, return EINVAL);
@@ -971,27 +978,29 @@ ban_or_move(pcmk__output_t *out, pe_resource_t *rsc, const char *move_lifetime)
current = pe__find_active_requires(rsc, &nactive);
if (nactive == 1) {
- rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime, NULL,
- cib_conn, options.cib_options, options.promoted_role_only);
+ rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime,
+ cib_conn, options.cib_options, options.promoted_role_only,
+ PCMK__ROLE_PROMOTED);
- } else if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
+ } else if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
int count = 0;
GList *iter = NULL;
current = NULL;
for(iter = rsc->children; iter; iter = iter->next) {
- pe_resource_t *child = (pe_resource_t *)iter->data;
+ pcmk_resource_t *child = (pcmk_resource_t *)iter->data;
enum rsc_role_e child_role = child->fns->state(child, TRUE);
- if (child_role == RSC_ROLE_PROMOTED) {
+ if (child_role == pcmk_role_promoted) {
count++;
current = pe__current_node(child);
}
}
if(count == 1 && current) {
- rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime, NULL,
- cib_conn, options.cib_options, options.promoted_role_only);
+ rc = cli_resource_ban(out, options.rsc_id, current->details->uname, move_lifetime,
+ cib_conn, options.cib_options, options.promoted_role_only,
+ PCMK__ROLE_PROMOTED);
} else {
rc = EINVAL;
@@ -1017,7 +1026,7 @@ ban_or_move(pcmk__output_t *out, pe_resource_t *rsc, const char *move_lifetime)
}
static void
-cleanup(pcmk__output_t *out, pe_resource_t *rsc, pe_node_t *node)
+cleanup(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node)
{
int rc = pcmk_rc_ok;
@@ -1027,8 +1036,9 @@ cleanup(pcmk__output_t *out, pe_resource_t *rsc, pe_node_t *node)
crm_debug("Erasing failures of %s (%s requested) on %s",
rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes"));
- rc = cli_resource_delete(controld_api, options.host_uname, rsc, options.operation,
- options.interval_spec, TRUE, data_set, options.force);
+ rc = cli_resource_delete(controld_api, options.host_uname, rsc,
+ options.operation, options.interval_spec, TRUE,
+ scheduler, options.force);
if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
// Show any reasons why resource might stay stopped
@@ -1047,20 +1057,21 @@ clear_constraints(pcmk__output_t *out, xmlNodePtr *cib_xml_copy)
GList *after = NULL;
GList *remaining = NULL;
GList *ele = NULL;
- pe_node_t *dest = NULL;
+ pcmk_node_t *dest = NULL;
int rc = pcmk_rc_ok;
if (!out->is_quiet(out)) {
- before = build_constraint_list(data_set->input);
+ before = build_constraint_list(scheduler->input);
}
if (options.clear_expired) {
- rc = cli_resource_clear_all_expired(data_set->input, cib_conn, options.cib_options,
- options.rsc_id, options.host_uname,
+ rc = cli_resource_clear_all_expired(scheduler->input, cib_conn,
+ options.cib_options, options.rsc_id,
+ options.host_uname,
options.promoted_role_only);
} else if (options.host_uname) {
- dest = pe_find_node(data_set->nodes, options.host_uname);
+ dest = pe_find_node(scheduler->nodes, options.host_uname);
if (dest == NULL) {
rc = pcmk_rc_node_unknown;
if (!out->is_quiet(out)) {
@@ -1072,7 +1083,7 @@ clear_constraints(pcmk__output_t *out, xmlNodePtr *cib_xml_copy)
cib_conn, options.cib_options, TRUE, options.force);
} else {
- rc = cli_resource_clear(options.rsc_id, NULL, data_set->nodes,
+ rc = cli_resource_clear(options.rsc_id, NULL, scheduler->nodes,
cib_conn, options.cib_options, TRUE, options.force);
}
@@ -1082,17 +1093,17 @@ clear_constraints(pcmk__output_t *out, xmlNodePtr *cib_xml_copy)
if (rc != pcmk_rc_ok) {
g_set_error(&error, PCMK__RC_ERROR, rc,
- _("Could not get modified CIB: %s\n"), pcmk_strerror(rc));
+ _("Could not get modified CIB: %s\n"), pcmk_rc_str(rc));
g_list_free(before);
free_xml(*cib_xml_copy);
*cib_xml_copy = NULL;
return rc;
}
- data_set->input = *cib_xml_copy;
- cluster_status(data_set);
+ scheduler->input = *cib_xml_copy;
+ cluster_status(scheduler);
- after = build_constraint_list(data_set->input);
+ after = build_constraint_list(scheduler->input);
remaining = pcmk__subtract_lists(before, after, (GCompareFunc) strcmp);
for (ele = remaining; ele != NULL; ele = ele->next) {
@@ -1131,119 +1142,7 @@ delete(void)
}
static int
-list_agents(pcmk__output_t *out, const char *agent_spec)
-{
- int rc = pcmk_rc_ok;
- char *provider = strchr(agent_spec, ':');
- lrmd_t *lrmd_conn = NULL;
- lrmd_list_t *list = NULL;
-
- rc = lrmd__new(&lrmd_conn, NULL, NULL, 0);
- if (rc != pcmk_rc_ok) {
- goto error;
- }
-
- if (provider) {
- *provider++ = 0;
- }
-
- rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, agent_spec, provider);
-
- if (rc > 0) {
- rc = out->message(out, "agents-list", list, agent_spec, provider);
- } else {
- rc = pcmk_rc_error;
- }
-
-error:
- if (rc != pcmk_rc_ok) {
- if (provider == NULL) {
- g_set_error(&error, PCMK__RC_ERROR, rc,
- _("No agents found for standard '%s'"), agent_spec);
- } else {
- g_set_error(&error, PCMK__RC_ERROR, rc,
- _("No agents found for standard '%s' and provider '%s'"),
- agent_spec, provider);
- }
- }
-
- lrmd_api_delete(lrmd_conn);
- return rc;
-}
-
-static int
-list_providers(pcmk__output_t *out, const char *agent_spec)
-{
- int rc;
- const char *text = NULL;
- lrmd_t *lrmd_conn = NULL;
- lrmd_list_t *list = NULL;
-
- rc = lrmd__new(&lrmd_conn, NULL, NULL, 0);
- if (rc != pcmk_rc_ok) {
- goto error;
- }
-
- switch (options.rsc_cmd) {
- case cmd_list_alternatives:
- rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list);
-
- if (rc > 0) {
- rc = out->message(out, "alternatives-list", list, agent_spec);
- } else {
- rc = pcmk_rc_error;
- }
-
- text = "OCF providers";
- break;
- case cmd_list_standards:
- rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list);
-
- if (rc > 0) {
- rc = out->message(out, "standards-list", list);
- } else {
- rc = pcmk_rc_error;
- }
-
- text = "standards";
- break;
- case cmd_list_providers:
- rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, agent_spec, &list);
-
- if (rc > 0) {
- rc = out->message(out, "providers-list", list, agent_spec);
- } else {
- rc = pcmk_rc_error;
- }
-
- text = "OCF providers";
- break;
- default:
- g_set_error(&error, PCMK__RC_ERROR, pcmk_rc_error, "Bug");
- lrmd_api_delete(lrmd_conn);
- return pcmk_rc_error;
- }
-
-error:
- if (rc != pcmk_rc_ok) {
- if (agent_spec != NULL) {
- rc = ENXIO;
- g_set_error(&error, PCMK__RC_ERROR, rc,
- _("No %s found for %s"), text, agent_spec);
-
- } else {
- rc = ENXIO;
- g_set_error(&error, PCMK__RC_ERROR, rc,
- _("No %s found"), text);
- }
- }
-
- lrmd_api_delete(lrmd_conn);
- return rc;
-}
-
-static int
-populate_working_set(xmlNodePtr *cib_xml_copy)
+initialize_scheduler_data(xmlNodePtr *cib_xml_copy)
{
int rc = pcmk_rc_ok;
@@ -1258,14 +1157,15 @@ populate_working_set(xmlNodePtr *cib_xml_copy)
}
if (rc == pcmk_rc_ok) {
- data_set = pe_new_working_set();
- if (data_set == NULL) {
+ scheduler = pe_new_working_set();
+ if (scheduler == NULL) {
rc = ENOMEM;
} else {
- pe__set_working_set_flags(data_set,
- pe_flag_no_counts|pe_flag_no_compat);
- data_set->priv = out;
- rc = update_working_set_xml(data_set, cib_xml_copy);
+ pe__set_working_set_flags(scheduler,
+ pcmk_sched_no_counts
+ |pcmk_sched_no_compat);
+ scheduler->priv = out;
+ rc = update_scheduler_input(scheduler, cib_xml_copy);
}
}
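Apart from the rename, the scheduler-data setup keeps the same shape as the old populate_working_set(); a condensed sketch using only the calls shown in this hunk (the function name is invented, error handling trimmed):

static int
example_init_scheduler(pcmk__output_t *out, xmlNode **cib_xml_copy)
{
    scheduler = pe_new_working_set();
    if (scheduler == NULL) {
        return ENOMEM;
    }
    pe__set_working_set_flags(scheduler,
                              pcmk_sched_no_counts|pcmk_sched_no_compat);
    scheduler->priv = out;     // the output object rides along for messages
    if (update_scheduler_input(scheduler, cib_xml_copy) != pcmk_rc_ok) {
        return pcmk_rc_error;
    }
    cluster_status(scheduler); // unpack nodes and resources from the input XML
    return pcmk_rc_ok;
}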
@@ -1275,7 +1175,7 @@ populate_working_set(xmlNodePtr *cib_xml_copy)
return rc;
}
- cluster_status(data_set);
+ cluster_status(scheduler);
return pcmk_rc_ok;
}
@@ -1287,7 +1187,7 @@ refresh(pcmk__output_t *out)
int attr_options = pcmk__node_attr_none;
if (options.host_uname) {
- pe_node_t *node = pe_find_node(data_set->nodes, options.host_uname);
+ pcmk_node_t *node = pe_find_node(scheduler->nodes, options.host_uname);
if (pe__is_guest_or_remote_node(node)) {
node = pe__current_node(node->details->remote_rsc);
@@ -1324,7 +1224,7 @@ refresh(pcmk__output_t *out)
}
static void
-refresh_resource(pcmk__output_t *out, pe_resource_t *rsc, pe_node_t *node)
+refresh_resource(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node)
{
int rc = pcmk_rc_ok;
@@ -1335,7 +1235,7 @@ refresh_resource(pcmk__output_t *out, pe_resource_t *rsc, pe_node_t *node)
crm_debug("Re-checking the state of %s (%s requested) on %s",
rsc->id, options.rsc_id, (options.host_uname? options.host_uname: "all nodes"));
rc = cli_resource_delete(controld_api, options.host_uname, rsc, NULL, 0,
- FALSE, data_set, options.force);
+ FALSE, scheduler, options.force);
if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
// Show any reasons why resource might stay stopped
@@ -1474,7 +1374,7 @@ validate_cmdline_config(void)
options.cmdline_params = pcmk__strkey_table(free, free);
}
options.require_resource = FALSE;
- options.require_dataset = FALSE;
+ options.require_scheduler = FALSE;
options.require_cib = FALSE;
}
@@ -1547,8 +1447,8 @@ int
main(int argc, char **argv)
{
xmlNode *cib_xml_copy = NULL;
- pe_resource_t *rsc = NULL;
- pe_node_t *node = NULL;
+ pcmk_resource_t *rsc = NULL;
+ pcmk_node_t *node = NULL;
int rc = pcmk_rc_ok;
GOptionGroup *output_group = NULL;
@@ -1730,7 +1630,7 @@ main(int argc, char **argv)
*/
if (options.find_flags && options.rsc_id) {
- options.require_dataset = TRUE;
+ options.require_scheduler = TRUE;
}
// Establish a connection to the CIB if needed
@@ -1752,9 +1652,9 @@ main(int argc, char **argv)
}
}
- /* Populate working set from XML file if specified or CIB query otherwise */
- if (options.require_dataset) {
- rc = populate_working_set(&cib_xml_copy);
+ // Populate scheduler data from XML file if specified or CIB query otherwise
+ if (options.require_scheduler) {
+ rc = initialize_scheduler_data(&cib_xml_copy);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
goto done;
@@ -1763,7 +1663,7 @@ main(int argc, char **argv)
// If command requires that resource exist if specified, find it
if (options.find_flags && options.rsc_id) {
- rsc = pe_find_resource_with_flags(data_set->resources, options.rsc_id,
+ rsc = pe_find_resource_with_flags(scheduler->resources, options.rsc_id,
options.find_flags);
if (rsc == NULL) {
exit_code = CRM_EX_NOSUCH;
@@ -1786,8 +1686,8 @@ main(int argc, char **argv)
}
// If user supplied a node name, check whether it exists
- if ((options.host_uname != NULL) && (data_set != NULL)) {
- node = pe_find_node(data_set->nodes, options.host_uname);
+ if ((options.host_uname != NULL) && (scheduler != NULL)) {
+ node = pe_find_node(scheduler->nodes, options.host_uname);
if (node == NULL) {
exit_code = CRM_EX_NOSUCH;
@@ -1808,11 +1708,12 @@ main(int argc, char **argv)
}
pcmk_register_ipc_callback(controld_api, controller_event_callback,
NULL);
- rc = pcmk_connect_ipc(controld_api, pcmk_ipc_dispatch_main);
+ rc = pcmk__connect_ipc(controld_api, pcmk_ipc_dispatch_main, 5);
if (rc != pcmk_rc_ok) {
exit_code = pcmk_rc2exitc(rc);
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
- _("Error connecting to the controller: %s"), pcmk_rc_str(rc));
+ _("Error connecting to %s: %s"),
+ pcmk_ipc_name(controld_api, true), pcmk_rc_str(rc));
goto done;
}
}
@@ -1825,7 +1726,7 @@ main(int argc, char **argv)
case cmd_list_resources: {
GList *all = NULL;
all = g_list_prepend(all, (gpointer) "*");
- rc = out->message(out, "resource-list", data_set,
+ rc = out->message(out, "resource-list", scheduler,
pcmk_show_inactive_rscs | pcmk_show_rsc_only | pcmk_show_pending,
true, all, all, false);
g_list_free(all);
@@ -1837,7 +1738,7 @@ main(int argc, char **argv)
}
case cmd_list_instances:
- rc = out->message(out, "resource-names-list", data_set->resources);
+ rc = out->message(out, "resource-names-list", scheduler->resources);
if (rc != pcmk_rc_ok) {
rc = ENXIO;
@@ -1845,14 +1746,20 @@ main(int argc, char **argv)
break;
- case cmd_list_standards:
- case cmd_list_providers:
case cmd_list_alternatives:
- rc = list_providers(out, options.agent_spec);
+ rc = pcmk__list_alternatives(out, options.agent_spec);
break;
case cmd_list_agents:
- rc = list_agents(out, options.agent_spec);
+ rc = pcmk__list_agents(out, options.agent_spec);
+ break;
+
+ case cmd_list_standards:
+ rc = pcmk__list_standards(out);
+ break;
+
+ case cmd_list_providers:
+ rc = pcmk__list_providers(out, options.agent_spec);
break;
case cmd_metadata:
@@ -1860,10 +1767,10 @@ main(int argc, char **argv)
break;
case cmd_restart:
- /* We don't pass data_set because rsc needs to stay valid for the
+ /* We don't pass scheduler because rsc needs to stay valid for the
* entire lifetime of cli_resource_restart(), but it will reset and
- * update the working set multiple times, so it needs to use its own
- * copy.
+ * update the scheduler data multiple times, so it needs to use its
+ * own copy.
*/
rc = cli_resource_restart(out, rsc, node, options.move_lifetime,
options.timeout_ms, cib_conn,
@@ -1885,13 +1792,13 @@ main(int argc, char **argv)
} else {
exit_code = cli_resource_execute(rsc, options.rsc_id,
options.operation, options.override_params,
- options.timeout_ms, cib_conn, data_set,
+ options.timeout_ms, cib_conn, scheduler,
args->verbosity, options.force, options.check_level);
}
goto done;
case cmd_digests:
- node = pe_find_node(data_set->nodes, options.host_uname);
+ node = pe_find_node(scheduler->nodes, options.host_uname);
if (node == NULL) {
rc = pcmk_rc_node_unknown;
} else {
@@ -1901,19 +1808,20 @@ main(int argc, char **argv)
break;
case cmd_colocations:
- rc = out->message(out, "locations-and-colocations", rsc, data_set,
+ rc = out->message(out, "locations-and-colocations", rsc,
options.recursive, (bool) options.force);
break;
case cmd_cts:
rc = pcmk_rc_ok;
- g_list_foreach(data_set->resources, (GFunc) cli_resource_print_cts, out);
- cli_resource_print_cts_constraints(data_set);
+ g_list_foreach(scheduler->resources, (GFunc) cli_resource_print_cts,
+ out);
+ cli_resource_print_cts_constraints(scheduler);
break;
case cmd_fail:
rc = cli_resource_fail(controld_api, options.host_uname,
- options.rsc_id, data_set);
+ options.rsc_id, scheduler);
if (rc == pcmk_rc_ok) {
start_mainloop(controld_api);
}
@@ -1922,28 +1830,28 @@ main(int argc, char **argv)
case cmd_list_active_ops:
rc = cli_resource_print_operations(options.rsc_id,
options.host_uname, TRUE,
- data_set);
+ scheduler);
break;
case cmd_list_all_ops:
rc = cli_resource_print_operations(options.rsc_id,
options.host_uname, FALSE,
- data_set);
+ scheduler);
break;
case cmd_locate: {
- GList *nodes = cli_resource_search(rsc, options.rsc_id, data_set);
+ GList *nodes = cli_resource_search(rsc, options.rsc_id, scheduler);
rc = out->message(out, "resource-search-list", nodes, options.rsc_id);
g_list_free_full(nodes, free);
break;
}
case cmd_query_xml:
- rc = cli_resource_print(rsc, data_set, true);
+ rc = cli_resource_print(rsc, scheduler, true);
break;
case cmd_query_raw_xml:
- rc = cli_resource_print(rsc, data_set, false);
+ rc = cli_resource_print(rsc, scheduler, false);
break;
case cmd_why:
@@ -1951,7 +1859,7 @@ main(int argc, char **argv)
rc = pcmk_rc_node_unknown;
} else {
rc = out->message(out, "resource-reasons-list",
- data_set->resources, rsc, node);
+ scheduler->resources, rsc, node);
}
break;
@@ -1965,7 +1873,7 @@ main(int argc, char **argv)
} else {
rc = cli_resource_move(rsc, options.rsc_id, options.host_uname,
options.move_lifetime, cib_conn,
- options.cib_options, data_set,
+ options.cib_options, scheduler,
options.promoted_role_only,
options.force);
}
@@ -1984,9 +1892,10 @@ main(int argc, char **argv)
rc = pcmk_rc_node_unknown;
} else {
rc = cli_resource_ban(out, options.rsc_id, node->details->uname,
- options.move_lifetime, NULL, cib_conn,
+ options.move_lifetime, cib_conn,
options.cib_options,
- options.promoted_role_only);
+ options.promoted_role_only,
+ PCMK__ROLE_PROMOTED);
}
if (rc == EINVAL) {
@@ -2011,7 +1920,7 @@ main(int argc, char **argv)
case cmd_get_param: {
unsigned int count = 0;
GHashTable *params = NULL;
- pe_node_t *current = rsc->fns->active_node(rsc, &count, NULL);
+ pcmk_node_t *current = rsc->fns->active_node(rsc, &count, NULL);
bool free_params = true;
const char* value = NULL;
@@ -2025,14 +1934,14 @@ main(int argc, char **argv)
crm_debug("Looking up %s in %s", options.prop_name, rsc->id);
if (pcmk__str_eq(options.attr_set_type, XML_TAG_ATTR_SETS, pcmk__str_none)) {
- params = pe_rsc_params(rsc, current, data_set);
+ params = pe_rsc_params(rsc, current, scheduler);
free_params = false;
value = g_hash_table_lookup(params, options.prop_name);
} else if (pcmk__str_eq(options.attr_set_type, XML_TAG_META_SETS, pcmk__str_none)) {
params = pcmk__strkey_table(free, free);
- get_meta_attributes(params, rsc, current, data_set);
+ get_meta_attributes(params, rsc, current, scheduler);
value = g_hash_table_lookup(params, options.prop_name);
@@ -2044,7 +1953,7 @@ main(int argc, char **argv)
} else {
params = pcmk__strkey_table(free, free);
pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_UTILIZATION, NULL, params,
- NULL, FALSE, data_set);
+ NULL, FALSE, scheduler);
value = g_hash_table_lookup(params, options.prop_name);
}
@@ -2092,7 +2001,7 @@ main(int argc, char **argv)
if (rsc == NULL) {
rc = cli_cleanup_all(controld_api, options.host_uname,
options.operation, options.interval_spec,
- data_set);
+ scheduler);
if (rc == pcmk_rc_ok) {
start_mainloop(controld_api);
}
diff --git a/tools/crm_resource.h b/tools/crm_resource.h
index dcd6c3d..dc86572 100644
--- a/tools/crm_resource.h
+++ b/tools/crm_resource.h
@@ -19,6 +19,7 @@
#include <crm/common/xml.h>
#include <crm/common/mainloop.h>
#include <crm/common/output_internal.h>
+#include <crm/common/scheduler_internal.h>
#include <crm/cib.h>
#include <crm/common/attrd_internal.h>
@@ -43,54 +44,56 @@ enum resource_check_flags {
};
typedef struct resource_checks_s {
- pe_resource_t *rsc; // Resource being checked
+ pcmk_resource_t *rsc; // Resource being checked
uint32_t flags; // Group of enum resource_check_flags
const char *lock_node; // Node that resource is shutdown-locked to, if any
} resource_checks_t;
-resource_checks_t *cli_check_resource(pe_resource_t *rsc, char *role_s, char *managed);
+resource_checks_t *cli_check_resource(pcmk_resource_t *rsc, char *role_s,
+ char *managed);
/* ban */
int cli_resource_prefer(pcmk__output_t *out, const char *rsc_id, const char *host,
const char *move_lifetime, cib_t * cib_conn, int cib_options,
- gboolean promoted_role_only);
+ gboolean promoted_role_only, const char *promoted_role);
int cli_resource_ban(pcmk__output_t *out, const char *rsc_id, const char *host,
- const char *move_lifetime, GList *allnodes, cib_t * cib_conn,
- int cib_options, gboolean promoted_role_only);
+ const char *move_lifetime, cib_t *cib_conn, int cib_options,
+ gboolean promoted_role_only, const char *promoted_role);
int cli_resource_clear(const char *rsc_id, const char *host, GList *allnodes,
cib_t * cib_conn, int cib_options, bool clear_ban_constraints, gboolean force);
int cli_resource_clear_all_expired(xmlNode *root, cib_t *cib_conn, int cib_options,
const char *rsc, const char *node, gboolean promoted_role_only);
/* print */
-void cli_resource_print_cts(pe_resource_t * rsc, pcmk__output_t *out);
-void cli_resource_print_cts_constraints(pe_working_set_t * data_set);
+void cli_resource_print_cts(pcmk_resource_t *rsc, pcmk__output_t *out);
+void cli_resource_print_cts_constraints(pcmk_scheduler_t *scheduler);
-int cli_resource_print(pe_resource_t *rsc, pe_working_set_t *data_set, bool expanded);
+int cli_resource_print(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler,
+ bool expanded);
int cli_resource_print_operations(const char *rsc_id, const char *host_uname,
- bool active, pe_working_set_t * data_set);
+ bool active, pcmk_scheduler_t *scheduler);
/* runtime */
-int cli_resource_check(pcmk__output_t *out, pe_resource_t *rsc,
- pe_node_t *node);
+int cli_resource_check(pcmk__output_t *out, pcmk_resource_t *rsc,
+ pcmk_node_t *node);
int cli_resource_fail(pcmk_ipc_api_t *controld_api, const char *host_uname,
- const char *rsc_id, pe_working_set_t *data_set);
-GList *cli_resource_search(pe_resource_t *rsc, const char *requested_name,
- pe_working_set_t *data_set);
+ const char *rsc_id, pcmk_scheduler_t *scheduler);
+GList *cli_resource_search(pcmk_resource_t *rsc, const char *requested_name,
+ pcmk_scheduler_t *scheduler);
int cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname,
- const pe_resource_t *rsc, const char *operation,
+ const pcmk_resource_t *rsc, const char *operation,
const char *interval_spec, bool just_failures,
- pe_working_set_t *data_set, gboolean force);
+ pcmk_scheduler_t *scheduler, gboolean force);
int cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name,
const char *operation, const char *interval_spec,
- pe_working_set_t *data_set);
-int cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc,
- const pe_node_t *node, const char *move_lifetime,
+ pcmk_scheduler_t *scheduler);
+int cli_resource_restart(pcmk__output_t *out, pcmk_resource_t *rsc,
+ const pcmk_node_t *node, const char *move_lifetime,
int timeout_ms, cib_t *cib, int cib_options,
gboolean promoted_role_only, gboolean force);
-int cli_resource_move(const pe_resource_t *rsc, const char *rsc_id,
+int cli_resource_move(const pcmk_resource_t *rsc, const char *rsc_id,
const char *host_name, const char *move_lifetime,
- cib_t *cib, int cib_options, pe_working_set_t *data_set,
+ cib_t *cib, int cib_options, pcmk_scheduler_t *scheduler,
gboolean promoted_role_only, gboolean force);
crm_exit_t cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name,
const char *rsc_class, const char *rsc_prov,
@@ -98,24 +101,28 @@ crm_exit_t cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc
GHashTable *params, GHashTable *override_hash,
int timeout_ms, int resource_verbose,
gboolean force, int check_level);
-crm_exit_t cli_resource_execute(pe_resource_t *rsc, const char *requested_name,
+crm_exit_t cli_resource_execute(pcmk_resource_t *rsc,
+ const char *requested_name,
const char *rsc_action, GHashTable *override_hash,
- int timeout_ms, cib_t *cib, pe_working_set_t *data_set,
+ int timeout_ms, cib_t *cib,
+ pcmk_scheduler_t *scheduler,
int resource_verbose, gboolean force, int check_level);
-int cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name,
+int cli_resource_update_attribute(pcmk_resource_t *rsc,
+ const char *requested_name,
const char *attr_set, const char *attr_set_type,
const char *attr_id, const char *attr_name,
const char *attr_value, gboolean recursive,
cib_t *cib, int cib_options, gboolean force);
-int cli_resource_delete_attribute(pe_resource_t *rsc, const char *requested_name,
+int cli_resource_delete_attribute(pcmk_resource_t *rsc,
+ const char *requested_name,
const char *attr_set, const char *attr_set_type,
const char *attr_id, const char *attr_name,
cib_t *cib, int cib_options, gboolean force);
-int update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml);
+int update_scheduler_input(pcmk_scheduler_t *scheduler, xmlNode **xml);
int wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib);
-bool resource_is_running_on(pe_resource_t *rsc, const char *host);
+bool resource_is_running_on(pcmk_resource_t *rsc, const char *host);
void crm_resource_register_messages(pcmk__output_t *out);
diff --git a/tools/crm_resource_ban.c b/tools/crm_resource_ban.c
index b1edac8..3b0e4a1 100644
--- a/tools/crm_resource_ban.c
+++ b/tools/crm_resource_ban.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2021 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
*
* The version control history for this file may have further details.
*
@@ -56,42 +56,17 @@ parse_cli_lifetime(pcmk__output_t *out, const char *move_lifetime)
return later_s;
}
-static const char *
-promoted_role_name(void)
-{
- /* This is a judgment call for what string to use. @TODO Ideally we'd
- * use the legacy string if the DC only supports that, and the new one
- * otherwise. Basing it on --enable-compat-2.0 is a decent guess.
- */
-#ifdef PCMK__COMPAT_2_0
- return RSC_ROLE_PROMOTED_LEGACY_S;
-#else
- return RSC_ROLE_PROMOTED_S;
-#endif
-}
-
// \return Standard Pacemaker return code
int
cli_resource_ban(pcmk__output_t *out, const char *rsc_id, const char *host,
- const char *move_lifetime, GList *allnodes, cib_t * cib_conn,
- int cib_options, gboolean promoted_role_only)
+ const char *move_lifetime, cib_t * cib_conn, int cib_options,
+ gboolean promoted_role_only, const char *promoted_role)
{
char *later_s = NULL;
int rc = pcmk_rc_ok;
xmlNode *fragment = NULL;
xmlNode *location = NULL;
- if(host == NULL) {
- GList *n = allnodes;
- for(; n && rc == pcmk_rc_ok; n = n->next) {
- pe_node_t *target = n->data;
-
- rc = cli_resource_ban(out, rsc_id, target->details->uname, move_lifetime,
- NULL, cib_conn, cib_options, promoted_role_only);
- }
- return rc;
- }
-
later_s = parse_cli_lifetime(out, move_lifetime);
if(move_lifetime && later_s == NULL) {
return EINVAL;
@@ -114,9 +89,9 @@ cli_resource_ban(pcmk__output_t *out, const char *rsc_id, const char *host,
crm_xml_add(location, XML_LOC_ATTR_SOURCE, rsc_id);
if(promoted_role_only) {
- crm_xml_add(location, XML_RULE_ATTR_ROLE, promoted_role_name());
+ crm_xml_add(location, XML_RULE_ATTR_ROLE, promoted_role);
} else {
- crm_xml_add(location, XML_RULE_ATTR_ROLE, RSC_ROLE_STARTED_S);
+ crm_xml_add(location, XML_RULE_ATTR_ROLE, PCMK__ROLE_STARTED);
}
if (later_s == NULL) {
@@ -151,14 +126,24 @@ cli_resource_ban(pcmk__output_t *out, const char *rsc_id, const char *host,
free_xml(fragment);
free(later_s);
+
+ if (rc != pcmk_rc_ok && promoted_role_only && strcmp(promoted_role, PCMK__ROLE_PROMOTED) == 0) {
+ int banrc = cli_resource_ban(out, rsc_id, host, move_lifetime,
+ cib_conn, cib_options, promoted_role_only,
+ PCMK__ROLE_PROMOTED_LEGACY);
+ if (banrc == pcmk_rc_ok) {
+ rc = banrc;
+ }
+ }
+
return rc;
}
// \return Standard Pacemaker return code
int
cli_resource_prefer(pcmk__output_t *out,const char *rsc_id, const char *host,
- const char *move_lifetime, cib_t * cib_conn, int cib_options,
- gboolean promoted_role_only)
+ const char *move_lifetime, cib_t *cib_conn, int cib_options,
+ gboolean promoted_role_only, const char *promoted_role)
{
char *later_s = parse_cli_lifetime(out, move_lifetime);
int rc = pcmk_rc_ok;
@@ -181,9 +166,9 @@ cli_resource_prefer(pcmk__output_t *out,const char *rsc_id, const char *host,
crm_xml_add(location, XML_LOC_ATTR_SOURCE, rsc_id);
if(promoted_role_only) {
- crm_xml_add(location, XML_RULE_ATTR_ROLE, promoted_role_name());
+ crm_xml_add(location, XML_RULE_ATTR_ROLE, promoted_role);
} else {
- crm_xml_add(location, XML_RULE_ATTR_ROLE, RSC_ROLE_STARTED_S);
+ crm_xml_add(location, XML_RULE_ATTR_ROLE, PCMK__ROLE_STARTED);
}
if (later_s == NULL) {
@@ -218,6 +203,16 @@ cli_resource_prefer(pcmk__output_t *out,const char *rsc_id, const char *host,
free_xml(fragment);
free(later_s);
+
+ if (rc != pcmk_rc_ok && promoted_role_only && strcmp(promoted_role, PCMK__ROLE_PROMOTED) == 0) {
+ int preferrc = cli_resource_prefer(out, rsc_id, host, move_lifetime,
+ cib_conn, cib_options, promoted_role_only,
+ PCMK__ROLE_PROMOTED_LEGACY);
+ if (preferrc == pcmk_rc_ok) {
+ rc = preferrc;
+ }
+ }
+
return rc;
}
@@ -335,7 +330,7 @@ cli_resource_clear(const char *rsc_id, const char *host, GList *allnodes, cib_t
* On the first error, abort.
*/
for(; n; n = n->next) {
- pe_node_t *target = n->data;
+ pcmk_node_t *target = n->data;
rc = cli_resource_clear(rsc_id, target->details->uname, NULL,
cib_conn, cib_options, clear_ban_constraints,
@@ -358,6 +353,9 @@ build_clear_xpath_string(GString *buf, const xmlNode *constraint_node,
const char *cons_rsc = crm_element_value(constraint_node,
XML_LOC_ATTR_SOURCE);
GString *rsc_role_substr = NULL;
+ const char *promoted_role_rule = "@" XML_RULE_ATTR_ROLE "='" PCMK__ROLE_PROMOTED
+ "' or @" XML_RULE_ATTR_ROLE "='"
+ PCMK__ROLE_PROMOTED_LEGACY "'";
CRM_ASSERT(buf != NULL);
g_string_truncate(buf, 0);
@@ -384,8 +382,7 @@ build_clear_xpath_string(GString *buf, const xmlNode *constraint_node,
rsc_role_substr = g_string_sized_new(64);
pcmk__g_strcat(rsc_role_substr,
"@" XML_LOC_ATTR_SOURCE "='", rsc, "' "
- "and @" XML_RULE_ATTR_ROLE "='",
- promoted_role_name(), "'", NULL);
+ "and (" , promoted_role_rule, ")", NULL);
} else if (rsc != NULL) {
rsc_role_substr = g_string_sized_new(64);
@@ -394,9 +391,7 @@ build_clear_xpath_string(GString *buf, const xmlNode *constraint_node,
} else if (promoted_role_only) {
rsc_role_substr = g_string_sized_new(64);
- pcmk__g_strcat(rsc_role_substr,
- "@" XML_RULE_ATTR_ROLE "='", promoted_role_name(),
- "'", NULL);
+ g_string_append(rsc_role_substr, promoted_role_rule);
}
if (rsc_role_substr != NULL) {
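The ban and prefer changes above drop the compile-time promoted_role_name() helper. The constraint is now written with the current role string (PCMK__ROLE_PROMOTED) first; only if that CIB update fails is it retried with the legacy role string, and the original error is kept when the retry also fails. The same idea restated as a minimal sketch, with apply_role_constraint() standing in as a hypothetical helper for the actual CIB update:

    static int
    apply_with_role_fallback(const char *rsc_id, const char *host)
    {
        // Try the current role name first
        int rc = apply_role_constraint(rsc_id, host, PCMK__ROLE_PROMOTED);

        if (rc != pcmk_rc_ok) {
            // Older configurations may only accept the legacy role string
            int retry_rc = apply_role_constraint(rsc_id, host,
                                                 PCMK__ROLE_PROMOTED_LEGACY);

            if (retry_rc == pcmk_rc_ok) {
                rc = retry_rc;   // keep the original error if the retry also fails
            }
        }
        return rc;
    }

Correspondingly, build_clear_xpath_string() now matches a rule whose role is either string, so constraints created with the legacy name are still cleared.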
diff --git a/tools/crm_resource_print.c b/tools/crm_resource_print.c
index c1be53c..bdf3ad9 100644
--- a/tools/crm_resource_print.c
+++ b/tools/crm_resource_print.c
@@ -20,8 +20,8 @@
static int
print_constraint(xmlNode *xml_obj, void *userdata)
{
- pe_working_set_t *data_set = (pe_working_set_t *) userdata;
- pcmk__output_t *out = data_set->priv;
+ pcmk_scheduler_t *scheduler = (pcmk_scheduler_t *) userdata;
+ pcmk__output_t *out = scheduler->priv;
xmlNode *lifetime = NULL;
const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
@@ -31,16 +31,16 @@ print_constraint(xmlNode *xml_obj, void *userdata)
// @COMPAT lifetime is deprecated
lifetime = first_named_child(xml_obj, "lifetime");
- if (pe_evaluate_rules(lifetime, NULL, data_set->now, NULL) == FALSE) {
+ if (pe_evaluate_rules(lifetime, NULL, scheduler->now, NULL) == FALSE) {
return pcmk_rc_ok;
}
- if (!pcmk__str_eq(XML_CONS_TAG_RSC_DEPEND, crm_element_name(xml_obj), pcmk__str_casei)) {
+ if (!pcmk__xe_is(xml_obj, XML_CONS_TAG_RSC_DEPEND)) {
return pcmk_rc_ok;
}
out->info(out, "Constraint %s %s %s %s %s %s %s",
- crm_element_name(xml_obj),
+ xml_obj->name,
cons_string(crm_element_value(xml_obj, XML_ATTR_ID)),
cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE)),
cons_string(crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET)),
@@ -52,21 +52,22 @@ print_constraint(xmlNode *xml_obj, void *userdata)
}
void
-cli_resource_print_cts_constraints(pe_working_set_t * data_set)
+cli_resource_print_cts_constraints(pcmk_scheduler_t *scheduler)
{
- pcmk__xe_foreach_child(pcmk_find_cib_element(data_set->input, XML_CIB_TAG_CONSTRAINTS),
- NULL, print_constraint, data_set);
+ pcmk__xe_foreach_child(pcmk_find_cib_element(scheduler->input,
+ XML_CIB_TAG_CONSTRAINTS),
+ NULL, print_constraint, scheduler);
}
void
-cli_resource_print_cts(pe_resource_t * rsc, pcmk__output_t *out)
+cli_resource_print_cts(pcmk_resource_t *rsc, pcmk__output_t *out)
{
const char *host = NULL;
bool needs_quorum = TRUE;
const char *rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE);
const char *rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
const char *rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
- pe_node_t *node = pe__current_node(rsc);
+ pcmk_node_t *node = pe__current_node(rsc);
if (pcmk__str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
needs_quorum = FALSE;
@@ -79,7 +80,7 @@ cli_resource_print_cts(pe_resource_t * rsc, pcmk__output_t *out)
}
out->info(out, "Resource: %s %s %s %s %s %s %s %s %d %lld %#.16llx",
- crm_element_name(rsc->xml), rsc->id,
+ rsc->xml->name, rsc->id,
rsc->clone_name ? rsc->clone_name : rsc->id, rsc->parent ? rsc->parent->id : "NA",
rprov ? rprov : "NA", rclass, rtype, host ? host : "NA", needs_quorum, rsc->flags,
rsc->flags);
@@ -90,11 +91,11 @@ cli_resource_print_cts(pe_resource_t * rsc, pcmk__output_t *out)
// \return Standard Pacemaker return code
int
cli_resource_print_operations(const char *rsc_id, const char *host_uname,
- bool active, pe_working_set_t * data_set)
+ bool active, pcmk_scheduler_t *scheduler)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
int rc = pcmk_rc_no_output;
- GList *ops = find_operations(rsc_id, host_uname, active, data_set);
+ GList *ops = find_operations(rsc_id, host_uname, active, scheduler);
if (!ops) {
return rc;
@@ -105,7 +106,7 @@ cli_resource_print_operations(const char *rsc_id, const char *host_uname,
for (GList *lpc = ops; lpc != NULL; lpc = lpc->next) {
xmlNode *xml_op = (xmlNode *) lpc->data;
- out->message(out, "node-and-op", data_set, xml_op);
+ out->message(out, "node-and-op", scheduler, xml_op);
}
out->end_list(out);
@@ -114,9 +115,10 @@ cli_resource_print_operations(const char *rsc_id, const char *host_uname,
// \return Standard Pacemaker return code
int
-cli_resource_print(pe_resource_t *rsc, pe_working_set_t *data_set, bool expanded)
+cli_resource_print(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler,
+ bool expanded)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
uint32_t show_opts = pcmk_show_pending;
GList *all = NULL;
@@ -131,10 +133,11 @@ cli_resource_print(pe_resource_t *rsc, pe_working_set_t *data_set, bool expanded
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("attribute-list", "pe_resource_t *", "const char *", "const char *")
+PCMK__OUTPUT_ARGS("attribute-list", "pcmk_resource_t *", "const char *",
+ "const char *")
static int
attribute_list_default(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
const char *attr = va_arg(args, char *);
const char *value = va_arg(args, const char *);
@@ -224,10 +227,11 @@ agent_status_xml(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("attribute-list", "pe_resource_t *", "const char *", "const char *")
+PCMK__OUTPUT_ARGS("attribute-list", "pcmk_resource_t *", "const char *",
+ "const char *")
static int
attribute_list_text(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
const char *attr = va_arg(args, char *);
const char *value = va_arg(args, const char *);
@@ -276,10 +280,10 @@ override_xml(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("property-list", "pe_resource_t *", "const char *")
+PCMK__OUTPUT_ARGS("property-list", "pcmk_resource_t *", "const char *")
static int
property_list_default(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
const char *attr = va_arg(args, char *);
const char *value = crm_element_value(rsc->xml, attr);
@@ -293,10 +297,10 @@ property_list_default(pcmk__output_t *out, va_list args) {
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("property-list", "pe_resource_t *", "const char *")
+PCMK__OUTPUT_ARGS("property-list", "pcmk_resource_t *", "const char *")
static int
property_list_text(pcmk__output_t *out, va_list args) {
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
const char *attr = va_arg(args, const char *);
const char *value = crm_element_value(rsc->xml, attr);
@@ -346,7 +350,8 @@ resource_agent_action_default(pcmk__output_t *out, va_list args) {
type, rc, exit_reason);
/* hide output for validate-all if not in verbose */
- if (verbose == 0 && pcmk__str_eq(action, "validate-all", pcmk__str_casei)) {
+ if ((verbose == 0)
+ && pcmk__str_eq(action, PCMK_ACTION_VALIDATE_ALL, pcmk__str_casei)) {
return pcmk_rc_ok;
}
@@ -441,7 +446,7 @@ static int
resource_check_list_default(pcmk__output_t *out, va_list args) {
resource_checks_t *checks = va_arg(args, resource_checks_t *);
- const pe_resource_t *parent = pe__const_top_resource(checks->rsc, false);
+ const pcmk_resource_t *parent = pe__const_top_resource(checks->rsc, false);
if (checks->flags == 0) {
return pcmk_rc_no_output;
@@ -487,7 +492,7 @@ static int
resource_check_list_xml(pcmk__output_t *out, va_list args) {
resource_checks_t *checks = va_arg(args, resource_checks_t *);
- const pe_resource_t *parent = pe__const_top_resource(checks->rsc, false);
+ const pcmk_resource_t *parent = pe__const_top_resource(checks->rsc, false);
xmlNodePtr node = pcmk__output_create_xml_node(out, "check",
"id", parent->id,
@@ -547,9 +552,9 @@ resource_search_list_default(pcmk__output_t *out, va_list args)
if (ni->promoted) {
#ifdef PCMK__COMPAT_2_0
- role_text = " " RSC_ROLE_PROMOTED_LEGACY_S;
+ role_text = " " PCMK__ROLE_PROMOTED_LEGACY;
#else
- role_text = " " RSC_ROLE_PROMOTED_S;
+ role_text = " " PCMK__ROLE_PROMOTED;
#endif
}
out->list_item(out, "node", "resource %s is running on: %s%s",
@@ -587,14 +592,14 @@ resource_search_list_xml(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("resource-reasons-list", "GList *", "pe_resource_t *",
- "pe_node_t *")
+PCMK__OUTPUT_ARGS("resource-reasons-list", "GList *", "pcmk_resource_t *",
+ "pcmk_node_t *")
static int
resource_reasons_list_default(pcmk__output_t *out, va_list args)
{
GList *resources = va_arg(args, GList *);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- pe_node_t *node = va_arg(args, pe_node_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
+ pcmk_node_t *node = va_arg(args, pcmk_node_t *);
const char *host_uname = (node == NULL)? NULL : node->details->uname;
@@ -605,7 +610,7 @@ resource_reasons_list_default(pcmk__output_t *out, va_list args)
GList *hosts = NULL;
for (lpc = resources; lpc != NULL; lpc = lpc->next) {
- pe_resource_t *rsc = (pe_resource_t *) lpc->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data;
rsc->fns->location(rsc, &hosts, TRUE);
if (hosts == NULL) {
@@ -638,14 +643,14 @@ resource_reasons_list_default(pcmk__output_t *out, va_list args)
GList *lpc = NULL;
for (lpc = activeResources; lpc != NULL; lpc = lpc->next) {
- pe_resource_t *rsc = (pe_resource_t *) lpc->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data;
out->list_item(out, "reason", "Resource %s is running on host %s",
rsc->id, host_uname);
cli_resource_check(out, rsc, node);
}
for(lpc = unactiveResources; lpc != NULL; lpc = lpc->next) {
- pe_resource_t *rsc = (pe_resource_t *) lpc->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data;
out->list_item(out, "reason", "Resource %s is assigned to host %s but not running",
rsc->id, host_uname);
cli_resource_check(out, rsc, node);
@@ -669,14 +674,14 @@ resource_reasons_list_default(pcmk__output_t *out, va_list args)
return pcmk_rc_ok;
}
-PCMK__OUTPUT_ARGS("resource-reasons-list", "GList *", "pe_resource_t *",
- "pe_node_t *")
+PCMK__OUTPUT_ARGS("resource-reasons-list", "GList *", "pcmk_resource_t *",
+ "pcmk_node_t *")
static int
resource_reasons_list_xml(pcmk__output_t *out, va_list args)
{
GList *resources = va_arg(args, GList *);
- pe_resource_t *rsc = va_arg(args, pe_resource_t *);
- pe_node_t *node = va_arg(args, pe_node_t *);
+ pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
+ pcmk_node_t *node = va_arg(args, pcmk_node_t *);
const char *host_uname = (node == NULL)? NULL : node->details->uname;
@@ -689,7 +694,7 @@ resource_reasons_list_xml(pcmk__output_t *out, va_list args)
pcmk__output_xml_create_parent(out, "resources", NULL);
for (lpc = resources; lpc != NULL; lpc = lpc->next) {
- pe_resource_t *rsc = (pe_resource_t *) lpc->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data;
rsc->fns->location(rsc, &hosts, TRUE);
@@ -723,7 +728,7 @@ resource_reasons_list_xml(pcmk__output_t *out, va_list args)
pcmk__output_xml_create_parent(out, "resources", NULL);
for (lpc = activeResources; lpc != NULL; lpc = lpc->next) {
- pe_resource_t *rsc = (pe_resource_t *) lpc->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data;
pcmk__output_xml_create_parent(out, "resource",
"id", rsc->id,
@@ -736,7 +741,7 @@ resource_reasons_list_xml(pcmk__output_t *out, va_list args)
}
for(lpc = unactiveResources; lpc != NULL; lpc = lpc->next) {
- pe_resource_t *rsc = (pe_resource_t *) lpc->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) lpc->data;
pcmk__output_xml_create_parent(out, "resource",
"id", rsc->id,
@@ -766,7 +771,8 @@ resource_reasons_list_xml(pcmk__output_t *out, va_list args)
}
static void
-add_resource_name(pe_resource_t *rsc, pcmk__output_t *out) {
+add_resource_name(pcmk_resource_t *rsc, pcmk__output_t *out)
+{
if (rsc->children == NULL) {
out->list_item(out, "resource", "%s", rsc->id);
} else {
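Beyond the type renames, crm_resource_print.c switches from crm_element_name() to reading the element's name field directly and from a case-insensitive name comparison to the pcmk__xe_is() helper. Pulled out of the diff for readability, the element-name test changes like this:

    /* 2.1.6: compare the element name as a string */
    if (!pcmk__str_eq(XML_CONS_TAG_RSC_DEPEND, crm_element_name(xml_obj), pcmk__str_casei)) {
        return pcmk_rc_ok;
    }

    /* 2.1.7: ask whether the element is the given tag */
    if (!pcmk__xe_is(xml_obj, XML_CONS_TAG_RSC_DEPEND)) {
        return pcmk_rc_ok;
    }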
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index f25dbbc..da360fd 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -16,22 +16,22 @@
#include <crm/services_internal.h>
static GList *
-build_node_info_list(const pe_resource_t *rsc)
+build_node_info_list(const pcmk_resource_t *rsc)
{
GList *retval = NULL;
for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) {
- const pe_resource_t *child = (const pe_resource_t *) iter->data;
+ const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data;
for (const GList *iter2 = child->running_on;
iter2 != NULL; iter2 = iter2->next) {
- const pe_node_t *node = (const pe_node_t *) iter2->data;
+ const pcmk_node_t *node = (const pcmk_node_t *) iter2->data;
node_info_t *ni = calloc(1, sizeof(node_info_t));
ni->node_name = node->details->uname;
- ni->promoted = pcmk_is_set(rsc->flags, pe_rsc_promotable) &&
- child->fns->state(child, TRUE) == RSC_ROLE_PROMOTED;
+ ni->promoted = pcmk_is_set(rsc->flags, pcmk_rsc_promotable) &&
+ child->fns->state(child, TRUE) == pcmk_role_promoted;
retval = g_list_prepend(retval, ni);
}
@@ -41,18 +41,18 @@ build_node_info_list(const pe_resource_t *rsc)
}
GList *
-cli_resource_search(pe_resource_t *rsc, const char *requested_name,
- pe_working_set_t *data_set)
+cli_resource_search(pcmk_resource_t *rsc, const char *requested_name,
+ pcmk_scheduler_t *scheduler)
{
GList *retval = NULL;
- const pe_resource_t *parent = pe__const_top_resource(rsc, false);
+ const pcmk_resource_t *parent = pe__const_top_resource(rsc, false);
if (pe_rsc_is_clone(rsc)) {
retval = build_node_info_list(rsc);
/* The anonymous clone children's common ID is supplied */
} else if (pe_rsc_is_clone(parent)
- && !pcmk_is_set(rsc->flags, pe_rsc_unique)
+ && !pcmk_is_set(rsc->flags, pcmk_rsc_unique)
&& rsc->clone_name
&& pcmk__str_eq(requested_name, rsc->clone_name, pcmk__str_casei)
&& !pcmk__str_eq(requested_name, rsc->id, pcmk__str_casei)) {
@@ -61,10 +61,10 @@ cli_resource_search(pe_resource_t *rsc, const char *requested_name,
} else if (rsc->running_on != NULL) {
for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) iter->data;
+ pcmk_node_t *node = (pcmk_node_t *) iter->data;
node_info_t *ni = calloc(1, sizeof(node_info_t));
ni->node_name = node->details->uname;
- ni->promoted = (rsc->fns->state(rsc, TRUE) == RSC_ROLE_PROMOTED);
+ ni->promoted = (rsc->fns->state(rsc, TRUE) == pcmk_role_promoted);
retval = g_list_prepend(retval, ni);
}
@@ -133,7 +133,7 @@ find_resource_attr(pcmk__output_t *out, cib_t * the_cib, const char *attr,
}
crm_log_xml_debug(xml_search, "Match");
- if (xml_has_children(xml_search)) {
+ if (xml_search->children != NULL) {
xmlNode *child = NULL;
rc = ENOTUNIQ;
@@ -159,8 +159,9 @@ find_resource_attr(pcmk__output_t *out, cib_t * the_cib, const char *attr,
/* PRIVATE. Use the find_matching_attr_resources instead. */
static void
-find_matching_attr_resources_recursive(pcmk__output_t *out, GList/* <pe_resource_t*> */ ** result,
- pe_resource_t * rsc, const char * rsc_id,
+find_matching_attr_resources_recursive(pcmk__output_t *out,
+ GList /* <pcmk_resource_t*> */ **result,
+ pcmk_resource_t *rsc, const char *rsc_id,
const char * attr_set, const char * attr_set_type,
const char * attr_id, const char * attr_name,
cib_t * cib, const char * cmd, int depth)
@@ -171,18 +172,19 @@ find_matching_attr_resources_recursive(pcmk__output_t *out, GList/* <pe_resource
/* visit the children */
for(GList *gIter = rsc->children; gIter; gIter = gIter->next) {
- find_matching_attr_resources_recursive(out, result, (pe_resource_t*)gIter->data,
+ find_matching_attr_resources_recursive(out, result,
+ (pcmk_resource_t *) gIter->data,
rsc_id, attr_set, attr_set_type,
attr_id, attr_name, cib, cmd, depth+1);
/* do it only once for clones */
- if(pe_clone == rsc->variant) {
+ if (rsc->variant == pcmk_rsc_variant_clone) {
break;
}
}
rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
attr_set, attr_id, attr_name, &local_attr_id);
- /* Post-order traversal.
+ /* Post-order traversal.
* The root is always on the list and it is the last item. */
if((0 == depth) || (pcmk_rc_ok == rc)) {
/* push the head */
@@ -195,8 +197,8 @@ find_matching_attr_resources_recursive(pcmk__output_t *out, GList/* <pe_resource
/* The result is a linearized pre-ordered tree of resources. */
-static GList/*<pe_resource_t*>*/ *
-find_matching_attr_resources(pcmk__output_t *out, pe_resource_t * rsc,
+static GList/*<pcmk_resource_t*>*/ *
+find_matching_attr_resources(pcmk__output_t *out, pcmk_resource_t *rsc,
const char * rsc_id, const char * attr_set,
const char * attr_set_type, const char * attr_id,
const char * attr_name, cib_t * cib, const char * cmd,
@@ -212,7 +214,8 @@ find_matching_attr_resources(pcmk__output_t *out, pe_resource_t * rsc,
if(force == TRUE) {
return g_list_append(result, rsc);
}
- if(rsc->parent && pe_clone == rsc->parent->variant) {
+ if ((rsc->parent != NULL)
+ && (rsc->parent->variant == pcmk_rsc_variant_clone)) {
int rc = pcmk_rc_ok;
char *local_attr_id = NULL;
rc = find_resource_attr(out, cib, XML_ATTR_ID, rsc_id, attr_set_type,
@@ -225,10 +228,12 @@ find_matching_attr_resources(pcmk__output_t *out, pe_resource_t * rsc,
cmd, attr_name, rsc->id, rsc_id);
}
return g_list_append(result, rsc);
- } else if(rsc->parent == NULL && rsc->children && pe_clone == rsc->variant) {
- pe_resource_t *child = rsc->children->data;
- if(child->variant == pe_native) {
+ } else if ((rsc->parent == NULL) && (rsc->children != NULL)
+ && (rsc->variant == pcmk_rsc_variant_clone)) {
+ pcmk_resource_t *child = rsc->children->data;
+
+ if (child->variant == pcmk_rsc_variant_primitive) {
lookup_id = clone_strip(child->id); /* Could be a cloned group! */
rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
attr_set, attr_id, attr_name, &local_attr_id);
@@ -253,7 +258,7 @@ find_matching_attr_resources(pcmk__output_t *out, pe_resource_t * rsc,
// \return Standard Pacemaker return code
int
-cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name,
+cli_resource_update_attribute(pcmk_resource_t *rsc, const char *requested_name,
const char *attr_set, const char *attr_set_type,
const char *attr_id, const char *attr_name,
const char *attr_value, gboolean recursive,
@@ -264,7 +269,7 @@ cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name,
char *found_attr_id = NULL;
- GList/*<pe_resource_t*>*/ *resources = NULL;
+ GList/*<pcmk_resource_t*>*/ *resources = NULL;
const char *top_id = pe__const_top_resource(rsc, false)->id;
if ((attr_id == NULL) && !force) {
@@ -333,7 +338,7 @@ cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name,
xmlNode *xml_obj = NULL;
found_attr_id = NULL;
- rsc = (pe_resource_t *) iter->data;
+ rsc = (pcmk_resource_t *) iter->data;
lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */
rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
@@ -358,7 +363,7 @@ cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name,
rsc_attr_id = found_attr_id;
}
- xml_top = create_xml_node(NULL, crm_element_name(rsc->xml));
+ xml_top = create_xml_node(NULL, (const char *) rsc->xml->name);
crm_xml_add(xml_top, XML_ATTR_ID, lookup_id);
xml_obj = create_xml_node(xml_top, attr_set_type);
@@ -408,19 +413,19 @@ cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name,
need_init = false;
pcmk__unpack_constraints(rsc->cluster);
pe__clear_resource_flags_on_all(rsc->cluster,
- pe_rsc_detect_loop);
+ pcmk_rsc_detect_loop);
}
/* We want to set the attribute only on resources explicitly
* colocated with this one, so we use rsc->rsc_cons_lhs directly
* rather than the with_this_colocations() method.
*/
- pe__set_resource_flags(rsc, pe_rsc_detect_loop);
+ pe__set_resource_flags(rsc, pcmk_rsc_detect_loop);
for (lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) {
pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data;
crm_debug("Checking %s %d", cons->id, cons->score);
- if (!pcmk_is_set(cons->dependent->flags, pe_rsc_detect_loop)
+ if (!pcmk_is_set(cons->dependent->flags, pcmk_rsc_detect_loop)
&& (cons->score > 0)) {
crm_debug("Setting %s=%s for dependent resource %s",
attr_name, attr_value, cons->dependent->id);
@@ -440,14 +445,14 @@ cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name,
// \return Standard Pacemaker return code
int
-cli_resource_delete_attribute(pe_resource_t *rsc, const char *requested_name,
+cli_resource_delete_attribute(pcmk_resource_t *rsc, const char *requested_name,
const char *attr_set, const char *attr_set_type,
const char *attr_id, const char *attr_name,
cib_t *cib, int cib_options, gboolean force)
{
pcmk__output_t *out = rsc->cluster->priv;
int rc = pcmk_rc_ok;
- GList/*<pe_resource_t*>*/ *resources = NULL;
+ GList/*<pcmk_resource_t*>*/ *resources = NULL;
if ((attr_id == NULL) && !force) {
find_resource_attr(out, cib, XML_ATTR_ID,
@@ -482,7 +487,7 @@ cli_resource_delete_attribute(pe_resource_t *rsc, const char *requested_name,
char *found_attr_id = NULL;
const char *rsc_attr_id = attr_id;
- rsc = (pe_resource_t *) iter->data;
+ rsc = (pcmk_resource_t *) iter->data;
lookup_id = clone_strip(rsc->id);
rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
@@ -534,9 +539,10 @@ cli_resource_delete_attribute(pe_resource_t *rsc, const char *requested_name,
// \return Standard Pacemaker return code
static int
send_lrm_rsc_op(pcmk_ipc_api_t *controld_api, bool do_fail_resource,
- const char *host_uname, const char *rsc_id, pe_working_set_t *data_set)
+ const char *host_uname, const char *rsc_id,
+ pcmk_scheduler_t *scheduler)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
const char *router_node = host_uname;
const char *rsc_api_id = NULL;
const char *rsc_long_id = NULL;
@@ -544,13 +550,13 @@ send_lrm_rsc_op(pcmk_ipc_api_t *controld_api, bool do_fail_resource,
const char *rsc_provider = NULL;
const char *rsc_type = NULL;
bool cib_only = false;
- pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
+ pcmk_resource_t *rsc = pe_find_resource(scheduler->resources, rsc_id);
if (rsc == NULL) {
out->err(out, "Resource %s not found", rsc_id);
return ENXIO;
- } else if (rsc->variant != pe_native) {
+ } else if (rsc->variant != pcmk_rsc_variant_primitive) {
out->err(out, "We can only process primitive resources, not %s", rsc_id);
return EINVAL;
}
@@ -564,7 +570,7 @@ send_lrm_rsc_op(pcmk_ipc_api_t *controld_api, bool do_fail_resource,
}
{
- pe_node_t *node = pe_find_node(data_set->nodes, host_uname);
+ pcmk_node_t *node = pe_find_node(scheduler->nodes, host_uname);
if (node == NULL) {
out->err(out, "Node %s not found", host_uname);
@@ -617,17 +623,20 @@ send_lrm_rsc_op(pcmk_ipc_api_t *controld_api, bool do_fail_resource,
* \note The caller is responsible for freeing the result.
*/
static inline char *
-rsc_fail_name(const pe_resource_t *rsc)
+rsc_fail_name(const pcmk_resource_t *rsc)
{
const char *name = (rsc->clone_name? rsc->clone_name : rsc->id);
- return pcmk_is_set(rsc->flags, pe_rsc_unique)? strdup(name) : clone_strip(name);
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_unique)) {
+ return strdup(name);
+ }
+ return clone_strip(name);
}
// \return Standard Pacemaker return code
static int
clear_rsc_history(pcmk_ipc_api_t *controld_api, const char *host_uname,
- const char *rsc_id, pe_working_set_t *data_set)
+ const char *rsc_id, pcmk_scheduler_t *scheduler)
{
int rc = pcmk_rc_ok;
@@ -636,7 +645,7 @@ clear_rsc_history(pcmk_ipc_api_t *controld_api, const char *host_uname,
* single operation, we might wind up with a wrong idea of the current
* resource state, and we might not re-probe the resource.
*/
- rc = send_lrm_rsc_op(controld_api, false, host_uname, rsc_id, data_set);
+ rc = send_lrm_rsc_op(controld_api, false, host_uname, rsc_id, scheduler);
if (rc != pcmk_rc_ok) {
return rc;
}
@@ -654,7 +663,7 @@ clear_rsc_history(pcmk_ipc_api_t *controld_api, const char *host_uname,
static int
clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api,
const char *node_name, const char *rsc_id, const char *operation,
- const char *interval_spec, pe_working_set_t *data_set)
+ const char *interval_spec, pcmk_scheduler_t *scheduler)
{
int rc = pcmk_rc_ok;
const char *failed_value = NULL;
@@ -675,7 +684,7 @@ clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api,
crm_parse_interval_spec(interval_spec));
}
- for (xmlNode *xml_op = pcmk__xml_first_child(data_set->failed);
+ for (xmlNode *xml_op = pcmk__xml_first_child(scheduler->failed);
xml_op != NULL;
xml_op = pcmk__xml_next(xml_op)) {
@@ -687,10 +696,12 @@ clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api,
// No resource specified means all resources match
if (rsc_id) {
- pe_resource_t *fail_rsc = pe_find_resource_with_flags(data_set->resources,
- failed_id,
- pe_find_renamed|pe_find_anon);
+ pcmk_resource_t *fail_rsc = NULL;
+ fail_rsc = pe_find_resource_with_flags(scheduler->resources,
+ failed_id,
+ pcmk_rsc_match_history
+ |pcmk_rsc_match_anon_basename);
if (!fail_rsc || !pcmk__str_eq(rsc_id, fail_rsc->id, pcmk__str_casei)) {
continue;
}
@@ -722,7 +733,7 @@ clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api,
g_hash_table_iter_init(&iter, rscs);
while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) {
crm_debug("Erasing failures of %s on %s", failed_id, node_name);
- rc = clear_rsc_history(controld_api, node_name, failed_id, data_set);
+ rc = clear_rsc_history(controld_api, node_name, failed_id, scheduler);
if (rc != pcmk_rc_ok) {
return rc;
}
@@ -733,8 +744,8 @@ clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api,
// \return Standard Pacemaker return code
static int
-clear_rsc_fail_attrs(const pe_resource_t *rsc, const char *operation,
- const char *interval_spec, const pe_node_t *node)
+clear_rsc_fail_attrs(const pcmk_resource_t *rsc, const char *operation,
+ const char *interval_spec, const pcmk_node_t *node)
{
int rc = pcmk_rc_ok;
int attr_options = pcmk__node_attr_none;
@@ -754,13 +765,13 @@ clear_rsc_fail_attrs(const pe_resource_t *rsc, const char *operation,
// \return Standard Pacemaker return code
int
cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname,
- const pe_resource_t *rsc, const char *operation,
+ const pcmk_resource_t *rsc, const char *operation,
const char *interval_spec, bool just_failures,
- pe_working_set_t *data_set, gboolean force)
+ pcmk_scheduler_t *scheduler, gboolean force)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
int rc = pcmk_rc_ok;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
if (rsc == NULL) {
return ENXIO;
@@ -768,10 +779,11 @@ cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname,
} else if (rsc->children) {
for (const GList *lpc = rsc->children; lpc != NULL; lpc = lpc->next) {
- const pe_resource_t *child = (const pe_resource_t *) lpc->data;
+ const pcmk_resource_t *child = (const pcmk_resource_t *) lpc->data;
rc = cli_resource_delete(controld_api, host_uname, child, operation,
- interval_spec, just_failures, data_set, force);
+ interval_spec, just_failures, scheduler,
+ force);
if (rc != pcmk_rc_ok) {
return rc;
}
@@ -783,11 +795,11 @@ cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname,
GList *nodes = g_hash_table_get_values(rsc->known_on);
if(nodes == NULL && force) {
- nodes = pcmk__copy_node_list(data_set->nodes, false);
+ nodes = pcmk__copy_node_list(scheduler->nodes, false);
} else if(nodes == NULL && rsc->exclusive_discover) {
GHashTableIter iter;
- pe_node_t *node = NULL;
+ pcmk_node_t *node = NULL;
g_hash_table_iter_init(&iter, rsc->allowed_nodes);
while (g_hash_table_iter_next(&iter, NULL, (void**)&node)) {
@@ -801,12 +813,12 @@ cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname,
}
for (lpc = nodes; lpc != NULL; lpc = lpc->next) {
- node = (pe_node_t *) lpc->data;
+ node = (pcmk_node_t *) lpc->data;
if (node->details->online) {
rc = cli_resource_delete(controld_api, node->details->uname, rsc,
operation, interval_spec, just_failures,
- data_set, force);
+ scheduler, force);
}
if (rc != pcmk_rc_ok) {
g_list_free(nodes);
@@ -818,7 +830,7 @@ cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname,
return pcmk_rc_ok;
}
- node = pe_find_node(data_set->nodes, host_uname);
+ node = pe_find_node(scheduler->nodes, host_uname);
if (node == NULL) {
out->err(out, "Unable to clean up %s because node %s not found",
@@ -847,13 +859,13 @@ cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname,
if (just_failures) {
rc = clear_rsc_failures(out, controld_api, host_uname, rsc->id, operation,
- interval_spec, data_set);
+ interval_spec, scheduler);
} else {
- rc = clear_rsc_history(controld_api, host_uname, rsc->id, data_set);
+ rc = clear_rsc_history(controld_api, host_uname, rsc->id, scheduler);
}
if (rc != pcmk_rc_ok) {
out->err(out, "Cleaned %s failures on %s, but unable to clean history: %s",
- rsc->id, host_uname, pcmk_strerror(rc));
+ rsc->id, host_uname, pcmk_rc_str(rc));
} else {
out->info(out, "Cleaned up %s on %s", rsc->id, host_uname);
}
@@ -864,9 +876,9 @@ cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname,
int
cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name,
const char *operation, const char *interval_spec,
- pe_working_set_t *data_set)
+ pcmk_scheduler_t *scheduler)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
int rc = pcmk_rc_ok;
int attr_options = pcmk__node_attr_none;
const char *display_name = node_name? node_name : "all nodes";
@@ -878,7 +890,7 @@ cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name,
}
if (node_name) {
- pe_node_t *node = pe_find_node(data_set->nodes, node_name);
+ pcmk_node_t *node = pe_find_node(scheduler->nodes, node_name);
if (node == NULL) {
out->err(out, "Unknown node: %s", node_name);
@@ -899,21 +911,21 @@ cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name,
if (node_name) {
rc = clear_rsc_failures(out, controld_api, node_name, NULL,
- operation, interval_spec, data_set);
+ operation, interval_spec, scheduler);
if (rc != pcmk_rc_ok) {
out->err(out, "Cleaned all resource failures on %s, but unable to clean history: %s",
- node_name, pcmk_strerror(rc));
+ node_name, pcmk_rc_str(rc));
return rc;
}
} else {
- for (GList *iter = data_set->nodes; iter; iter = iter->next) {
- pe_node_t *node = (pe_node_t *) iter->data;
+ for (GList *iter = scheduler->nodes; iter; iter = iter->next) {
+ pcmk_node_t *node = (pcmk_node_t *) iter->data;
rc = clear_rsc_failures(out, controld_api, node->details->uname, NULL,
- operation, interval_spec, data_set);
+ operation, interval_spec, scheduler);
if (rc != pcmk_rc_ok) {
out->err(out, "Cleaned all resource failures on all nodes, but unable to clean history: %s",
- pcmk_strerror(rc));
+ pcmk_rc_str(rc));
return rc;
}
}
@@ -933,13 +945,13 @@ check_role(resource_checks_t *checks)
return;
}
switch (text2role(role_s)) {
- case RSC_ROLE_STOPPED:
+ case pcmk_role_stopped:
checks->flags |= rsc_remain_stopped;
break;
- case RSC_ROLE_UNPROMOTED:
+ case pcmk_role_unpromoted:
if (pcmk_is_set(pe__const_top_resource(checks->rsc, false)->flags,
- pe_rsc_promotable)) {
+ pcmk_rsc_promotable)) {
checks->flags |= rsc_unpromotable;
}
break;
@@ -970,7 +982,7 @@ check_locked(resource_checks_t *checks)
}
static bool
-node_is_unhealthy(pe_node_t *node)
+node_is_unhealthy(pcmk_node_t *node)
{
switch (pe__health_strategy(node->details->data_set)) {
case pcmk__health_strategy_none:
@@ -1000,7 +1012,7 @@ node_is_unhealthy(pe_node_t *node)
}
static void
-check_node_health(resource_checks_t *checks, pe_node_t *node)
+check_node_health(resource_checks_t *checks, pcmk_node_t *node)
{
if (node == NULL) {
GHashTableIter iter;
@@ -1025,7 +1037,7 @@ check_node_health(resource_checks_t *checks, pe_node_t *node)
}
int
-cli_resource_check(pcmk__output_t *out, pe_resource_t *rsc, pe_node_t *node)
+cli_resource_check(pcmk__output_t *out, pcmk_resource_t *rsc, pcmk_node_t *node)
{
resource_checks_t checks = { .rsc = rsc };
@@ -1040,15 +1052,15 @@ cli_resource_check(pcmk__output_t *out, pe_resource_t *rsc, pe_node_t *node)
// \return Standard Pacemaker return code
int
cli_resource_fail(pcmk_ipc_api_t *controld_api, const char *host_uname,
- const char *rsc_id, pe_working_set_t *data_set)
+ const char *rsc_id, pcmk_scheduler_t *scheduler)
{
crm_notice("Failing %s on %s", rsc_id, host_uname);
- return send_lrm_rsc_op(controld_api, true, host_uname, rsc_id, data_set);
+ return send_lrm_rsc_op(controld_api, true, host_uname, rsc_id, scheduler);
}
static GHashTable *
-generate_resource_params(pe_resource_t *rsc, pe_node_t *node,
- pe_working_set_t *data_set)
+generate_resource_params(pcmk_resource_t *rsc, pcmk_node_t *node,
+ pcmk_scheduler_t *scheduler)
{
GHashTable *params = NULL;
GHashTable *meta = NULL;
@@ -1059,7 +1071,7 @@ generate_resource_params(pe_resource_t *rsc, pe_node_t *node,
combined = pcmk__strkey_table(free, free);
- params = pe_rsc_params(rsc, node, data_set);
+ params = pe_rsc_params(rsc, node, scheduler);
if (params != NULL) {
g_hash_table_iter_init(&iter, params);
while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
@@ -1068,7 +1080,7 @@ generate_resource_params(pe_resource_t *rsc, pe_node_t *node,
}
meta = pcmk__strkey_table(free, free);
- get_meta_attributes(meta, rsc, node, data_set);
+ get_meta_attributes(meta, rsc, node, scheduler);
if (meta != NULL) {
g_hash_table_iter_init(&iter, meta);
while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
@@ -1082,7 +1094,7 @@ generate_resource_params(pe_resource_t *rsc, pe_node_t *node,
return combined;
}
-bool resource_is_running_on(pe_resource_t *rsc, const char *host)
+bool resource_is_running_on(pcmk_resource_t *rsc, const char *host)
{
bool found = true;
GList *hIter = NULL;
@@ -1094,7 +1106,7 @@ bool resource_is_running_on(pe_resource_t *rsc, const char *host)
rsc->fns->location(rsc, &hosts, TRUE);
for (hIter = hosts; host != NULL && hIter != NULL; hIter = hIter->next) {
- pe_node_t *node = (pe_node_t *) hIter->data;
+ pcmk_node_t *node = (pcmk_node_t *) hIter->data;
if (pcmk__strcase_any_of(host, node->details->uname, node->details->id, NULL)) {
crm_trace("Resource %s is running on %s\n", rsc->id, host);
@@ -1132,13 +1144,13 @@ get_active_resources(const char *host, GList *rsc_list)
GList *active = NULL;
for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) {
- pe_resource_t *rsc = (pe_resource_t *) rIter->data;
+ pcmk_resource_t *rsc = (pcmk_resource_t *) rIter->data;
/* Expand groups to their members, because if we're restarting a member
* other than the first, we can't otherwise tell which resources are
* stopping and starting.
*/
- if (rsc->variant == pe_group) {
+ if (rsc->variant == pcmk_rsc_variant_group) {
active = g_list_concat(active,
get_active_resources(host, rsc->children));
} else if (resource_is_running_on(rsc, host)) {
@@ -1148,7 +1160,7 @@ get_active_resources(const char *host, GList *rsc_list)
return active;
}
-static void dump_list(GList *items, const char *tag)
+static void dump_list(GList *items, const char *tag)
{
int lpc = 0;
GList *item = NULL;
@@ -1170,45 +1182,45 @@ static void display_list(pcmk__output_t *out, GList *items, const char *tag)
/*!
* \internal
- * \brief Upgrade XML to latest schema version and use it as working set input
+ * \brief Upgrade XML to latest schema version and use it as scheduler input
*
- * This also updates the working set timestamp to the current time.
+ * This also updates the scheduler timestamp to the current time.
*
- * \param[in,out] data_set Working set instance to update
- * \param[in,out] xml XML to use as input
+ * \param[in,out] scheduler Scheduler data to update
+ * \param[in,out] xml XML to use as input
*
* \return Standard Pacemaker return code
* \note On success, caller is responsible for freeing memory allocated for
- * data_set->now.
+ * scheduler->now.
* \todo This follows the example of other callers of cli_config_update()
* and returns ENOKEY ("Required key not available") if that fails,
* but perhaps pcmk_rc_schema_validation would be better in that case.
*/
int
-update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml)
+update_scheduler_input(pcmk_scheduler_t *scheduler, xmlNode **xml)
{
if (cli_config_update(xml, NULL, FALSE) == FALSE) {
return ENOKEY;
}
- data_set->input = *xml;
- data_set->now = crm_time_new(NULL);
+ scheduler->input = *xml;
+ scheduler->now = crm_time_new(NULL);
return pcmk_rc_ok;
}
/*!
* \internal
- * \brief Update a working set's XML input based on a CIB query
+ * \brief Update scheduler XML input based on a CIB query
*
- * \param[in] data_set Data set instance to initialize
+ * \param[in] scheduler Scheduler data to initialize
* \param[in] cib Connection to the CIB manager
*
* \return Standard Pacemaker return code
* \note On success, caller is responsible for freeing memory allocated for
- * data_set->input and data_set->now.
+ * scheduler->input and scheduler->now.
*/
static int
-update_working_set_from_cib(pcmk__output_t *out, pe_working_set_t * data_set,
- cib_t *cib)
+update_scheduler_input_to_cib(pcmk__output_t *out, pcmk_scheduler_t *scheduler,
+ cib_t *cib)
{
xmlNode *cib_xml_copy = NULL;
int rc = pcmk_rc_ok;
@@ -1217,10 +1229,10 @@ update_working_set_from_cib(pcmk__output_t *out, pe_working_set_t * data_set,
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
- out->err(out, "Could not obtain the current CIB: %s (%d)", pcmk_strerror(rc), rc);
+ out->err(out, "Could not obtain the current CIB: %s (%d)", pcmk_rc_str(rc), rc);
return rc;
}
- rc = update_working_set_xml(data_set, &cib_xml_copy);
+ rc = update_scheduler_input(scheduler, &cib_xml_copy);
if (rc != pcmk_rc_ok) {
out->err(out, "Could not upgrade the current CIB XML");
free_xml(cib_xml_copy);
@@ -1232,18 +1244,19 @@ update_working_set_from_cib(pcmk__output_t *out, pe_working_set_t * data_set,
// \return Standard Pacemaker return code
static int
-update_dataset(cib_t *cib, pe_working_set_t * data_set, bool simulate)
+update_dataset(cib_t *cib, pcmk_scheduler_t *scheduler, bool simulate)
{
char *pid = NULL;
char *shadow_file = NULL;
cib_t *shadow_cib = NULL;
int rc = pcmk_rc_ok;
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
- pe_reset_working_set(data_set);
- pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
- rc = update_working_set_from_cib(out, data_set, cib);
+ pe_reset_working_set(scheduler);
+ pe__set_working_set_flags(scheduler,
+ pcmk_sched_no_counts|pcmk_sched_no_compat);
+ rc = update_scheduler_input_to_cib(out, scheduler, cib);
if (rc != pcmk_rc_ok) {
return rc;
}
@@ -1261,7 +1274,7 @@ update_dataset(cib_t *cib, pe_working_set_t * data_set, bool simulate)
goto done;
}
- rc = write_xml_file(data_set->input, shadow_file, FALSE);
+ rc = write_xml_file(scheduler->input, shadow_file, FALSE);
if (rc < 0) {
out->err(out, "Could not populate shadow cib: %s (%d)", pcmk_strerror(rc), rc);
@@ -1272,26 +1285,27 @@ update_dataset(cib_t *cib, pe_working_set_t * data_set, bool simulate)
rc = pcmk_legacy2rc(rc);
if (rc != pcmk_rc_ok) {
- out->err(out, "Could not connect to shadow cib: %s (%d)", pcmk_strerror(rc), rc);
+ out->err(out, "Could not connect to shadow cib: %s (%d)", pcmk_rc_str(rc), rc);
goto done;
}
- pcmk__schedule_actions(data_set->input,
- pe_flag_no_counts|pe_flag_no_compat, data_set);
+ pcmk__schedule_actions(scheduler->input,
+ pcmk_sched_no_counts|pcmk_sched_no_compat,
+ scheduler);
prev_quiet = out->is_quiet(out);
out->quiet = true;
- pcmk__simulate_transition(data_set, shadow_cib, NULL);
+ pcmk__simulate_transition(scheduler, shadow_cib, NULL);
out->quiet = prev_quiet;
- rc = update_dataset(shadow_cib, data_set, false);
+ rc = update_dataset(shadow_cib, scheduler, false);
} else {
- cluster_status(data_set);
+ cluster_status(scheduler);
}
done:
- /* Do not free data_set->input here, we need rsc->xml to be valid later on */
+ // Do not free scheduler->input here, we need rsc->xml to be valid later on
cib_delete(shadow_cib);
free(pid);
@@ -1303,64 +1317,96 @@ update_dataset(cib_t *cib, pe_working_set_t * data_set, bool simulate)
return rc;
}
+/*!
+ * \internal
+ * \brief Find the maximum stop timeout of a resource and its children (if any)
+ *
+ * \param[in,out] rsc Resource to get timeout for
+ *
+ * \return Maximum stop timeout for \p rsc (in milliseconds)
+ */
static int
-max_delay_for_resource(pe_working_set_t * data_set, pe_resource_t *rsc)
+max_rsc_stop_timeout(pcmk_resource_t *rsc)
{
- int delay = 0;
+ long long result_ll;
int max_delay = 0;
+ xmlNode *config = NULL;
+ GHashTable *meta = NULL;
- if(rsc && rsc->children) {
- GList *iter = NULL;
+ if (rsc == NULL) {
+ return 0;
+ }
- for(iter = rsc->children; iter; iter = iter->next) {
- pe_resource_t *child = (pe_resource_t *)iter->data;
+ // If resource is collective, use maximum of its children's stop timeouts
+ if (rsc->children != NULL) {
+ for (GList *iter = rsc->children; iter; iter = iter->next) {
+ pcmk_resource_t *child = iter->data;
+ int delay = max_rsc_stop_timeout(child);
- delay = max_delay_for_resource(data_set, child);
- if(delay > max_delay) {
- double seconds = delay / 1000.0;
- crm_trace("Calculated new delay of %.1fs due to %s", seconds, child->id);
+ if (delay > max_delay) {
+ pe_rsc_trace(rsc,
+ "Maximum stop timeout for %s is now %s due to %s",
+ rsc->id, pcmk__readable_interval(delay), child->id);
max_delay = delay;
}
}
+ return max_delay;
+ }
- } else if(rsc) {
- char *key = crm_strdup_printf("%s_%s_0", rsc->id, RSC_STOP);
- pe_action_t *stop = custom_action(rsc, key, RSC_STOP, NULL, TRUE, FALSE, data_set);
- const char *value = g_hash_table_lookup(stop->meta, XML_ATTR_TIMEOUT);
- long long result_ll;
+ // Get resource's stop action configuration from CIB
+ config = pcmk__find_action_config(rsc, PCMK_ACTION_STOP, 0, true);
- if ((pcmk__scan_ll(value, &result_ll, -1LL) == pcmk_rc_ok)
- && (result_ll >= 0) && (result_ll <= INT_MAX)) {
- max_delay = (int) result_ll;
- } else {
- max_delay = -1;
- }
- pe_free_action(stop);
+ /* Get configured timeout for stop action (fully evaluated for rules,
+ * defaults, etc.).
+ *
+ * @TODO This currently ignores node (which might matter for rules)
+ */
+ meta = pcmk__unpack_action_meta(rsc, NULL, PCMK_ACTION_STOP, 0, config);
+ if ((pcmk__scan_ll(g_hash_table_lookup(meta, XML_ATTR_TIMEOUT),
+ &result_ll, -1LL) == pcmk_rc_ok)
+ && (result_ll >= 0) && (result_ll <= INT_MAX)) {
+ max_delay = (int) result_ll;
}
+ g_hash_table_destroy(meta);
return max_delay;
}
+/*!
+ * \internal
+ * \brief Find a reasonable waiting time for stopping any one resource in a list
+ *
+ * \param[in,out] scheduler Scheduler data
+ * \param[in] resources List of names of resources that will be stopped
+ *
+ * \return Rough estimate of a reasonable time to wait (in seconds) to stop any
+ * one resource in \p resources
+ * \note This estimate is very rough, simply the maximum stop timeout of all
+ * given resources and their children, plus a small fudge factor. It does
+ * not account for children that must be stopped in sequence, action
+ * throttling, or any demotions needed. It checks the stop timeout, even
+ * if the resources in question are actually being started.
+ */
static int
-max_delay_in(pe_working_set_t * data_set, GList *resources)
+wait_time_estimate(pcmk_scheduler_t *scheduler, const GList *resources)
{
int max_delay = 0;
- GList *item = NULL;
-
- for (item = resources; item != NULL; item = item->next) {
- int delay = 0;
- pe_resource_t *rsc = pe_find_resource(data_set->resources, (const char *)item->data);
- delay = max_delay_for_resource(data_set, rsc);
+ // Find maximum stop timeout in milliseconds
+ for (const GList *item = resources; item != NULL; item = item->next) {
+ pcmk_resource_t *rsc = pe_find_resource(scheduler->resources,
+ (const char *) item->data);
+ int delay = max_rsc_stop_timeout(rsc);
- if(delay > max_delay) {
- double seconds = delay / 1000.0;
- crm_trace("Calculated new delay of %.1fs due to %s", seconds, rsc->id);
+ if (delay > max_delay) {
+ pe_rsc_trace(rsc,
+ "Wait time is now %s due to %s",
+ pcmk__readable_interval(delay), rsc->id);
max_delay = delay;
}
}
- return 5 + (max_delay / 1000);
+ return (max_delay / 1000) + 5;
}
#define waiting_for_starts(d, r, h) ((d != NULL) || \
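The rewritten timeout helpers above estimate how long crm_resource --restart should wait for a set of resources to stop: the largest configured stop timeout across the listed resources (recursing into children of collective resources), converted from milliseconds to seconds, plus a five-second fudge factor. The arithmetic on its own, as a small self-contained sketch with the timeouts already known:

    /* Sketch only: timeouts_ms would come from each resource's stop action
     * configuration (see max_rsc_stop_timeout() above).
     */
    static int
    wait_estimate_s(const int *timeouts_ms, size_t n)
    {
        int max_ms = 0;

        for (size_t i = 0; i < n; i++) {
            if (timeouts_ms[i] > max_ms) {
                max_ms = timeouts_ms[i];    // keep the largest stop timeout
            }
        }
        return (max_ms / 1000) + 5;         // seconds, plus a small fudge factor
    }

For example, stop timeouts of 20s, 60s and 90s yield an estimate of 95 seconds, which cli_resource_restart() then divides by its polling interval when no explicit timeout was given.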
@@ -1390,8 +1436,8 @@ max_delay_in(pe_working_set_t * data_set, GList *resources)
* \return Standard Pacemaker return code (exits on certain failures)
*/
int
-cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc,
- const pe_node_t *node, const char *move_lifetime,
+cli_resource_restart(pcmk__output_t *out, pcmk_resource_t *rsc,
+ const pcmk_node_t *node, const char *move_lifetime,
int timeout_ms, cib_t *cib, int cib_options,
gboolean promoted_role_only, gboolean force)
{
@@ -1412,8 +1458,8 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc,
GList *current_active = NULL;
GList *restart_target_active = NULL;
- pe_working_set_t *data_set = NULL;
- pe_resource_t *parent = uber_parent(rsc);
+ pcmk_scheduler_t *scheduler = NULL;
+ pcmk_resource_t *parent = uber_parent(rsc);
bool running = false;
const char *id = rsc->clone_name ? rsc->clone_name : rsc->id;
@@ -1435,7 +1481,9 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc,
lookup_id = clone_strip(rsc->id);
}
- rsc = parent->fns->find_rsc(parent, lookup_id, node, pe_find_any|pe_find_current);
+ rsc = parent->fns->find_rsc(parent, lookup_id, node,
+ pcmk_rsc_match_basename
+ |pcmk_rsc_match_current_node);
free(lookup_id);
running = resource_is_running_on(rsc, host);
}
@@ -1449,6 +1497,11 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc,
return ENXIO;
}
+ if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
+ out->err(out, "Unmanaged resources cannot be restarted.");
+ return EAGAIN;
+ }
+
rsc_id = strdup(rsc->id);
if (pe_rsc_is_unique_clone(parent)) {
@@ -1485,32 +1538,32 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc,
- Allow a --no-deps option (aka. --force-restart)
*/
- data_set = pe_new_working_set();
- if (data_set == NULL) {
- crm_perror(LOG_ERR, "Could not allocate working set");
- rc = ENOMEM;
+ scheduler = pe_new_working_set();
+ if (scheduler == NULL) {
+ rc = errno;
+ out->err(out, "Could not allocate scheduler data: %s", pcmk_rc_str(rc));
goto done;
}
- data_set->priv = out;
- rc = update_dataset(cib, data_set, false);
+ scheduler->priv = out;
+ rc = update_dataset(cib, scheduler, false);
if(rc != pcmk_rc_ok) {
- out->err(out, "Could not get new resource list: %s (%d)", pcmk_strerror(rc), rc);
+ out->err(out, "Could not get new resource list: %s (%d)", pcmk_rc_str(rc), rc);
goto done;
}
- restart_target_active = get_active_resources(host, data_set->resources);
- current_active = get_active_resources(host, data_set->resources);
+ restart_target_active = get_active_resources(host, scheduler->resources);
+ current_active = get_active_resources(host, scheduler->resources);
dump_list(current_active, "Origin");
if (stop_via_ban) {
/* Stop the clone or bundle instance by banning it from the host */
out->quiet = true;
- rc = cli_resource_ban(out, lookup_id, host, move_lifetime, NULL, cib,
- cib_options, promoted_role_only);
-
+ rc = cli_resource_ban(out, lookup_id, host, move_lifetime, cib,
+ cib_options, promoted_role_only,
+ PCMK__ROLE_PROMOTED);
} else {
/* Stop the resource by setting target-role to Stopped.
* Remember any existing target-role so we can restore it later
@@ -1521,11 +1574,11 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc,
NULL, XML_RSC_ATTR_TARGET_ROLE, &orig_target_role);
rc = cli_resource_update_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS,
NULL, XML_RSC_ATTR_TARGET_ROLE,
- RSC_STOPPED, FALSE, cib, cib_options,
- force);
+ PCMK_ACTION_STOPPED, FALSE, cib,
+ cib_options, force);
}
if(rc != pcmk_rc_ok) {
- out->err(out, "Could not set target-role for %s: %s (%d)", rsc_id, pcmk_strerror(rc), rc);
+ out->err(out, "Could not set target-role for %s: %s (%d)", rsc_id, pcmk_rc_str(rc), rc);
if (current_active != NULL) {
g_list_free_full(current_active, free);
current_active = NULL;
@@ -1537,13 +1590,13 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc,
goto done;
}
- rc = update_dataset(cib, data_set, true);
+ rc = update_dataset(cib, scheduler, true);
if(rc != pcmk_rc_ok) {
out->err(out, "Could not determine which resources would be stopped");
goto failure;
}
- target_active = get_active_resources(host, data_set->resources);
+ target_active = get_active_resources(host, scheduler->resources);
dump_list(target_active, "Target");
list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
@@ -1554,7 +1607,8 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc,
while (list_delta != NULL) {
before = g_list_length(list_delta);
if(timeout_ms == 0) {
- step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval;
+ step_timeout_s = wait_time_estimate(scheduler, list_delta)
+ / sleep_interval;
}
/* We probably don't need the entire step timeout */
@@ -1564,7 +1618,7 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc,
timeout -= sleep_interval;
crm_trace("%ds remaining", timeout);
}
- rc = update_dataset(cib, data_set, FALSE);
+ rc = update_dataset(cib, scheduler, FALSE);
if(rc != pcmk_rc_ok) {
out->err(out, "Could not determine which resources were stopped");
goto failure;
@@ -1572,12 +1626,12 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc,
if (current_active != NULL) {
g_list_free_full(current_active, free);
- current_active = NULL;
}
- current_active = get_active_resources(host, data_set->resources);
+ current_active = get_active_resources(host, scheduler->resources);
+
g_list_free(list_delta);
- list_delta = NULL;
list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
+
dump_list(current_active, "Current");
dump_list(list_delta, "Delta");
}
@@ -1610,15 +1664,15 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc,
}
if(rc != pcmk_rc_ok) {
- out->err(out, "Could not unset target-role for %s: %s (%d)", rsc_id, pcmk_strerror(rc), rc);
+ out->err(out, "Could not unset target-role for %s: %s (%d)", rsc_id, pcmk_rc_str(rc), rc);
goto done;
}
if (target_active != NULL) {
g_list_free_full(target_active, free);
- target_active = NULL;
}
target_active = restart_target_active;
+
list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
out->info(out, "Waiting for %d resources to start again:", g_list_length(list_delta));
display_list(out, list_delta, " * ");
@@ -1627,7 +1681,8 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc,
while (waiting_for_starts(list_delta, rsc, host)) {
before = g_list_length(list_delta);
if(timeout_ms == 0) {
- step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval;
+ step_timeout_s = wait_time_estimate(scheduler, list_delta)
+ / sleep_interval;
}
/* We probably don't need the entire step timeout */
@@ -1639,21 +1694,20 @@ cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc,
crm_trace("%ds remaining", timeout);
}
- rc = update_dataset(cib, data_set, false);
+ rc = update_dataset(cib, scheduler, false);
if(rc != pcmk_rc_ok) {
out->err(out, "Could not determine which resources were started");
goto failure;
}
+ /* It's OK if dependent resources moved to a different node,
+ * so we check active resources on all nodes.
+ */
if (current_active != NULL) {
g_list_free_full(current_active, free);
- current_active = NULL;
}
+ current_active = get_active_resources(NULL, scheduler->resources);
- /* It's OK if dependent resources moved to a different node,
- * so we check active resources on all nodes.
- */
- current_active = get_active_resources(NULL, data_set->resources);
g_list_free(list_delta);
list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
dump_list(current_active, "Current");
@@ -1702,16 +1756,17 @@ done:
}
free(rsc_id);
free(lookup_id);
- pe_free_working_set(data_set);
+ pe_free_working_set(scheduler);
return rc;
}
static inline bool
-action_is_pending(const pe_action_t *action)
+action_is_pending(const pcmk_action_t *action)
{
- if (pcmk_any_flags_set(action->flags, pe_action_optional|pe_action_pseudo)
- || !pcmk_is_set(action->flags, pe_action_runnable)
- || pcmk__str_eq("notify", action->task, pcmk__str_casei)) {
+ if (pcmk_any_flags_set(action->flags,
+ pcmk_action_optional|pcmk_action_pseudo)
+ || !pcmk_is_set(action->flags, pcmk_action_runnable)
+ || pcmk__str_eq(PCMK_ACTION_NOTIFY, action->task, pcmk__str_casei)) {
return false;
}
return true;
@@ -1729,7 +1784,7 @@ static bool
actions_are_pending(const GList *actions)
{
for (const GList *action = actions; action != NULL; action = action->next) {
- const pe_action_t *a = (const pe_action_t *) action->data;
+ const pcmk_action_t *a = (const pcmk_action_t *) action->data;
if (action_is_pending(a)) {
crm_notice("Waiting for %s (flags=%#.8x)", a->uuid, a->flags);
@@ -1746,7 +1801,7 @@ print_pending_actions(pcmk__output_t *out, GList *actions)
out->info(out, "Pending actions:");
for (action = actions; action != NULL; action = action->next) {
- pe_action_t *a = (pe_action_t *) action->data;
+ pcmk_action_t *a = (pcmk_action_t *) action->data;
if (!action_is_pending(a)) {
continue;
@@ -1786,27 +1841,28 @@ print_pending_actions(pcmk__output_t *out, GList *actions)
int
wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib)
{
- pe_working_set_t *data_set = NULL;
+ pcmk_scheduler_t *scheduler = NULL;
+ xmlXPathObjectPtr search;
int rc = pcmk_rc_ok;
+ bool pending_unknown_state_resources;
int timeout_s = timeout_ms? ((timeout_ms + 999) / 1000) : WAIT_DEFAULT_TIMEOUT_S;
time_t expire_time = time(NULL) + timeout_s;
time_t time_diff;
bool printed_version_warning = out->is_quiet(out); // i.e. don't print if quiet
- data_set = pe_new_working_set();
- if (data_set == NULL) {
+ scheduler = pe_new_working_set();
+ if (scheduler == NULL) {
return ENOMEM;
}
do {
-
/* Abort if timeout is reached */
time_diff = expire_time - time(NULL);
if (time_diff > 0) {
crm_info("Waiting up to %lld seconds for cluster actions to complete", (long long) time_diff);
} else {
- print_pending_actions(out, data_set->actions);
- pe_free_working_set(data_set);
+ print_pending_actions(out, scheduler->actions);
+ pe_free_working_set(scheduler);
return ETIME;
}
if (rc == pcmk_rc_ok) { /* this avoids sleep on first loop iteration */
@@ -1814,14 +1870,15 @@ wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib)
}
/* Get latest transition graph */
- pe_reset_working_set(data_set);
- rc = update_working_set_from_cib(out, data_set, cib);
+ pe_reset_working_set(scheduler);
+ rc = update_scheduler_input_to_cib(out, scheduler, cib);
if (rc != pcmk_rc_ok) {
- pe_free_working_set(data_set);
+ pe_free_working_set(scheduler);
return rc;
}
- pcmk__schedule_actions(data_set->input,
- pe_flag_no_counts|pe_flag_no_compat, data_set);
+ pcmk__schedule_actions(scheduler->input,
+ pcmk_sched_no_counts|pcmk_sched_no_compat,
+ scheduler);
if (!printed_version_warning) {
/* If the DC has a different version than the local node, the two
@@ -1832,7 +1889,7 @@ wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib)
* wait as a new controller operation that would be forwarded to the
* DC. However, that would have potential problems of its own.
*/
- const char *dc_version = g_hash_table_lookup(data_set->config_hash,
+ const char *dc_version = g_hash_table_lookup(scheduler->config_hash,
"dc-version");
if (!pcmk__str_eq(dc_version, PACEMAKER_VERSION "-" BUILD_VERSION, pcmk__str_casei)) {
@@ -1842,9 +1899,13 @@ wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib)
}
}
- } while (actions_are_pending(data_set->actions));
+ search = xpath_search(scheduler->input, "/cib/status/node_state/lrm/lrm_resources/lrm_resource/"
+ XML_LRM_TAG_RSC_OP "[@" XML_LRM_ATTR_RC "='193']");
+ pending_unknown_state_resources = (numXpathResults(search) > 0);
+ freeXpathObject(search);
+ } while (actions_are_pending(scheduler->actions) || pending_unknown_state_resources);
- pe_free_working_set(data_set);
+ pe_free_working_set(scheduler);
return rc;
}
@@ -1853,10 +1914,10 @@ get_action(const char *rsc_action) {
const char *action = NULL;
if (pcmk__str_eq(rsc_action, "validate", pcmk__str_casei)) {
- action = "validate-all";
+ action = PCMK_ACTION_VALIDATE_ALL;
} else if (pcmk__str_eq(rsc_action, "force-check", pcmk__str_casei)) {
- action = "monitor";
+ action = PCMK_ACTION_MONITOR;
} else if (pcmk__strcase_any_of(rsc_action, "force-start", "force-stop",
"force-demote", "force-promote", NULL)) {
@@ -1898,7 +1959,7 @@ set_agent_environment(GHashTable *params, int timeout_ms, int check_level,
free(level);
}
- setenv("HA_debug", (verbosity > 0)? "1" : "0", 1);
+ pcmk__set_env_option(PCMK__ENV_DEBUG, ((verbosity > 0)? "1" : "0"), true);
if (verbosity > 1) {
setenv("OCF_TRACE_RA", "1", 1);
}
@@ -1948,7 +2009,7 @@ cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name,
// If no timeout was provided, use the same default as the cluster
if (timeout_ms == 0) {
- timeout_ms = crm_get_msec(CRM_DEFAULT_OP_TIMEOUT_S);
+ timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
}
set_agent_environment(params, timeout_ms, check_level, resource_verbose);
@@ -2000,12 +2061,12 @@ done:
}
crm_exit_t
-cli_resource_execute(pe_resource_t *rsc, const char *requested_name,
+cli_resource_execute(pcmk_resource_t *rsc, const char *requested_name,
const char *rsc_action, GHashTable *override_hash,
- int timeout_ms, cib_t * cib, pe_working_set_t *data_set,
+ int timeout_ms, cib_t *cib, pcmk_scheduler_t *scheduler,
int resource_verbose, gboolean force, int check_level)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
crm_exit_t exit_code = CRM_EX_OK;
const char *rid = NULL;
const char *rtype = NULL;
@@ -2016,7 +2077,7 @@ cli_resource_execute(pe_resource_t *rsc, const char *requested_name,
if (pcmk__strcase_any_of(rsc_action, "force-start", "force-demote",
"force-promote", NULL)) {
if(pe_rsc_is_clone(rsc)) {
- GList *nodes = cli_resource_search(rsc, requested_name, data_set);
+ GList *nodes = cli_resource_search(rsc, requested_name, scheduler);
if(nodes != NULL && force == FALSE) {
out->err(out, "It is not safe to %s %s here: the cluster claims it is already active",
rsc_action, rsc->id);
@@ -2034,10 +2095,10 @@ cli_resource_execute(pe_resource_t *rsc, const char *requested_name,
rsc = rsc->children->data;
}
- if(rsc->variant == pe_group) {
+ if (rsc->variant == pcmk_rsc_variant_group) {
out->err(out, "Sorry, the %s option doesn't support group resources", rsc_action);
return CRM_EX_UNIMPLEMENT_FEATURE;
- } else if (rsc->variant == pe_container || pe_rsc_is_bundled(rsc)) {
+ } else if (pe_rsc_is_bundled(rsc)) {
out->err(out, "Sorry, the %s option doesn't support bundled resources", rsc_action);
return CRM_EX_UNIMPLEMENT_FEATURE;
}
@@ -2047,10 +2108,11 @@ cli_resource_execute(pe_resource_t *rsc, const char *requested_name,
rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE);
params = generate_resource_params(rsc, NULL /* @TODO use local node */,
- data_set);
+ scheduler);
if (timeout_ms == 0) {
- timeout_ms = pe_get_configured_timeout(rsc, get_action(rsc_action), data_set);
+ timeout_ms = pe_get_configured_timeout(rsc, get_action(rsc_action),
+ scheduler);
}
rid = pe_rsc_is_anon_clone(rsc->parent)? requested_name : rsc->id;
@@ -2063,26 +2125,28 @@ cli_resource_execute(pe_resource_t *rsc, const char *requested_name,
// \return Standard Pacemaker return code
int
-cli_resource_move(const pe_resource_t *rsc, const char *rsc_id,
+cli_resource_move(const pcmk_resource_t *rsc, const char *rsc_id,
const char *host_name, const char *move_lifetime, cib_t *cib,
- int cib_options, pe_working_set_t *data_set,
+ int cib_options, pcmk_scheduler_t *scheduler,
gboolean promoted_role_only, gboolean force)
{
- pcmk__output_t *out = data_set->priv;
+ pcmk__output_t *out = scheduler->priv;
int rc = pcmk_rc_ok;
unsigned int count = 0;
- pe_node_t *current = NULL;
- pe_node_t *dest = pe_find_node(data_set->nodes, host_name);
+ pcmk_node_t *current = NULL;
+ pcmk_node_t *dest = pe_find_node(scheduler->nodes, host_name);
bool cur_is_dest = false;
if (dest == NULL) {
return pcmk_rc_node_unknown;
}
- if (promoted_role_only && !pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
- const pe_resource_t *p = pe__const_top_resource(rsc, false);
+ if (promoted_role_only
+ && !pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
- if (pcmk_is_set(p->flags, pe_rsc_promotable)) {
+ const pcmk_resource_t *p = pe__const_top_resource(rsc, false);
+
+ if (pcmk_is_set(p->flags, pcmk_rsc_promotable)) {
out->info(out, "Using parent '%s' for move instead of '%s'.", rsc->id, rsc_id);
rsc_id = p->id;
rsc = p;
@@ -2096,15 +2160,15 @@ cli_resource_move(const pe_resource_t *rsc, const char *rsc_id,
current = pe__find_active_requires(rsc, &count);
- if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
+ if (pcmk_is_set(rsc->flags, pcmk_rsc_promotable)) {
unsigned int promoted_count = 0;
- pe_node_t *promoted_node = NULL;
+ pcmk_node_t *promoted_node = NULL;
for (const GList *iter = rsc->children; iter; iter = iter->next) {
- const pe_resource_t *child = (const pe_resource_t *) iter->data;
+ const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data;
enum rsc_role_e child_role = child->fns->state(child, TRUE);
- if (child_role == RSC_ROLE_PROMOTED) {
+ if (child_role == pcmk_role_promoted) {
rsc = child;
promoted_node = pe__current_node(child);
promoted_count++;
@@ -2137,15 +2201,17 @@ cli_resource_move(const pe_resource_t *rsc, const char *rsc_id,
}
/* Clear any previous prefer constraints across all nodes. */
- cli_resource_clear(rsc_id, NULL, data_set->nodes, cib, cib_options, false, force);
+ cli_resource_clear(rsc_id, NULL, scheduler->nodes, cib, cib_options, false,
+ force);
/* Clear any previous ban constraints on 'dest'. */
- cli_resource_clear(rsc_id, dest->details->uname, data_set->nodes, cib,
+ cli_resource_clear(rsc_id, dest->details->uname, scheduler->nodes, cib,
cib_options, TRUE, force);
/* Record an explicit preference for 'dest' */
rc = cli_resource_prefer(out, rsc_id, dest->details->uname, move_lifetime,
- cib, cib_options, promoted_role_only);
+ cib, cib_options, promoted_role_only,
+ PCMK__ROLE_PROMOTED);
crm_trace("%s%s now prefers %s%s",
rsc->id, (promoted_role_only? " (promoted)" : ""),
@@ -2158,8 +2224,8 @@ cli_resource_move(const pe_resource_t *rsc, const char *rsc_id,
/* Ban the original location if possible */
if(current) {
(void)cli_resource_ban(out, rsc_id, current->details->uname, move_lifetime,
- NULL, cib, cib_options, promoted_role_only);
-
+ cib, cib_options, promoted_role_only,
+ PCMK__ROLE_PROMOTED);
} else if(count > 1) {
out->info(out, "Resource '%s' is currently %s in %d locations. "
"One may now move to %s",
diff --git a/tools/crm_shadow.c b/tools/crm_shadow.c
index ef69502..b86f462 100644
--- a/tools/crm_shadow.c
+++ b/tools/crm_shadow.c
@@ -147,15 +147,15 @@ instruction_xml(pcmk__output_t *out, va_list args)
* -# Patchset containing the changes in the shadow CIB (can be \p NULL)
* -# Group of \p shadow_disp_flags indicating which fields to display
*/
-PCMK__OUTPUT_ARGS("shadow", "const char *", "const char *", "xmlNodePtr",
- "xmlNodePtr", "enum shadow_disp_flags")
+PCMK__OUTPUT_ARGS("shadow", "const char *", "const char *", "const xmlNode *",
+ "const xmlNode *", "enum shadow_disp_flags")
static int
shadow_default(pcmk__output_t *out, va_list args)
{
const char *instance = va_arg(args, const char *);
const char *filename = va_arg(args, const char *);
- xmlNodePtr content = va_arg(args, xmlNodePtr);
- xmlNodePtr diff = va_arg(args, xmlNodePtr);
+ const xmlNode *content = va_arg(args, const xmlNode *);
+ const xmlNode *diff = va_arg(args, const xmlNode *);
enum shadow_disp_flags flags = (enum shadow_disp_flags) va_arg(args, int);
int rc = pcmk_rc_no_output;
@@ -210,8 +210,8 @@ shadow_default(pcmk__output_t *out, va_list args)
* -# Patchset containing the changes in the shadow CIB (can be \p NULL)
* -# Group of \p shadow_disp_flags indicating which fields to display
*/
-PCMK__OUTPUT_ARGS("shadow", "const char *", "const char *", "xmlNodePtr",
- "xmlNodePtr", "enum shadow_disp_flags")
+PCMK__OUTPUT_ARGS("shadow", "const char *", "const char *", "const xmlNode *",
+ "const xmlNode *", "enum shadow_disp_flags")
static int
shadow_text(pcmk__output_t *out, va_list args)
{
@@ -221,8 +221,8 @@ shadow_text(pcmk__output_t *out, va_list args)
} else {
const char *instance = va_arg(args, const char *);
const char *filename = va_arg(args, const char *);
- xmlNodePtr content = va_arg(args, xmlNodePtr);
- xmlNodePtr diff = va_arg(args, xmlNodePtr);
+ const xmlNode *content = va_arg(args, const xmlNode *);
+ const xmlNode *diff = va_arg(args, const xmlNode *);
enum shadow_disp_flags flags = (enum shadow_disp_flags) va_arg(args, int);
int rc = pcmk_rc_no_output;
@@ -271,15 +271,15 @@ shadow_text(pcmk__output_t *out, va_list args)
* -# Group of \p shadow_disp_flags indicating which fields to display
* (ignored)
*/
-PCMK__OUTPUT_ARGS("shadow", "const char *", "const char *", "xmlNodePtr",
- "xmlNodePtr", "enum shadow_disp_flags")
+PCMK__OUTPUT_ARGS("shadow", "const char *", "const char *", "const xmlNode *",
+ "const xmlNode *", "enum shadow_disp_flags")
static int
shadow_xml(pcmk__output_t *out, va_list args)
{
const char *instance = va_arg(args, const char *);
const char *filename = va_arg(args, const char *);
- xmlNodePtr content = va_arg(args, xmlNodePtr);
- xmlNodePtr diff = va_arg(args, xmlNodePtr);
+ const xmlNode *content = va_arg(args, const xmlNode *);
+ const xmlNode *diff = va_arg(args, const xmlNode *);
enum shadow_disp_flags flags G_GNUC_UNUSED =
(enum shadow_disp_flags) va_arg(args, int);
@@ -512,13 +512,13 @@ read_xml(const char *filename, xmlNode **output, GError **error)
* \internal
* \brief Write the shadow XML to a file
*
- * \param[in,out] xml Shadow XML
- * \param[in] filename Name of destination file
- * \param[in] reset Whether the write is a reset (for logging only)
- * \param[out] error Where to store error
+ * \param[in] xml Shadow XML
+ * \param[in] filename Name of destination file
+ * \param[in] reset Whether the write is a reset (for logging only)
+ * \param[out] error Where to store error
*/
static int
-write_shadow_file(xmlNode *xml, const char *filename, bool reset,
+write_shadow_file(const xmlNode *xml, const char *filename, bool reset,
GError **error)
{
int rc = write_xml_file(xml, filename, FALSE);
@@ -927,9 +927,7 @@ show_shadow_diff(pcmk__output_t *out, GError **error)
xmlNodePtr old_config = NULL;
xmlNodePtr new_config = NULL;
xmlNodePtr diff = NULL;
- pcmk__output_t *logger_out = NULL;
bool quiet_orig = out->quiet;
- int rc = pcmk_rc_ok;
if (get_instance_from_env(error) != pcmk_rc_ok) {
return;
@@ -951,18 +949,7 @@ show_shadow_diff(pcmk__output_t *out, GError **error)
xml_calculate_changes(old_config, new_config);
diff = xml_create_patchset(0, old_config, new_config, NULL, false);
- rc = pcmk__log_output_new(&logger_out);
- if (rc != pcmk_rc_ok) {
- exit_code = pcmk_rc2exitc(rc);
- g_set_error(error, PCMK__EXITC_ERROR, exit_code,
- "Could not create logger object: %s", pcmk_rc_str(rc));
- goto done;
- }
- pcmk__output_set_log_level(logger_out, LOG_INFO);
- rc = pcmk__xml_show_changes(logger_out, new_config);
- logger_out->finish(logger_out, pcmk_rc2exitc(rc), true, NULL);
- pcmk__output_free(logger_out);
-
+ pcmk__log_xml_changes(LOG_INFO, new_config);
xml_accept_changes(new_config);
out->quiet = true;
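For readability, the shadow-diff flow after this change reduces to the following sequence; this is a sketch assembled from the calls shown in the hunks above, not additional patch content:

    /* Compute the differences between the live and shadow configurations,
     * build a patchset, log the XML changes at info level (now one call),
     * and finally accept the changes on the new configuration.
     */
    xml_calculate_changes(old_config, new_config);
    diff = xml_create_patchset(0, old_config, new_config, NULL, false);
    pcmk__log_xml_changes(LOG_INFO, new_config);
    xml_accept_changes(new_config);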
diff --git a/tools/crm_simulate.c b/tools/crm_simulate.c
index 932c5bd..aab4110 100644
--- a/tools/crm_simulate.c
+++ b/tools/crm_simulate.c
@@ -29,6 +29,7 @@
#include <crm/common/util.h>
#include <crm/common/iso8601.h>
#include <crm/pengine/status.h>
+#include <crm/pengine/internal.h>
#include <pacemaker-internal.h>
#include <pacemaker.h>
@@ -450,7 +451,7 @@ int
main(int argc, char **argv)
{
int rc = pcmk_rc_ok;
- pe_working_set_t *data_set = NULL;
+ pcmk_scheduler_t *scheduler = NULL;
pcmk__output_t *out = NULL;
GError *error = NULL;
@@ -513,24 +514,26 @@ main(int argc, char **argv)
#endif
}
- data_set = pe_new_working_set();
- if (data_set == NULL) {
+ scheduler = pe_new_working_set();
+ if (scheduler == NULL) {
rc = ENOMEM;
- g_set_error(&error, PCMK__RC_ERROR, rc, "Could not allocate working set");
+ g_set_error(&error, PCMK__RC_ERROR, rc,
+ "Could not allocate scheduler data");
goto done;
}
if (pcmk_is_set(options.flags, pcmk_sim_show_scores)) {
- pe__set_working_set_flags(data_set, pe_flag_show_scores);
+ pe__set_working_set_flags(scheduler, pcmk_sched_output_scores);
}
if (pcmk_is_set(options.flags, pcmk_sim_show_utilization)) {
- pe__set_working_set_flags(data_set, pe_flag_show_utilization);
+ pe__set_working_set_flags(scheduler, pcmk_sched_show_utilization);
}
- pe__set_working_set_flags(data_set, pe_flag_no_compat);
+ pe__set_working_set_flags(scheduler, pcmk_sched_no_compat);
if (options.test_dir != NULL) {
- data_set->priv = out;
- pcmk__profile_dir(options.test_dir, options.repeat, data_set, options.use_date);
+ scheduler->priv = out;
+ pcmk__profile_dir(options.test_dir, options.repeat, scheduler,
+ options.use_date);
rc = pcmk_rc_ok;
goto done;
}
@@ -542,9 +545,9 @@ main(int argc, char **argv)
goto done;
}
- rc = pcmk__simulate(data_set, out, options.injections, options.flags, section_opts,
- options.use_date, options.input_file, options.graph_file,
- options.dot_file);
+ rc = pcmk__simulate(scheduler, out, options.injections, options.flags,
+ section_opts, options.use_date, options.input_file,
+ options.graph_file, options.dot_file);
done:
pcmk__output_and_clear_error(&error, NULL);
@@ -562,8 +565,8 @@ main(int argc, char **argv)
pcmk__free_arg_context(context);
g_strfreev(processed_args);
- if (data_set) {
- pe_free_working_set(data_set);
+ if (scheduler != NULL) {
+ pe_free_working_set(scheduler);
}
fflush(stderr);
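The crm_simulate changes follow the allocation pattern applied throughout this patch: the former working set is now referred to as scheduler data, with the same lifecycle calls. A minimal sketch of that lifecycle, limited to calls shown in the diff:

    pcmk_scheduler_t *scheduler = pe_new_working_set();

    if (scheduler == NULL) {
        return ENOMEM;                  /* allocation failed */
    }

    /* Suppress resource counts and legacy compatibility behavior */
    pe__set_working_set_flags(scheduler,
                              pcmk_sched_no_counts|pcmk_sched_no_compat);

    scheduler->priv = out;              /* attach the pcmk__output_t object */

    /* ... schedule or simulate ... */

    pe_free_working_set(scheduler);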
diff --git a/tools/crm_ticket.c b/tools/crm_ticket.c
index c451e8a..d95b581 100644
--- a/tools/crm_ticket.c
+++ b/tools/crm_ticket.c
@@ -31,6 +31,7 @@
#include <crm/cib/internal.h>
#include <crm/pengine/rules.h>
#include <crm/pengine/status.h>
+#include <crm/pengine/internal.h>
#include <pacemaker-internal.h>
@@ -253,10 +254,10 @@ static GOptionEntry deprecated_entries[] = {
{ NULL }
};
-static pe_ticket_t *
-find_ticket(gchar *ticket_id, pe_working_set_t * data_set)
+static pcmk_ticket_t *
+find_ticket(gchar *ticket_id, pcmk_scheduler_t *scheduler)
{
- return g_hash_table_lookup(data_set->tickets, ticket_id);
+ return g_hash_table_lookup(scheduler->tickets, ticket_id);
}
static void
@@ -275,7 +276,7 @@ print_date(time_t time)
}
static void
-print_ticket(pe_ticket_t * ticket, bool raw, bool details)
+print_ticket(pcmk_ticket_t *ticket, bool raw, bool details)
{
if (raw) {
fprintf(stdout, "%s\n", ticket->id);
@@ -325,12 +326,12 @@ print_ticket(pe_ticket_t * ticket, bool raw, bool details)
}
static void
-print_ticket_list(pe_working_set_t * data_set, bool raw, bool details)
+print_ticket_list(pcmk_scheduler_t *scheduler, bool raw, bool details)
{
GHashTableIter iter;
- pe_ticket_t *ticket = NULL;
+ pcmk_ticket_t *ticket = NULL;
- g_hash_table_iter_init(&iter, data_set->tickets);
+ g_hash_table_iter_init(&iter, scheduler->tickets);
while (g_hash_table_iter_next(&iter, NULL, (void **)&ticket)) {
print_ticket(ticket, raw, details);
@@ -369,7 +370,7 @@ find_ticket_state(cib_t * the_cib, gchar *ticket_id, xmlNode ** ticket_state_xml
}
crm_log_xml_debug(xml_search, "Match");
- if (xml_has_children(xml_search)) {
+ if (xml_search->children != NULL) {
if (ticket_id) {
fprintf(stdout, "Multiple ticket_states match ticket_id=%s\n", ticket_id);
}
@@ -439,7 +440,7 @@ dump_ticket_xml(cib_t * the_cib, gchar *ticket_id)
char *state_xml_str = NULL;
state_xml_str = dump_xml_formatted(state_xml);
- fprintf(stdout, "\n%s", pcmk__s(state_xml_str, "<null>\n"));
+ fprintf(stdout, "\n%s", state_xml_str);
free_xml(state_xml);
free(state_xml_str);
}
@@ -461,8 +462,7 @@ dump_constraints(cib_t * the_cib, gchar *ticket_id)
}
cons_xml_str = dump_xml_formatted(cons_xml);
- fprintf(stdout, "Constraints XML:\n\n%s",
- pcmk__s(cons_xml_str, "<null>\n"));
+ fprintf(stdout, "Constraints XML:\n\n%s", cons_xml_str);
free_xml(cons_xml);
free(cons_xml_str);
@@ -471,14 +471,14 @@ dump_constraints(cib_t * the_cib, gchar *ticket_id)
static int
get_ticket_state_attr(gchar *ticket_id, const char *attr_name, const char **attr_value,
- pe_working_set_t * data_set)
+ pcmk_scheduler_t *scheduler)
{
- pe_ticket_t *ticket = NULL;
+ pcmk_ticket_t *ticket = NULL;
CRM_ASSERT(attr_value != NULL);
*attr_value = NULL;
- ticket = g_hash_table_lookup(data_set->tickets, ticket_id);
+ ticket = g_hash_table_lookup(scheduler->tickets, ticket_id);
if (ticket == NULL) {
return ENXIO;
}
@@ -564,7 +564,7 @@ allow_modification(gchar *ticket_id)
}
static int
-modify_ticket_state(gchar * ticket_id, cib_t * cib, pe_working_set_t * data_set)
+modify_ticket_state(gchar *ticket_id, cib_t *cib, pcmk_scheduler_t *scheduler)
{
int rc = pcmk_rc_ok;
xmlNode *xml_top = NULL;
@@ -577,7 +577,7 @@ modify_ticket_state(gchar * ticket_id, cib_t * cib, pe_working_set_t * data_set)
char *key = NULL;
char *value = NULL;
- pe_ticket_t *ticket = NULL;
+ pcmk_ticket_t *ticket = NULL;
rc = find_ticket_state(cib, ticket_id, &ticket_state_xml);
if (rc == pcmk_rc_ok) {
@@ -605,7 +605,7 @@ modify_ticket_state(gchar * ticket_id, cib_t * cib, pe_working_set_t * data_set)
xml_remove_prop(ticket_state_xml, key);
}
- ticket = find_ticket(ticket_id, data_set);
+ ticket = find_ticket(ticket_id, scheduler);
g_hash_table_iter_init(&hash_iter, attr_set);
while (g_hash_table_iter_next(&hash_iter, (gpointer *) & key, (gpointer *) & value)) {
@@ -719,7 +719,7 @@ build_arg_context(pcmk__common_args_t *args) {
int
main(int argc, char **argv)
{
- pe_working_set_t *data_set = NULL;
+ pcmk_scheduler_t *scheduler = NULL;
xmlNode *cib_xml_copy = NULL;
cib_t *cib_conn = NULL;
@@ -751,13 +751,16 @@ main(int argc, char **argv)
pcmk__cli_help('v');
}
- data_set = pe_new_working_set();
- if (data_set == NULL) {
- crm_perror(LOG_CRIT, "Could not allocate working set");
- exit_code = CRM_EX_OSERR;
+ scheduler = pe_new_working_set();
+ if (scheduler == NULL) {
+ rc = errno;
+ exit_code = pcmk_rc2exitc(rc);
+ g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
+ "Could not allocate scheduler data: %s", pcmk_rc_str(rc));
goto done;
}
- pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
+ pe__set_working_set_flags(scheduler,
+ pcmk_sched_no_counts|pcmk_sched_no_compat);
cib_conn = cib_new();
if (cib_conn == NULL) {
@@ -798,14 +801,14 @@ main(int argc, char **argv)
goto done;
}
- data_set->input = cib_xml_copy;
- data_set->now = crm_time_new(NULL);
+ scheduler->input = cib_xml_copy;
+ scheduler->now = crm_time_new(NULL);
- cluster_status(data_set);
+ cluster_status(scheduler);
/* For recording the tickets that are referenced in rsc_ticket constraints
* but have never been granted yet. */
- pcmk__unpack_constraints(data_set);
+ pcmk__unpack_constraints(scheduler);
if (options.ticket_cmd == 'l' || options.ticket_cmd == 'L' || options.ticket_cmd == 'w') {
bool raw = false;
@@ -818,7 +821,7 @@ main(int argc, char **argv)
}
if (options.ticket_id) {
- pe_ticket_t *ticket = find_ticket(options.ticket_id, data_set);
+ pcmk_ticket_t *ticket = find_ticket(options.ticket_id, scheduler);
if (ticket == NULL) {
exit_code = CRM_EX_NOSUCH;
@@ -829,7 +832,7 @@ main(int argc, char **argv)
print_ticket(ticket, raw, details);
} else {
- print_ticket_list(data_set, raw, details);
+ print_ticket_list(scheduler, raw, details);
}
} else if (options.ticket_cmd == 'q') {
@@ -860,7 +863,8 @@ main(int argc, char **argv)
goto done;
}
- rc = get_ticket_state_attr(options.ticket_id, options.get_attr_name, &value, data_set);
+ rc = get_ticket_state_attr(options.ticket_id, options.get_attr_name,
+ &value, scheduler);
if (rc == pcmk_rc_ok) {
fprintf(stdout, "%s\n", value);
} else if (rc == ENXIO && options.attr_default) {
@@ -878,9 +882,9 @@ main(int argc, char **argv)
}
if (options.force == FALSE) {
- pe_ticket_t *ticket = NULL;
+ pcmk_ticket_t *ticket = NULL;
- ticket = find_ticket(options.ticket_id, data_set);
+ ticket = find_ticket(options.ticket_id, scheduler);
if (ticket == NULL) {
exit_code = CRM_EX_NOSUCH;
g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
@@ -934,7 +938,7 @@ main(int argc, char **argv)
goto done;
}
- rc = modify_ticket_state(options.ticket_id, cib_conn, data_set);
+ rc = modify_ticket_state(options.ticket_id, cib_conn, scheduler);
exit_code = pcmk_rc2exitc(rc);
if (rc != pcmk_rc_ok) {
@@ -985,8 +989,8 @@ main(int argc, char **argv)
}
attr_delete = NULL;
- pe_free_working_set(data_set);
- data_set = NULL;
+ pe_free_working_set(scheduler);
+ scheduler = NULL;
cib__clean_up_connection(&cib_conn);
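The crm_ticket changes keep the existing ticket table; only the types are renamed. A brief sketch of looking up one ticket and iterating over all of them with the renamed types, using only calls taken from the hunks above:

    GHashTableIter iter;
    pcmk_ticket_t *ticket = NULL;

    /* Look up a single ticket by ID */
    ticket = g_hash_table_lookup(scheduler->tickets, ticket_id);
    if (ticket == NULL) {
        return ENXIO;                   /* no such ticket */
    }

    /* Iterate over all known tickets */
    g_hash_table_iter_init(&iter, scheduler->tickets);
    while (g_hash_table_iter_next(&iter, NULL, (void **) &ticket)) {
        print_ticket(ticket, raw, details);
    }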
diff --git a/tools/crm_verify.c b/tools/crm_verify.c
index 43b09da..199814e 100644
--- a/tools/crm_verify.c
+++ b/tools/crm_verify.c
@@ -85,10 +85,23 @@ build_arg_context(pcmk__common_args_t *args, GOptionGroup **group) {
"Check the consistency of the configuration in the running cluster:\n\n"
"\tcrm_verify --live-check\n\n"
"Check the consistency of the configuration in a given file and "
+ "produce quiet output:\n\n"
+ "\tcrm_verify --xml-file file.xml --quiet\n\n"
+ "Check the consistency of the configuration in a given file and "
"produce verbose output:\n\n"
"\tcrm_verify --xml-file file.xml --verbose\n\n";
+ GOptionEntry extra_prog_entries[] = {
+ { "quiet", 'q', 0, G_OPTION_ARG_NONE, &(args->quiet),
+ "Don't print verify information",
+ NULL },
+ { NULL }
+ };
+
context = pcmk__build_arg_context(args, "text (default), xml", group, NULL);
+
+ pcmk__add_main_args(context, extra_prog_entries);
+
g_option_context_set_description(context, description);
pcmk__add_arg_group(context, "data", "Data sources:",
@@ -105,8 +118,7 @@ main(int argc, char **argv)
xmlNode *cib_object = NULL;
xmlNode *status = NULL;
- pe_working_set_t *data_set = NULL;
- const char *xml_tag = NULL;
+ pcmk_scheduler_t *scheduler = NULL;
int rc = pcmk_rc_ok;
crm_exit_t exit_code = CRM_EX_OK;
@@ -126,6 +138,10 @@ main(int argc, char **argv)
goto done;
}
+ if (args->verbosity > 0) {
+ args->verbosity -= args->quiet;
+ }
+
pcmk__cli_init_logging("crm_verify", args->verbosity);
rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
@@ -143,6 +159,9 @@ main(int argc, char **argv)
pcmk__register_lib_messages(out);
+ pcmk__set_config_error_handler((pcmk__config_error_func) out->err, out);
+ pcmk__set_config_warning_handler((pcmk__config_warning_func) out->err, out);
+
crm_info("=#=#=#=#= Getting XML =#=#=#=#=");
if (options.use_live_cib) {
@@ -184,8 +203,7 @@ main(int argc, char **argv)
goto done;
}
- xml_tag = crm_element_name(cib_object);
- if (!pcmk__str_eq(xml_tag, XML_TAG_CIB, pcmk__str_casei)) {
+ if (!pcmk__xe_is(cib_object, XML_TAG_CIB)) {
rc = EBADMSG;
g_set_error(&error, PCMK__RC_ERROR, rc,
"This tool can only check complete configurations (i.e. those starting with <cib>).");
@@ -201,7 +219,7 @@ main(int argc, char **argv)
create_xml_node(cib_object, XML_CIB_TAG_STATUS);
}
- if (validate_xml(cib_object, NULL, FALSE) == FALSE) {
+ if (pcmk__validate_xml(cib_object, NULL, (xmlRelaxNGValidityErrorFunc) out->err, out) == FALSE) {
pcmk__config_err("CIB did not pass schema validation");
free_xml(cib_object);
cib_object = NULL;
@@ -215,13 +233,14 @@ main(int argc, char **argv)
xml_latest_schema());
}
- data_set = pe_new_working_set();
- if (data_set == NULL) {
+ scheduler = pe_new_working_set();
+ if (scheduler == NULL) {
rc = errno;
- crm_perror(LOG_CRIT, "Unable to allocate working set");
+ g_set_error(&error, PCMK__RC_ERROR, rc,
+ "Could not allocate scheduler data: %s", pcmk_rc_str(rc));
goto done;
}
- data_set->priv = out;
+ scheduler->priv = out;
/* Process the configuration to set crm_config_error/crm_config_warning.
*
@@ -229,31 +248,31 @@ main(int argc, char **argv)
* example, action configuration), so we aren't necessarily checking those.
*/
if (cib_object != NULL) {
- unsigned long long flags = pe_flag_no_counts|pe_flag_no_compat;
+ unsigned long long flags = pcmk_sched_no_counts|pcmk_sched_no_compat;
if ((status == NULL) && !options.use_live_cib) {
// No status available, so do minimal checks
- flags |= pe_flag_check_config;
+ flags |= pcmk_sched_validate_only;
}
- pcmk__schedule_actions(cib_object, flags, data_set);
+ pcmk__schedule_actions(cib_object, flags, scheduler);
}
- pe_free_working_set(data_set);
+ pe_free_working_set(scheduler);
if (crm_config_error) {
rc = pcmk_rc_schema_validation;
- if (args->verbosity > 0) {
+ if (args->verbosity > 0 || pcmk__str_eq(args->output_ty, "xml", pcmk__str_none)) {
g_set_error(&error, PCMK__RC_ERROR, rc,
"Errors found during check: config not valid");
} else {
g_set_error(&error, PCMK__RC_ERROR, rc,
"Errors found during check: config not valid\n-V may provide more details");
- }
+ }
} else if (crm_config_warning) {
rc = pcmk_rc_schema_validation;
- if (args->verbosity > 0) {
+ if (args->verbosity > 0 || pcmk__str_eq(args->output_ty, "xml", pcmk__str_none)) {
g_set_error(&error, PCMK__RC_ERROR, rc,
"Warnings found during check: config may not be valid");
} else {
@@ -273,7 +292,7 @@ main(int argc, char **argv)
exit_code = pcmk_rc2exitc(rc);
}
- pcmk__output_and_clear_error(&error, NULL);
+ pcmk__output_and_clear_error(&error, out);
if (out != NULL) {
out->finish(out, exit_code, true, NULL);
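crm_verify now routes configuration errors and warnings through its output object rather than the default logger. A minimal sketch of how the registered handlers interact with a later validation failure, drawn from the calls shown in the hunks above:

    /* Send pcmk__config_err()/pcmk__config_warn() messages to out->err() */
    pcmk__set_config_error_handler((pcmk__config_error_func) out->err, out);
    pcmk__set_config_warning_handler((pcmk__config_warning_func) out->err, out);

    /* A schema validation failure is then reported through the handler */
    if (pcmk__validate_xml(cib_object, NULL,
                           (xmlRelaxNGValidityErrorFunc) out->err,
                           out) == FALSE) {
        pcmk__config_err("CIB did not pass schema validation");
    }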
diff --git a/tools/stonith_admin.c b/tools/stonith_admin.c
index 1077de7..01f72d5 100644
--- a/tools/stonith_admin.c
+++ b/tools/stonith_admin.c
@@ -344,7 +344,11 @@ request_fencing(stonith_t *st, const char *target, const char *command,
if (rc != pcmk_rc_ok) {
const char *rc_str = pcmk_rc_str(rc);
- const char *what = (strcmp(command, "on") == 0)? "unfence" : "fence";
+ const char *what = "fence";
+
+ if (strcmp(command, PCMK_ACTION_ON) == 0) {
+ what = "unfence";
+ }
// If reason is identical to return code string, don't display it twice
if (pcmk__str_eq(rc_str, reason, pcmk__str_none)) {
@@ -542,7 +546,7 @@ main(int argc, char **argv)
case 'I':
rc = pcmk__fence_installed(out, st, options.timeout*1000);
if (rc != pcmk_rc_ok) {
- out->err(out, "Failed to list installed devices: %s", pcmk_strerror(rc));
+ out->err(out, "Failed to list installed devices: %s", pcmk_rc_str(rc));
}
break;
@@ -550,7 +554,7 @@ main(int argc, char **argv)
case 'L':
rc = pcmk__fence_registered(out, st, target, options.timeout*1000);
if (rc != pcmk_rc_ok) {
- out->err(out, "Failed to list registered devices: %s", pcmk_strerror(rc));
+ out->err(out, "Failed to list registered devices: %s", pcmk_rc_str(rc));
}
break;
@@ -566,7 +570,7 @@ main(int argc, char **argv)
case 's':
rc = pcmk__fence_list_targets(out, st, device, options.timeout*1000);
if (rc != pcmk_rc_ok) {
- out->err(out, "Couldn't list targets: %s", pcmk_strerror(rc));
+ out->err(out, "Couldn't list targets: %s", pcmk_rc_str(rc));
}
break;
@@ -621,15 +625,15 @@ main(int argc, char **argv)
break;
case 'B':
- rc = request_fencing(st, target, "reboot", &error);
+ rc = request_fencing(st, target, PCMK_ACTION_REBOOT, &error);
break;
case 'F':
- rc = request_fencing(st, target, "off", &error);
+ rc = request_fencing(st, target, PCMK_ACTION_OFF, &error);
break;
case 'U':
- rc = request_fencing(st, target, "on", &error);
+ rc = request_fencing(st, target, PCMK_ACTION_ON, &error);
break;
case 'h':
diff --git a/xml/Makefile.am b/xml/Makefile.am
index 70dc8ea..6acb338 100644
--- a/xml/Makefile.am
+++ b/xml/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2004-2022 the Pacemaker project contributors
+# Copyright 2004-2023 the Pacemaker project contributors
#
# The version control history for this file may have further details.
#
@@ -56,10 +56,11 @@ version_pairs_last = $(wordlist \
# Names of API schemas that form the choices for pacemaker-result content
API_request_base = command-output \
crm_attribute \
- crm_error \
- crm_mon \
+ crm_error \
+ crm_mon \
+ crm_node \
crm_resource \
- crm_rule \
+ crm_rule \
crm_shadow \
crm_simulate \
crmadmin \
@@ -69,7 +70,14 @@ API_request_base = command-output \
version
# Names of CIB schemas that form the choices for cib/configuration content
-CIB_cfg_base = options nodes resources constraints fencing acls tags alerts
+CIB_cfg_base = options \
+ nodes \
+ resources \
+ constraints \
+ fencing \
+ acls \
+ tags \
+ alerts
# Names of all schemas (including top level and those included by others)
API_base = $(API_request_base) \
@@ -86,7 +94,13 @@ API_base = $(API_request_base) \
resources \
status \
subprocess-output
-CIB_base = cib $(CIB_cfg_base) status score rule nvset
+
+CIB_base = cib \
+ $(CIB_cfg_base) \
+ status \
+ score \
+ rule \
+ nvset
# Static schema files and transforms (only CIB has transforms)
#
@@ -103,7 +117,7 @@ MON_abs_files = $(abs_srcdir)/crm_mon.rng
API_files = $(foreach base,$(API_base),$(wildcard $(srcdir)/api/$(base)-*.rng))
CIB_files = $(foreach base,$(CIB_base),$(wildcard $(srcdir)/$(base).rng $(srcdir)/$(base)-*.rng))
CIB_xsl = $(srcdir)/upgrade-1.3.xsl \
- $(srcdir)/upgrade-2.10.xsl \
+ $(srcdir)/upgrade-2.10.xsl \
$(wildcard $(srcdir)/upgrade-*enter.xsl) \
$(wildcard $(srcdir)/upgrade-*leave.xsl)
MON_files = $(srcdir)/crm_mon.rng
@@ -130,7 +144,9 @@ MON_build_copies = $(foreach f,$(MON_abs_files),$(subst $(abs_srcdir),$(abs_bui
# Dynamically generated schema files
API_generated = api/api-result.rng $(foreach base,$(API_versions),api/api-result-$(base).rng)
-CIB_generated = pacemaker.rng $(foreach base,$(CIB_versions),pacemaker-$(base).rng) versions.rng
+CIB_generated = pacemaker.rng \
+ $(foreach base,$(CIB_versions),pacemaker-$(base).rng) \
+ versions.rng
MON_generated = crm_mon.rng
CIB_version_pairs = $(call version_pairs,${CIB_numeric_versions})
@@ -138,7 +154,8 @@ CIB_version_pairs_cnt = $(words ${CIB_version_pairs})
CIB_version_pairs_last = $(call version_pairs_last,${CIB_version_pairs_cnt},${CIB_version_pairs})
dist_API_DATA = $(API_files)
-dist_CIB_DATA = $(CIB_files) $(CIB_xsl)
+dist_CIB_DATA = $(CIB_files) \
+ $(CIB_xsl)
nodist_API_DATA = $(API_generated)
nodist_CIB_DATA = $(CIB_generated)
@@ -159,10 +176,12 @@ EXTRA_DIST = README.md \
test-2-leave \
test-2-roundtrip
+.PHONY: cib-versions
cib-versions:
@echo "Max: $(CIB_max)"
@echo "Available: $(CIB_versions)"
+.PHONY: api-versions
api-versions:
@echo "Max: $(API_max)"
@echo "Available: $(API_versions)"
@@ -253,56 +272,24 @@ versions.rng: pacemaker-$(CIB_max).rng Makefile.am
$(AM_V_at)echo ' </start>' >> $@
$(AM_V_SCHEMA)echo '</grammar>' >> $@
-# diff fails with ec=2 if no predecessor is found;
-# this uses '=' GNU extension to sed, if that's not available,
-# one can use: hline=`echo "$${p}" | grep -Fn "$${hunk}" | cut -d: -f1`;
-# XXX: use line information from hunk to avoid "not detected" for ambiguity
-version_diff = \
- @for p in $(1); do \
- set `echo "$${p}" | tr '-' ' '`; \
- echo "\#\#\# *-$$2.rng vs. predecessor"; \
- for v in *-$$2.rng; do \
- echo "\#\#\#\# $${v} vs. predecessor"; b=`echo "$${v}" | cut -d- -f1`; \
- old=`./best-match.sh $${b} $$1`; \
- p=`diff -u "$${old}" "$${v}" 2>/dev/null`; \
- case $$? in \
- 1) echo "$${p}" | sed -n -e '/^@@ /!d;=;p' \
- -e ':l;n;/^\([- ]\|+.*<[^ />]\+\([^/>]\+="ID\|>$$\)\)/bl;s/^[+ ]\(.*\)/\1/p' \
- | while read hline; do \
- read h && read i || break; \
- iline=`grep -Fn "$${i}" "$${v}" | cut -d: -f1`; \
- ctxt="(not detected)"; \
- if test `echo "$${iline}" | wc -l` -eq 1; then \
- ctxt=`{ sed -n -e "1,$$(($${iline}-1))p" "$${v}"; \
- echo "<inject id=\"GOAL\"/>$${i}"; \
- sed -n -e "$$(($${iline}+1)),$$ p" "$${v}"; \
- } | $(XSLTPROC) --param skip 1 context-of.xsl -`; \
- fi; \
- echo "$${p}" | sed -n -e "$$(($${hline}-2)),$${hline}!d" \
- -e '/^\(+++\|---\)/p'; \
- echo "$${h} context: $${ctxt}"; \
- echo "$${p}" | sed -n -e "1,$${hline}d" \
- -e '/^\(---\|@@ \)/be;p;d;:e;n;be'; \
- done; \
- ;; \
- 2) echo "\#\#\#\#\# $${v} has no predecessor";; \
- esac; \
- done; \
- done
-
+.PHONY: diff
diff: best-match.sh
@echo "# Comparing changes in + since $(CIB_max)"
- $(call version_diff,${CIB_version_pairs_last})
+ @./version-diff.sh ${CIB_version_pairs_last}
+.PHONY: fulldiff
fulldiff: best-match.sh
@echo "# Comparing all changes across all the subsequent increments"
- $(call version_diff,${CIB_version_pairs})
+ @./version-diff.sh ${CIB_version_pairs}
-CLEANFILES = $(API_generated) $(CIB_generated) $(MON_generated)
+CLEANFILES = $(API_generated) \
+ $(CIB_generated) \
+ $(MON_generated)
# Remove pacemaker schema files generated by *any* source version. This allows
# "make -C xml clean" to have the desired effect when checking out an earlier
# revision in a source tree.
+.PHONY: clean-local
clean-local:
if [ "x$(srcdir)" != "x$(builddir)" ]; then \
rm -f $(API_build_copies) $(CIB_build_copies) $(MON_build_copies); \
diff --git a/xml/README.md b/xml/README.md
index e32edc2..a3a1973 100644
--- a/xml/README.md
+++ b/xml/README.md
@@ -112,7 +112,6 @@ itself, allowing for more sophistication down the road.
4. If required, add an XSLT file, and update `xslt\_SCRIPTS` in `xml/Makefile.am`.
5. Commit.
6. Run `make -C xml clean; make -C xml` to rebuild the schemas in the local
-6. Run `make -C xml clean; make -C xml` to rebuild the schemas in the local
source directory.
7. The CIB validity and upgrade regression tests will break after the schema is
updated. Run `cts/cts-cli -s` to make the expected outputs reflect the
diff --git a/xml/api/crm_node-2.32.rng b/xml/api/crm_node-2.32.rng
new file mode 100644
index 0000000..402c761
--- /dev/null
+++ b/xml/api/crm_node-2.32.rng
@@ -0,0 +1,53 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<grammar xmlns="http://relaxng.org/ns/structure/1.0"
+ datatypeLibrary="http://www.w3.org/2001/XMLSchema-datatypes">
+
+ <!-- Output of the crm_node command -->
+ <start>
+ <ref name="element-crm-node" />
+ </start>
+
+ <define name="element-crm-node">
+ <choice>
+ <ref name="cluster-info" />
+ <ref name="node-info" />
+ <ref name="node-list" />
+ </choice>
+ </define>
+
+ <define name="cluster-info">
+ <element name="cluster-info">
+ <attribute name="quorum"> <data type="boolean" /> </attribute>
+ </element>
+ </define>
+
+ <define name="node-info">
+ <element name="node-info">
+ <attribute name="nodeid"> <data type="nonNegativeInteger" /> </attribute>
+ <optional>
+ <attribute name="uname"> <text/> </attribute>
+ </optional>
+ </element>
+ </define>
+
+ <define name="node-list">
+ <element name="nodes">
+ <oneOrMore>
+ <ref name="element-node" />
+ </oneOrMore>
+ </element>
+ </define>
+
+ <define name="element-node">
+ <element name="node">
+ <attribute name="id"> <data type="nonNegativeInteger" /> </attribute>
+ <optional>
+ <attribute name="name"> <text/> </attribute>
+ </optional>
+ <optional>
+ <attribute name="state"> <text/> </attribute>
+ </optional>
+ </element>
+ </define>
+
+</grammar>
diff --git a/xml/version-diff.sh.in b/xml/version-diff.sh.in
new file mode 100644
index 0000000..1ece3b3
--- /dev/null
+++ b/xml/version-diff.sh.in
@@ -0,0 +1,60 @@
+#!@BASH_PATH@
+#
+# Copyright 2016-2023 the Pacemaker project contributors
+#
+# The version control history for this file may have further details.
+#
+# This source code is licensed under the GNU General Public License version 2
+# or later (GPLv2+) WITHOUT ANY WARRANTY.
+#
+
+# diff fails with ec=2 if no predecessor is found;
+# this uses '=' GNU extension to sed, if that's not available,
+# one can use: hline=`echo "$${p}" | grep -Fn "$${hunk}" | cut -d: -f1`;
+# XXX: use line information from hunk to avoid "not detected" for ambiguity
+for p in $*; do
+ set $(echo "$p" | tr '-' ' ')
+ echo "### *-$2.rng vs. predecessor"
+
+ for v in *-"$2".rng; do
+ echo "#### $v vs. predecessor"
+
+ b=$(echo "$v" | cut -d- -f1)
+ old=$(./best-match.sh "$b" "$1")
+ p=$(diff -u "$old" "$v" 2>/dev/null)
+
+ case $? in
+ 1)
+ echo "$p" | sed -n -e '/^@@ /!d;=;p' -e ':l;n;/^\([- ]\|+.*<[^ />]\+\([^/>]\+="ID\|>$$\)\)/bl;s/^[+ ]\(.*\)/\1/p' |
+ while read -r hline; do
+ if read -r h; then
+ read -r i
+ else
+ break
+ fi
+
+ iline=$(grep -Fn "$i" "$v" | cut -d: -f1)
+
+ if [ "$(echo "$iline" | wc -l)" = "1" ]; then
+ ctxt=$({ sed -n -e "1,$((iline - 1))p" "$v"
+ echo "<inject id=\"GOAL\"/>$i"
+ sed -n -e "$((iline + 1)),$ p" "$v"
+ } | xsltproc --param skip 1 context-of.xsl -)
+ else
+ ctxt="(not detected)"
+ fi
+
+ echo "$p" | sed -n -e "$((hline - 2)),$hline!d" -e '/^\(+++\|---\)/p'
+ echo "$h context: $ctxt"
+ echo "$p" | sed -n -e "1,${hline}d" -e '/^\(---\|@@ \)/be;p;d;:e;n;be'
+ done
+
+ ;;
+
+ 2)
+ echo "##### $v has no predecessor"
+ ;;
+
+ esac
+ done
+done